SuperCS committed
Commit 6c17566 · verified
1 Parent(s): 803b6ee

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/models/autoencoders/autoencoder_kl_cosmos.py +1110 -0
  2. exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/models/autoencoders/autoencoder_kl_hunyuan_video.py +1096 -0
  3. exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/models/autoencoders/autoencoder_kl_ltx.py +1557 -0
  4. exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/models/autoencoders/autoencoder_kl_magvit.py +1094 -0
  5. exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/models/autoencoders/autoencoder_kl_mochi.py +1131 -0
  6. exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/models/autoencoders/autoencoder_kl_qwenimage.py +1070 -0
  7. exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/models/autoencoders/autoencoder_kl_temporal_decoder.py +363 -0
  8. exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/models/autoencoders/autoencoder_kl_wan.py +1419 -0
  9. exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/models/autoencoders/autoencoder_oobleck.py +465 -0
  10. exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/models/autoencoders/autoencoder_tiny.py +346 -0
  11. exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/models/autoencoders/consistency_decoder_vae.py +462 -0
  12. exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/models/autoencoders/vae.py +896 -0
  13. exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/models/autoencoders/vq_model.py +185 -0
  14. exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/models/controlnets/__init__.py +26 -0
  15. exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/models/controlnets/controlnet.py +867 -0
  16. exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/models/controlnets/controlnet_flax.py +408 -0
  17. exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/models/controlnets/controlnet_flux.py +509 -0
  18. exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/models/controlnets/controlnet_hunyuan.py +401 -0
  19. exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/models/controlnets/controlnet_qwenimage.py +359 -0
  20. exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/models/controlnets/controlnet_sana.py +290 -0
  21. exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/models/controlnets/controlnet_sd3.py +513 -0
  22. exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/models/controlnets/controlnet_sparsectrl.py +785 -0
  23. exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/models/controlnets/controlnet_union.py +841 -0
  24. exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/models/controlnets/controlnet_xs.py +1907 -0
  25. exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/models/controlnets/multicontrolnet.py +182 -0
  26. exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/models/controlnets/multicontrolnet_union.py +195 -0
  27. exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/models/transformers/__init__.py +40 -0
  28. exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/models/transformers/auraflow_transformer_2d.py +564 -0
  29. exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/models/transformers/cogvideox_transformer_3d.py +531 -0
  30. exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/models/transformers/consisid_transformer_3d.py +789 -0
  31. exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/models/transformers/dit_transformer_2d.py +226 -0
  32. exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/models/transformers/dual_transformer_2d.py +156 -0
  33. exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/models/transformers/hunyuan_transformer_2d.py +579 -0
  34. exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/models/transformers/latte_transformer_3d.py +331 -0
  35. exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/models/transformers/lumina_nextdit2d.py +342 -0
  36. exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/models/transformers/pixart_transformer_2d.py +430 -0
  37. exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/models/transformers/prior_transformer.py +384 -0
  38. exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/models/transformers/sana_transformer.py +597 -0
  39. exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/models/transformers/stable_audio_transformer.py +439 -0
  40. exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/models/transformers/t5_film_transformer.py +436 -0
  41. exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/models/transformers/transformer_2d.py +551 -0
  42. exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/models/transformers/transformer_allegro.py +414 -0
  43. exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/models/transformers/transformer_bria.py +719 -0
  44. exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/models/transformers/transformer_chroma.py +641 -0
  45. exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/models/transformers/transformer_cogview3plus.py +370 -0
  46. exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/models/transformers/transformer_cogview4.py +788 -0
  47. exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/models/transformers/transformer_cosmos.py +586 -0
  48. exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/models/transformers/transformer_easyanimate.py +527 -0
  49. exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/models/transformers/transformer_flux.py +776 -0
  50. exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/models/transformers/transformer_hidream_image.py +942 -0
exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/models/autoencoders/autoencoder_kl_cosmos.py ADDED
@@ -0,0 +1,1110 @@
1
+ # Copyright 2025 The NVIDIA Team and The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import math
16
+ from typing import List, Optional, Tuple, Union
17
+
18
+ import torch
19
+ import torch.nn as nn
20
+ import torch.nn.functional as F
21
+
22
+ from ...configuration_utils import ConfigMixin, register_to_config
23
+ from ...utils import get_logger
24
+ from ...utils.accelerate_utils import apply_forward_hook
25
+ from ..modeling_outputs import AutoencoderKLOutput
26
+ from ..modeling_utils import ModelMixin
27
+ from .vae import DecoderOutput, IdentityDistribution
28
+
29
+
30
+ logger = get_logger(__name__)
31
+
32
+
33
+ # fmt: off
34
+ # These latent means and standard deviations are from CV8x8x8-1.0. Each checkpoint has different values, but since this is the main VAE used,
35
+ # we will default to these values.
36
+ LATENTS_MEAN = [0.11362758, -0.0171717, 0.03071163, 0.02046862, 0.01931456, 0.02138567, 0.01999342, 0.02189187, 0.02011935, 0.01872694, 0.02168613, 0.02207148, 0.01986941, 0.01770413, 0.02067643, 0.02028245, 0.19125476, 0.04556972, 0.0595558, 0.05315534, 0.05496629, 0.05356264, 0.04856596, 0.05327453, 0.05410472, 0.05597149, 0.05524866, 0.05181874, 0.05071663, 0.05204537, 0.0564108, 0.05518042, 0.01306714, 0.03341161, 0.03847246, 0.02810185, 0.02790166, 0.02920026, 0.02823597, 0.02631033, 0.0278531, 0.02880507, 0.02977769, 0.03145441, 0.02888389, 0.03280773, 0.03484927, 0.03049198, -0.00197727, 0.07534957, 0.04963879, 0.05530893, 0.05410828, 0.05252541, 0.05029899, 0.05321025, 0.05149245, 0.0511921, 0.04643495, 0.04604527, 0.04631618, 0.04404101, 0.04403536, 0.04499495, -0.02994183, -0.04787003, -0.01064558, -0.01779824, -0.01490502, -0.02157517, -0.0204778, -0.02180816, -0.01945375, -0.02062863, -0.02192209, -0.02520639, -0.02246656, -0.02427533, -0.02683363, -0.02762006, 0.08019473, -0.13005368, -0.07568636, -0.06082374, -0.06036175, -0.05875364, -0.05921887, -0.05869788, -0.05273941, -0.052565, -0.05346428, -0.05456541, -0.053657, -0.05656897, -0.05728589, -0.05321847, 0.16718403, -0.00390146, 0.0379406, 0.0356561, 0.03554131, 0.03924074, 0.03873615, 0.04187329, 0.04226924, 0.04378717, 0.04684274, 0.05117614, 0.04547792, 0.05251586, 0.05048339, 0.04950784, 0.09564418, 0.0547128, 0.08183969, 0.07978633, 0.08076023, 0.08108605, 0.08011818, 0.07965573, 0.08187773, 0.08350263, 0.08101469, 0.0786941, 0.0774442, 0.07724521, 0.07830418, 0.07599796, -0.04987567, 0.05923908, -0.01058746, -0.01177603, -0.01116162, -0.01364149, -0.01546014, -0.0117213, -0.01780043, -0.01648314, -0.02100247, -0.02104417, -0.02482123, -0.02611689, -0.02561143, -0.02597336, -0.05364667, 0.08211684, 0.04686937, 0.04605641, 0.04304186, 0.0397355, 0.03686767, 0.04087112, 0.03704741, 0.03706401, 0.03120073, 0.03349091, 0.03319963, 0.03205781, 0.03195127, 0.03180481, 0.16427967, -0.11048453, -0.04595276, -0.04982893, -0.05213465, -0.04809378, -0.05080318, -0.04992863, -0.04493337, -0.0467619, -0.04884703, -0.04627892, -0.04913311, -0.04955709, -0.04533982, -0.04570218, -0.10612928, -0.05121198, -0.06761009, -0.07251801, -0.07265285, -0.07417855, -0.07202412, -0.07499027, -0.07625481, -0.07535747, -0.07638787, -0.07920305, -0.07596069, -0.07959418, -0.08265036, -0.07955471, -0.16888915, 0.0753242, 0.04062594, 0.03375093, 0.03337452, 0.03699376, 0.03651138, 0.03611023, 0.03555622, 0.03378554, 0.0300498, 0.03395559, 0.02941847, 0.03156432, 0.03431173, 0.03016853, -0.03415358, -0.01699573, -0.04029295, -0.04912157, -0.0498858, -0.04917918, -0.04918056, -0.0525189, -0.05325506, -0.05341973, -0.04983329, -0.04883146, -0.04985548, -0.04736718, -0.0462027, -0.04836091, 0.02055675, 0.03419799, -0.02907669, -0.04350509, -0.04156144, -0.04234421, -0.04446109, -0.04461774, -0.04882839, -0.04822346, -0.04502493, -0.0506244, -0.05146913, -0.04655267, -0.04862994, -0.04841615, 0.20312774, -0.07208502, -0.03635615, -0.03556088, -0.04246174, -0.04195838, -0.04293778, -0.04071276, -0.04240569, -0.04125213, -0.04395144, -0.03959096, -0.04044993, -0.04015875, -0.04088107, -0.03885176]
37
+ LATENTS_STD = [0.56700271, 0.65488982, 0.65589428, 0.66524369, 0.66619784, 0.6666382, 0.6720838, 0.66955978, 0.66928875, 0.67108786, 0.67092526, 0.67397463, 0.67894882, 0.67668313, 0.67769569, 0.67479557, 0.85245121, 0.8688373, 0.87348086, 0.88459337, 0.89135885, 0.8910504, 0.89714909, 0.89947474, 0.90201765, 0.90411824, 0.90692616, 0.90847772, 0.90648711, 0.91006982, 0.91033435, 0.90541548, 0.84960359, 0.85863352, 0.86895317, 0.88460612, 0.89245003, 0.89451706, 0.89931005, 0.90647358, 0.90338236, 0.90510076, 0.91008312, 0.90961218, 0.9123717, 0.91313171, 0.91435546, 0.91565102, 0.91877103, 0.85155135, 0.857804, 0.86998034, 0.87365264, 0.88161767, 0.88151032, 0.88758916, 0.89015514, 0.89245576, 0.89276224, 0.89450496, 0.90054202, 0.89994133, 0.90136105, 0.90114892, 0.77755755, 0.81456852, 0.81911844, 0.83137071, 0.83820474, 0.83890373, 0.84401101, 0.84425181, 0.84739357, 0.84798753, 0.85249585, 0.85114998, 0.85160935, 0.85626358, 0.85677862, 0.85641026, 0.69903517, 0.71697885, 0.71696913, 0.72583169, 0.72931731, 0.73254126, 0.73586977, 0.73734969, 0.73664582, 0.74084908, 0.74399322, 0.74471819, 0.74493188, 0.74824578, 0.75024873, 0.75274801, 0.8187142, 0.82251883, 0.82616025, 0.83164483, 0.84072375, 0.8396467, 0.84143305, 0.84880769, 0.8503468, 0.85196948, 0.85211051, 0.85386664, 0.85410017, 0.85439342, 0.85847849, 0.85385275, 0.67583984, 0.68259847, 0.69198853, 0.69928843, 0.70194328, 0.70467001, 0.70755547, 0.70917857, 0.71007699, 0.70963502, 0.71064079, 0.71027333, 0.71291167, 0.71537536, 0.71902508, 0.71604162, 0.72450989, 0.71979928, 0.72057378, 0.73035461, 0.73329622, 0.73660028, 0.73891461, 0.74279994, 0.74105692, 0.74002433, 0.74257588, 0.74416119, 0.74543899, 0.74694443, 0.74747062, 0.74586403, 0.90176988, 0.90990674, 0.91106802, 0.92163783, 0.92390233, 0.93056196, 0.93482202, 0.93642414, 0.93858379, 0.94064975, 0.94078934, 0.94325715, 0.94955301, 0.94814706, 0.95144123, 0.94923073, 0.49853548, 0.64968109, 0.6427654, 0.64966393, 0.6487664, 0.65203559, 0.6584242, 0.65351611, 0.65464371, 0.6574859, 0.65626335, 0.66123748, 0.66121179, 0.66077942, 0.66040152, 0.66474909, 0.61986589, 0.69138134, 0.6884557, 0.6955843, 0.69765401, 0.70015347, 0.70529598, 0.70468754, 0.70399523, 0.70479989, 0.70887572, 0.71126866, 0.7097227, 0.71249932, 0.71231949, 0.71175605, 0.35586974, 0.68723857, 0.68973219, 0.69958478, 0.6943453, 0.6995818, 0.70980215, 0.69899458, 0.70271689, 0.70095056, 0.69912851, 0.70522696, 0.70392174, 0.70916915, 0.70585734, 0.70373541, 0.98101336, 0.89024764, 0.89607251, 0.90678179, 0.91308665, 0.91812348, 0.91980827, 0.92480654, 0.92635667, 0.92887944, 0.93338072, 0.93468094, 0.93619436, 0.93906063, 0.94191772, 0.94471723, 0.83202779, 0.84106231, 0.84463632, 0.85829508, 0.86319661, 0.86751342, 0.86914337, 0.87085921, 0.87286359, 0.87537396, 0.87931138, 0.88054478, 0.8811838, 0.88872558, 0.88942474, 0.88934827, 0.44025335, 0.63061613, 0.63110614, 0.63601959, 0.6395812, 0.64104342, 0.65019929, 0.6502797, 0.64355946, 0.64657205, 0.64847094, 0.64728117, 0.64972943, 0.65162975, 0.65328044, 0.64914775]
38
+ _WAVELETS = {
39
+ "haar": torch.tensor([0.7071067811865476, 0.7071067811865476]),
40
+ "rearrange": torch.tensor([1.0, 1.0]),
41
+ }
42
+ # fmt: on
43
+
44
+
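+ # The "haar" pair above is a stride-2 low-pass/high-pass filter bank; CosmosPatchEmbed3d applies it
+ # along the time, height and width axes to trade resolution for channels. A minimal 1-D sketch of the
+ # idea (illustrative only, not executed by this module; uses only the torch/F imports above):
+ #
+ #     x = torch.arange(8.0).reshape(1, 1, 8)
+ #     lo = F.conv1d(x, _WAVELETS["haar"].flip(0).reshape(1, 1, 2), stride=2)   # pairwise sums / sqrt(2)
+ #     hi = F.conv1d(x, (_WAVELETS["haar"] * torch.tensor([1.0, -1.0])).reshape(1, 1, 2), stride=2)  # pairwise differences / sqrt(2)
+ #     # lo and hi are each half the length of x and together preserve its energy.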
45
+ class CosmosCausalConv3d(nn.Conv3d):
46
+ def __init__(
47
+ self,
48
+ in_channels: int = 1,
49
+ out_channels: int = 1,
50
+ kernel_size: Union[int, Tuple[int, int, int]] = (3, 3, 3),
51
+ dilation: Union[int, Tuple[int, int, int]] = (1, 1, 1),
52
+ stride: Union[int, Tuple[int, int, int]] = (1, 1, 1),
53
+ padding: int = 1,
54
+ pad_mode: str = "constant",
55
+ ) -> None:
56
+ kernel_size = (kernel_size, kernel_size, kernel_size) if isinstance(kernel_size, int) else kernel_size
57
+ dilation = (dilation, dilation, dilation) if isinstance(dilation, int) else dilation
58
+ stride = (stride, stride, stride) if isinstance(stride, int) else stride
59
+
60
+ _, height_kernel_size, width_kernel_size = kernel_size
61
+ assert height_kernel_size % 2 == 1 and width_kernel_size % 2 == 1
62
+
63
+ super().__init__(
64
+ in_channels,
65
+ out_channels,
66
+ kernel_size,
67
+ stride=stride,
68
+ dilation=dilation,
69
+ )
70
+
71
+ self.pad_mode = pad_mode
72
+ self.temporal_pad = dilation[0] * (kernel_size[0] - 1) + (1 - stride[0])
73
+ self.spatial_pad = (padding, padding, padding, padding)
74
+
75
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
76
+ hidden_states_prev = hidden_states[:, :, :1, ...].repeat(1, 1, self.temporal_pad, 1, 1)
77
+ hidden_states = torch.cat([hidden_states_prev, hidden_states], dim=2)
78
+ hidden_states = F.pad(hidden_states, (*self.spatial_pad, 0, 0), mode=self.pad_mode, value=0.0)
79
+ return super().forward(hidden_states)
80
+
81
+
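+ # CosmosCausalConv3d is causal in time: the input is left-padded by repeating its first frame
+ # `temporal_pad` times and padded spatially with `pad_mode`, so an output frame never depends on
+ # later input frames and the temporal length is preserved. A hedged shape check (illustrative only):
+ #
+ #     conv = CosmosCausalConv3d(in_channels=1, out_channels=1, kernel_size=(3, 3, 3), padding=1)
+ #     out = conv(torch.randn(1, 1, 4, 8, 8))   # expected shape: [1, 1, 4, 8, 8]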
82
+ class CosmosCausalGroupNorm(torch.nn.Module):
83
+ def __init__(self, in_channels: int, num_groups: int = 1):
84
+ super().__init__()
85
+ self.norm = nn.GroupNorm(
86
+ num_groups=num_groups,
87
+ num_channels=in_channels,
88
+ eps=1e-6,
89
+ affine=True,
90
+ )
91
+ self.num_groups = num_groups
92
+
93
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
94
+ if self.num_groups == 1:
95
+ batch_size = hidden_states.size(0)
96
+ hidden_states = hidden_states.permute(0, 2, 1, 3, 4).flatten(0, 1) # [B, C, T, H, W] -> [B * T, C, H, W]
97
+ hidden_states = self.norm(hidden_states)
98
+ hidden_states = hidden_states.unflatten(0, (batch_size, -1)).permute(
99
+ 0, 2, 1, 3, 4
100
+ ) # [B * T, C, H, W] -> [B, C, T, H, W]
101
+ else:
102
+ hidden_states = self.norm(hidden_states)
103
+ return hidden_states
104
+
105
+
106
+ class CosmosPatchEmbed3d(nn.Module):
107
+ def __init__(self, patch_size: int = 1, patch_method: str = "haar") -> None:
108
+ super().__init__()
109
+
110
+ self.patch_size = patch_size
111
+ self.patch_method = patch_method
112
+
113
+ wavelets = _WAVELETS.get(patch_method).clone()
114
+ arange = torch.arange(wavelets.shape[0])
115
+
116
+ self.register_buffer("wavelets", wavelets, persistent=False)
117
+ self.register_buffer("_arange", arange, persistent=False)
118
+
119
+ def _dwt(self, hidden_states: torch.Tensor, mode: str = "reflect", rescale=False) -> torch.Tensor:
120
+ dtype = hidden_states.dtype
121
+ wavelets = self.wavelets
122
+
123
+ n = wavelets.shape[0]
124
+ g = hidden_states.shape[1]
125
+ hl = wavelets.flip(0).reshape(1, 1, -1).repeat(g, 1, 1)
126
+ hh = (wavelets * ((-1) ** self._arange)).reshape(1, 1, -1).repeat(g, 1, 1)
127
+ hh = hh.to(dtype=dtype)
128
+ hl = hl.to(dtype=dtype)
129
+
130
+ # Handles temporal axis
131
+ hidden_states = F.pad(hidden_states, pad=(max(0, n - 2), n - 1, n - 2, n - 1, n - 2, n - 1), mode=mode).to(
132
+ dtype
133
+ )
134
+ xl = F.conv3d(hidden_states, hl.unsqueeze(3).unsqueeze(4), groups=g, stride=(2, 1, 1))
135
+ xh = F.conv3d(hidden_states, hh.unsqueeze(3).unsqueeze(4), groups=g, stride=(2, 1, 1))
136
+
137
+ # Handles spatial axes
138
+ xll = F.conv3d(xl, hl.unsqueeze(2).unsqueeze(4), groups=g, stride=(1, 2, 1))
139
+ xlh = F.conv3d(xl, hh.unsqueeze(2).unsqueeze(4), groups=g, stride=(1, 2, 1))
140
+ xhl = F.conv3d(xh, hl.unsqueeze(2).unsqueeze(4), groups=g, stride=(1, 2, 1))
141
+ xhh = F.conv3d(xh, hh.unsqueeze(2).unsqueeze(4), groups=g, stride=(1, 2, 1))
142
+
143
+ xlll = F.conv3d(xll, hl.unsqueeze(2).unsqueeze(3), groups=g, stride=(1, 1, 2))
144
+ xllh = F.conv3d(xll, hh.unsqueeze(2).unsqueeze(3), groups=g, stride=(1, 1, 2))
145
+ xlhl = F.conv3d(xlh, hl.unsqueeze(2).unsqueeze(3), groups=g, stride=(1, 1, 2))
146
+ xlhh = F.conv3d(xlh, hh.unsqueeze(2).unsqueeze(3), groups=g, stride=(1, 1, 2))
147
+ xhll = F.conv3d(xhl, hl.unsqueeze(2).unsqueeze(3), groups=g, stride=(1, 1, 2))
148
+ xhlh = F.conv3d(xhl, hh.unsqueeze(2).unsqueeze(3), groups=g, stride=(1, 1, 2))
149
+ xhhl = F.conv3d(xhh, hl.unsqueeze(2).unsqueeze(3), groups=g, stride=(1, 1, 2))
150
+ xhhh = F.conv3d(xhh, hh.unsqueeze(2).unsqueeze(3), groups=g, stride=(1, 1, 2))
151
+
152
+ hidden_states = torch.cat([xlll, xllh, xlhl, xlhh, xhll, xhlh, xhhl, xhhh], dim=1)
153
+ if rescale:
154
+ hidden_states = hidden_states / 8**0.5
155
+ return hidden_states
156
+
157
+ def _haar(self, hidden_states: torch.Tensor) -> torch.Tensor:
158
+ xi, xv = torch.split(hidden_states, [1, hidden_states.shape[2] - 1], dim=2)
159
+ hidden_states = torch.cat([xi.repeat_interleave(self.patch_size, dim=2), xv], dim=2)
160
+ for _ in range(int(math.log2(self.patch_size))):
161
+ hidden_states = self._dwt(hidden_states, rescale=True)
162
+ return hidden_states
163
+
164
+ def _arrange(self, hidden_states: torch.Tensor) -> torch.Tensor:
165
+ xi, xv = torch.split(hidden_states, [1, hidden_states.shape[2] - 1], dim=2)
166
+ hidden_states = torch.cat([xi.repeat_interleave(self.patch_size, dim=2), xv], dim=2)
167
+
168
+ batch_size, num_channels, num_frames, height, width = hidden_states.shape
169
+ p = self.patch_size
170
+
171
+ hidden_states = hidden_states.reshape(
172
+ batch_size, num_channels, num_frames // p, p, height // p, p, width // p, p
173
+ )
174
+ hidden_states = hidden_states.permute(0, 1, 3, 5, 7, 2, 4, 6).flatten(1, 4).contiguous()
175
+ return hidden_states
176
+
177
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
178
+ if self.patch_method == "haar":
179
+ return self._haar(hidden_states)
180
+ elif self.patch_method == "rearrange":
181
+ return self._arrange(hidden_states)
182
+ else:
183
+ raise ValueError(f"Unsupported patch method: {self.patch_method}")
184
+
185
+
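+ # With patch_size=4 and the "haar" method, two DWT levels each multiply channels by 8 and halve every
+ # axis, so [B, C, T, H, W] becomes [B, C * 4**3, (T - 1) // 4 + 1, H // 4, W // 4]. A hedged shape
+ # example (illustrative only):
+ #
+ #     patcher = CosmosPatchEmbed3d(patch_size=4, patch_method="haar")
+ #     out = patcher(torch.randn(1, 3, 9, 32, 32))   # expected shape: [1, 192, 3, 8, 8]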
186
+ class CosmosUnpatcher3d(nn.Module):
187
+ def __init__(self, patch_size: int = 1, patch_method: str = "haar"):
188
+ super().__init__()
189
+
190
+ self.patch_size = patch_size
191
+ self.patch_method = patch_method
192
+
193
+ wavelets = _WAVELETS.get(patch_method).clone()
194
+ arange = torch.arange(wavelets.shape[0])
195
+
196
+ self.register_buffer("wavelets", wavelets, persistent=False)
197
+ self.register_buffer("_arange", arange, persistent=False)
198
+
199
+ def _idwt(self, hidden_states: torch.Tensor, rescale: bool = False) -> torch.Tensor:
200
+ device = hidden_states.device
201
+ dtype = hidden_states.dtype
202
+ h = self.wavelets.to(device)
203
+
204
+ g = hidden_states.shape[1] // 8 # split into 8 spatio-temporal filtered tensors.
205
+ hl = h.flip([0]).reshape(1, 1, -1).repeat([g, 1, 1])
206
+ hh = (h * ((-1) ** self._arange.to(device))).reshape(1, 1, -1).repeat(g, 1, 1)
207
+ hl = hl.to(dtype=dtype)
208
+ hh = hh.to(dtype=dtype)
209
+
210
+ xlll, xllh, xlhl, xlhh, xhll, xhlh, xhhl, xhhh = torch.chunk(hidden_states, 8, dim=1)
211
+
212
+ # Handles width transposed convolutions
213
+ xll = F.conv_transpose3d(xlll, hl.unsqueeze(2).unsqueeze(3), groups=g, stride=(1, 1, 2))
214
+ xll = F.conv_transpose3d(xllh, hh.unsqueeze(2).unsqueeze(3), groups=g, stride=(1, 1, 2)) + xll
215
+
216
+ xlh = F.conv_transpose3d(xlhl, hl.unsqueeze(2).unsqueeze(3), groups=g, stride=(1, 1, 2))
217
+ xlh = F.conv_transpose3d(xlhh, hh.unsqueeze(2).unsqueeze(3), groups=g, stride=(1, 1, 2)) + xlh
218
+
219
+ xhl = F.conv_transpose3d(xhll, hl.unsqueeze(2).unsqueeze(3), groups=g, stride=(1, 1, 2))
220
+ xhl = F.conv_transpose3d(xhlh, hh.unsqueeze(2).unsqueeze(3), groups=g, stride=(1, 1, 2)) + xhl
221
+
222
+ xhh = F.conv_transpose3d(xhhl, hl.unsqueeze(2).unsqueeze(3), groups=g, stride=(1, 1, 2))
223
+ xhh = F.conv_transpose3d(xhhh, hh.unsqueeze(2).unsqueeze(3), groups=g, stride=(1, 1, 2)) + xhh
224
+
225
+ # Handles height transposed convolutions
226
+ xl = F.conv_transpose3d(xll, hl.unsqueeze(2).unsqueeze(4), groups=g, stride=(1, 2, 1))
227
+ xl = F.conv_transpose3d(xlh, hh.unsqueeze(2).unsqueeze(4), groups=g, stride=(1, 2, 1)) + xl
228
+ xh = F.conv_transpose3d(xhl, hl.unsqueeze(2).unsqueeze(4), groups=g, stride=(1, 2, 1))
229
+ xh = F.conv_transpose3d(xhh, hh.unsqueeze(2).unsqueeze(4), groups=g, stride=(1, 2, 1)) + xh
230
+
231
+ # Handles time axis transposed convolutions
232
+ hidden_states = F.conv_transpose3d(xl, hl.unsqueeze(3).unsqueeze(4), groups=g, stride=(2, 1, 1))
233
+ hidden_states = (
234
+ F.conv_transpose3d(xh, hh.unsqueeze(3).unsqueeze(4), groups=g, stride=(2, 1, 1)) + hidden_states
235
+ )
236
+
237
+ if rescale:
238
+ hidden_states = hidden_states * 8**0.5
239
+
240
+ return hidden_states
241
+
242
+ def _ihaar(self, hidden_states: torch.Tensor) -> torch.Tensor:
243
+ for _ in range(int(math.log2(self.patch_size))):
244
+ hidden_states = self._idwt(hidden_states, rescale=True)
245
+ hidden_states = hidden_states[:, :, self.patch_size - 1 :, ...]
246
+ return hidden_states
247
+
248
+ def _irearrange(self, hidden_states: torch.Tensor) -> torch.Tensor:
249
+ p = self.patch_size
250
+ hidden_states = hidden_states.unflatten(1, (-1, p, p, p))
251
+ hidden_states = hidden_states.permute(0, 1, 5, 2, 6, 3, 7, 4)
252
+ hidden_states = hidden_states.flatten(6, 7).flatten(4, 5).flatten(2, 3)
253
+ hidden_states = hidden_states[:, :, p - 1 :, ...]
254
+ return hidden_states
255
+
256
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
257
+ if self.patch_method == "haar":
258
+ return self._ihaar(hidden_states)
259
+ elif self.patch_method == "rearrange":
260
+ return self._irearrange(hidden_states)
261
+ else:
262
+ raise ValueError("Unknown patch method: " + self.patch_method)
263
+
264
+
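+ # CosmosUnpatcher3d reverses CosmosPatchEmbed3d: it unpacks [B, C * p**3, ...] back to [B, C, T, H, W]
+ # and drops the p - 1 leading frames introduced by repeating the first frame. A hedged round-trip
+ # sketch for the "rearrange" mode, which is an exact inverse (illustrative only):
+ #
+ #     x = torch.randn(1, 3, 5, 8, 8)
+ #     packed = CosmosPatchEmbed3d(2, "rearrange")(x)         # expected shape: [1, 24, 3, 4, 4]
+ #     restored = CosmosUnpatcher3d(2, "rearrange")(packed)   # expected shape: [1, 3, 5, 8, 8], matching x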
265
+ class CosmosConvProjection3d(nn.Module):
266
+ def __init__(self, in_channels: int, out_channels: int) -> None:
267
+ super().__init__()
268
+
269
+ self.conv_s = CosmosCausalConv3d(in_channels, out_channels, kernel_size=(1, 3, 3), stride=1, padding=1)
270
+ self.conv_t = CosmosCausalConv3d(out_channels, out_channels, kernel_size=(3, 1, 1), stride=1, padding=0)
271
+
272
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
273
+ hidden_states = self.conv_s(hidden_states)
274
+ hidden_states = self.conv_t(hidden_states)
275
+ return hidden_states
276
+
277
+
278
+ class CosmosResnetBlock3d(nn.Module):
279
+ def __init__(
280
+ self,
281
+ in_channels: int,
282
+ out_channels: int,
283
+ dropout: float = 0.0,
284
+ num_groups: int = 1,
285
+ ) -> None:
286
+ super().__init__()
287
+ out_channels = out_channels or in_channels
288
+
289
+ self.norm1 = CosmosCausalGroupNorm(in_channels, num_groups)
290
+ self.conv1 = CosmosConvProjection3d(in_channels, out_channels)
291
+
292
+ self.norm2 = CosmosCausalGroupNorm(out_channels, num_groups)
293
+ self.dropout = nn.Dropout(dropout)
294
+ self.conv2 = CosmosConvProjection3d(out_channels, out_channels)
295
+
296
+ if in_channels != out_channels:
297
+ self.conv_shortcut = CosmosCausalConv3d(in_channels, out_channels, kernel_size=1, stride=1, padding=0)
298
+ else:
299
+ self.conv_shortcut = nn.Identity()
300
+
301
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
302
+ residual = hidden_states
303
+ residual = self.conv_shortcut(residual)
304
+
305
+ hidden_states = self.norm1(hidden_states)
306
+ hidden_states = F.silu(hidden_states)
307
+ hidden_states = self.conv1(hidden_states)
308
+
309
+ hidden_states = self.norm2(hidden_states)
310
+ hidden_states = F.silu(hidden_states)
311
+ hidden_states = self.dropout(hidden_states)
312
+ hidden_states = self.conv2(hidden_states)
313
+
314
+ return hidden_states + residual
315
+
316
+
317
+ class CosmosDownsample3d(nn.Module):
318
+ def __init__(
319
+ self,
320
+ in_channels: int,
321
+ spatial_downsample: bool = True,
322
+ temporal_downsample: bool = True,
323
+ ) -> None:
324
+ super().__init__()
325
+
326
+ self.spatial_downsample = spatial_downsample
327
+ self.temporal_downsample = temporal_downsample
328
+
329
+ self.conv1 = nn.Identity()
330
+ self.conv2 = nn.Identity()
331
+ self.conv3 = nn.Identity()
332
+
333
+ if spatial_downsample:
334
+ self.conv1 = CosmosCausalConv3d(
335
+ in_channels, in_channels, kernel_size=(1, 3, 3), stride=(1, 2, 2), padding=0
336
+ )
337
+ if temporal_downsample:
338
+ self.conv2 = CosmosCausalConv3d(
339
+ in_channels, in_channels, kernel_size=(3, 1, 1), stride=(2, 1, 1), padding=0
340
+ )
341
+ if spatial_downsample or temporal_downsample:
342
+ self.conv3 = CosmosCausalConv3d(
343
+ in_channels, in_channels, kernel_size=(1, 1, 1), stride=(1, 1, 1), padding=0
344
+ )
345
+
346
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
347
+ if not self.spatial_downsample and not self.temporal_downsample:
348
+ return hidden_states
349
+
350
+ if self.spatial_downsample:
351
+ pad = (0, 1, 0, 1, 0, 0)
352
+ hidden_states = F.pad(hidden_states, pad, mode="constant", value=0)
353
+ conv_out = self.conv1(hidden_states)
354
+ pool_out = F.avg_pool3d(hidden_states, kernel_size=(1, 2, 2), stride=(1, 2, 2))
355
+ hidden_states = conv_out + pool_out
356
+
357
+ if self.temporal_downsample:
358
+ hidden_states = torch.cat([hidden_states[:, :, :1, ...], hidden_states], dim=2)
359
+ conv_out = self.conv2(hidden_states)
360
+ pool_out = F.avg_pool3d(hidden_states, kernel_size=(2, 1, 1), stride=(2, 1, 1))
361
+ hidden_states = conv_out + pool_out
362
+
363
+ hidden_states = self.conv3(hidden_states)
364
+ return hidden_states
365
+
366
+
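+ # Spatial downsampling above sums a strided 1x3x3 convolution with a 1x2x2 average pool (and likewise
+ # a 3x1x1 convolution with a 2x1x1 pool for time), each halving the corresponding axes. A hedged shape
+ # check (illustrative only):
+ #
+ #     down = CosmosDownsample3d(8, spatial_downsample=True, temporal_downsample=False)
+ #     out = down(torch.randn(1, 8, 3, 16, 16))   # expected shape: [1, 8, 3, 8, 8]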
367
+ class CosmosUpsample3d(nn.Module):
368
+ def __init__(
369
+ self,
370
+ in_channels: int,
371
+ spatial_upsample: bool = True,
372
+ temporal_upsample: bool = True,
373
+ ) -> None:
374
+ super().__init__()
375
+
376
+ self.spatial_upsample = spatial_upsample
377
+ self.temporal_upsample = temporal_upsample
378
+
379
+ self.conv1 = nn.Identity()
380
+ self.conv2 = nn.Identity()
381
+ self.conv3 = nn.Identity()
382
+
383
+ if temporal_upsample:
384
+ self.conv1 = CosmosCausalConv3d(
385
+ in_channels, in_channels, kernel_size=(3, 1, 1), stride=(1, 1, 1), padding=0
386
+ )
387
+ if spatial_upsample:
388
+ self.conv2 = CosmosCausalConv3d(
389
+ in_channels, in_channels, kernel_size=(1, 3, 3), stride=(1, 1, 1), padding=1
390
+ )
391
+ if spatial_upsample or temporal_upsample:
392
+ self.conv3 = CosmosCausalConv3d(
393
+ in_channels, in_channels, kernel_size=(1, 1, 1), stride=(1, 1, 1), padding=0
394
+ )
395
+
396
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
397
+ if not self.spatial_upsample and not self.temporal_upsample:
398
+ return hidden_states
399
+
400
+ if self.temporal_upsample:
401
+ num_frames = hidden_states.size(2)
402
+ time_factor = int(1.0 + 1.0 * (num_frames > 1))
403
+ hidden_states = hidden_states.repeat_interleave(int(time_factor), dim=2)
404
+ hidden_states = hidden_states[..., time_factor - 1 :, :, :]
405
+ hidden_states = self.conv1(hidden_states) + hidden_states
406
+
407
+ if self.spatial_upsample:
408
+ hidden_states = hidden_states.repeat_interleave(2, dim=3).repeat_interleave(2, dim=4)
409
+ hidden_states = self.conv2(hidden_states) + hidden_states
410
+
411
+ hidden_states = self.conv3(hidden_states)
412
+ return hidden_states
413
+
414
+
415
+ class CosmosCausalAttention(nn.Module):
416
+ def __init__(
417
+ self,
418
+ num_attention_heads: int,
419
+ attention_head_dim: int,
420
+ num_groups: int = 1,
421
+ dropout: float = 0.0,
422
+ processor: Union["CosmosSpatialAttentionProcessor2_0", "CosmosTemporalAttentionProcessor2_0"] = None,
423
+ ) -> None:
424
+ super().__init__()
425
+ self.num_attention_heads = num_attention_heads
426
+
427
+ self.norm = CosmosCausalGroupNorm(attention_head_dim, num_groups=num_groups)
428
+ self.to_q = CosmosCausalConv3d(attention_head_dim, attention_head_dim, kernel_size=1, stride=1, padding=0)
429
+ self.to_k = CosmosCausalConv3d(attention_head_dim, attention_head_dim, kernel_size=1, stride=1, padding=0)
430
+ self.to_v = CosmosCausalConv3d(attention_head_dim, attention_head_dim, kernel_size=1, stride=1, padding=0)
431
+ self.to_out = nn.ModuleList([])
432
+ self.to_out.append(
433
+ CosmosCausalConv3d(attention_head_dim, attention_head_dim, kernel_size=1, stride=1, padding=0)
434
+ )
435
+ self.to_out.append(nn.Dropout(dropout))
436
+
437
+ self.processor = processor
438
+ if self.processor is None:
439
+ raise ValueError("CosmosCausalAttention requires a processor.")
440
+
441
+ def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None) -> torch.Tensor:
442
+ return self.processor(self, hidden_states=hidden_states, attention_mask=attention_mask)
443
+
444
+
445
+ class CosmosSpatialAttentionProcessor2_0:
446
+ def __init__(self):
447
+ if not hasattr(F, "scaled_dot_product_attention"):
448
+ raise ImportError(
449
+ "CosmosSpatialAttentionProcessor2_0 requires PyTorch 2.0 or higher. To use it, please upgrade PyTorch."
450
+ )
451
+
452
+ def __call__(
453
+ self, attn: CosmosCausalAttention, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None
454
+ ) -> torch.Tensor:
455
+ batch_size, num_channels, num_frames, height, width = hidden_states.shape
456
+ residual = hidden_states
457
+
458
+ hidden_states = attn.norm(hidden_states)
459
+ query = attn.to_q(hidden_states)
460
+ key = attn.to_k(hidden_states)
461
+ value = attn.to_v(hidden_states)
462
+
463
+ # [B, C, T, H, W] -> [B * T, H * W, C]
464
+ query = query.permute(0, 2, 3, 4, 1).flatten(2, 3).flatten(0, 1)
465
+ key = key.permute(0, 2, 3, 4, 1).flatten(2, 3).flatten(0, 1)
466
+ value = value.permute(0, 2, 3, 4, 1).flatten(2, 3).flatten(0, 1)
467
+
468
+ # [B * T, H * W, C] -> [B * T, N, H * W, C // N]
469
+ query = query.unflatten(2, (attn.num_attention_heads, -1)).transpose(1, 2)
470
+ key = key.unflatten(2, (attn.num_attention_heads, -1)).transpose(1, 2)
471
+ value = value.unflatten(2, (attn.num_attention_heads, -1)).transpose(1, 2)
472
+
473
+ hidden_states = F.scaled_dot_product_attention(query, key, value, attn_mask=attention_mask)
474
+ hidden_states = hidden_states.transpose(1, 2).flatten(2, 3).type_as(query)
475
+ hidden_states = hidden_states.unflatten(1, (height, width)).unflatten(0, (batch_size, num_frames))
476
+ hidden_states = hidden_states.permute(0, 4, 1, 2, 3)
477
+
478
+ hidden_states = attn.to_out[0](hidden_states)
479
+ hidden_states = attn.to_out[1](hidden_states)
480
+
481
+ return hidden_states + residual
482
+
483
+
484
+ class CosmosTemporalAttentionProcessor2_0:
485
+ def __init__(self):
486
+ if not hasattr(F, "scaled_dot_product_attention"):
487
+ raise ImportError(
488
+ "CosmosSpatialAttentionProcessor2_0 requires PyTorch 2.0 or higher. To use it, please upgrade PyTorch."
489
+ )
490
+
491
+ def __call__(
492
+ self, attn: CosmosCausalAttention, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None
493
+ ) -> torch.Tensor:
494
+ batch_size, num_channels, num_frames, height, width = hidden_states.shape
495
+ residual = hidden_states
496
+
497
+ hidden_states = attn.norm(hidden_states)
498
+ query = attn.to_q(hidden_states)
499
+ key = attn.to_k(hidden_states)
500
+ value = attn.to_v(hidden_states)
501
+
502
+ # [B, C, T, H, W] -> [B * H * W, T, C]
503
+ query = query.permute(0, 3, 4, 2, 1).flatten(0, 2)
504
+ key = key.permute(0, 3, 4, 2, 1).flatten(0, 2)
505
+ value = value.permute(0, 3, 4, 2, 1).flatten(0, 2)
506
+
507
+ # [B * H * W, T, C] -> [B * H * W, N, T, C // N]
508
+ query = query.unflatten(2, (attn.num_attention_heads, -1)).transpose(1, 2)
509
+ key = key.unflatten(2, (attn.num_attention_heads, -1)).transpose(1, 2)
510
+ value = value.unflatten(2, (attn.num_attention_heads, -1)).transpose(1, 2)
511
+
512
+ hidden_states = F.scaled_dot_product_attention(query, key, value, attn_mask=attention_mask)
513
+ hidden_states = hidden_states.transpose(1, 2).flatten(2, 3).type_as(query)
514
+ hidden_states = hidden_states.unflatten(0, (batch_size, height, width))
515
+ hidden_states = hidden_states.permute(0, 4, 3, 1, 2)
516
+
517
+ hidden_states = attn.to_out[0](hidden_states)
518
+ hidden_states = attn.to_out[1](hidden_states)
519
+
520
+ return hidden_states + residual
521
+
522
+
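+ # The spatial processor attends over the H * W positions within each frame, while the temporal processor
+ # attends over frames at each spatial position; the down/mid/up blocks below pass it a lower-triangular
+ # mask so frame t only attends to frames <= t. A hedged sketch of that mask for 3 frames (illustrative only):
+ #
+ #     mask = torch.tril(torch.ones(3, 3)).bool()
+ #     # tensor([[ True, False, False],
+ #     #         [ True,  True, False],
+ #     #         [ True,  True,  True]])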
523
+ class CosmosDownBlock3d(nn.Module):
524
+ def __init__(
525
+ self,
526
+ in_channels: int,
527
+ out_channels: int,
528
+ num_layers: int,
529
+ dropout: float,
530
+ use_attention: bool,
531
+ use_downsample: bool,
532
+ spatial_downsample: bool,
533
+ temporal_downsample: bool,
534
+ ) -> None:
535
+ super().__init__()
536
+
537
+ resnets, attentions, temp_attentions = [], [], []
538
+ in_channel, out_channel = in_channels, out_channels
539
+
540
+ for _ in range(num_layers):
541
+ resnets.append(CosmosResnetBlock3d(in_channel, out_channel, dropout, num_groups=1))
542
+ in_channel = out_channel
543
+
544
+ if use_attention:
545
+ attentions.append(
546
+ CosmosCausalAttention(
547
+ num_attention_heads=1,
548
+ attention_head_dim=out_channel,
549
+ num_groups=1,
550
+ dropout=dropout,
551
+ processor=CosmosSpatialAttentionProcessor2_0(),
552
+ )
553
+ )
554
+ temp_attentions.append(
555
+ CosmosCausalAttention(
556
+ num_attention_heads=1,
557
+ attention_head_dim=out_channel,
558
+ num_groups=1,
559
+ dropout=dropout,
560
+ processor=CosmosTemporalAttentionProcessor2_0(),
561
+ )
562
+ )
563
+ else:
564
+ attentions.append(None)
565
+ temp_attentions.append(None)
566
+
567
+ self.resnets = nn.ModuleList(resnets)
568
+ self.attentions = nn.ModuleList(attentions)
569
+ self.temp_attentions = nn.ModuleList(temp_attentions)
570
+
571
+ self.downsamplers = None
572
+ if use_downsample:
573
+ self.downsamplers = nn.ModuleList([])
574
+ self.downsamplers.append(CosmosDownsample3d(out_channel, spatial_downsample, temporal_downsample))
575
+
576
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
577
+ for resnet, attention, temp_attention in zip(self.resnets, self.attentions, self.temp_attentions):
578
+ hidden_states = resnet(hidden_states)
579
+ if attention is not None:
580
+ hidden_states = attention(hidden_states)
581
+ if temp_attention is not None:
582
+ num_frames = hidden_states.size(2)
583
+ attention_mask = torch.tril(hidden_states.new_ones(num_frames, num_frames)).bool()
584
+ hidden_states = temp_attention(hidden_states, attention_mask)
585
+
586
+ if self.downsamplers is not None:
587
+ for downsampler in self.downsamplers:
588
+ hidden_states = downsampler(hidden_states)
589
+
590
+ return hidden_states
591
+
592
+
593
+ class CosmosMidBlock3d(nn.Module):
594
+ def __init__(self, in_channels: int, num_layers: int, dropout: float, num_groups: int = 1) -> None:
595
+ super().__init__()
596
+
597
+ resnets, attentions, temp_attentions = [], [], []
598
+
599
+ resnets.append(CosmosResnetBlock3d(in_channels, in_channels, dropout, num_groups))
600
+ for _ in range(num_layers):
601
+ attentions.append(
602
+ CosmosCausalAttention(
603
+ num_attention_heads=1,
604
+ attention_head_dim=in_channels,
605
+ num_groups=num_groups,
606
+ dropout=dropout,
607
+ processor=CosmosSpatialAttentionProcessor2_0(),
608
+ )
609
+ )
610
+ temp_attentions.append(
611
+ CosmosCausalAttention(
612
+ num_attention_heads=1,
613
+ attention_head_dim=in_channels,
614
+ num_groups=num_groups,
615
+ dropout=dropout,
616
+ processor=CosmosTemporalAttentionProcessor2_0(),
617
+ )
618
+ )
619
+ resnets.append(CosmosResnetBlock3d(in_channels, in_channels, dropout, num_groups))
620
+
621
+ self.resnets = nn.ModuleList(resnets)
622
+ self.attentions = nn.ModuleList(attentions)
623
+ self.temp_attentions = nn.ModuleList(temp_attentions)
624
+
625
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
626
+ hidden_states = self.resnets[0](hidden_states)
627
+
628
+ for attention, temp_attention, resnet in zip(self.attentions, self.temp_attentions, self.resnets[1:]):
629
+ num_frames = hidden_states.size(2)
630
+ attention_mask = torch.tril(hidden_states.new_ones(num_frames, num_frames)).bool()
631
+
632
+ hidden_states = attention(hidden_states)
633
+ hidden_states = temp_attention(hidden_states, attention_mask)
634
+ hidden_states = resnet(hidden_states)
635
+
636
+ return hidden_states
637
+
638
+
639
+ class CosmosUpBlock3d(nn.Module):
640
+ def __init__(
641
+ self,
642
+ in_channels: int,
643
+ out_channels: int,
644
+ num_layers: int,
645
+ dropout: float,
646
+ use_attention: bool,
647
+ use_upsample: bool,
648
+ spatial_upsample: bool,
649
+ temporal_upsample: bool,
650
+ ) -> None:
651
+ super().__init__()
652
+
653
+ resnets, attention, temp_attentions = [], [], []
654
+ in_channel, out_channel = in_channels, out_channels
655
+
656
+ for _ in range(num_layers):
657
+ resnets.append(CosmosResnetBlock3d(in_channel, out_channel, dropout, num_groups=1))
658
+ in_channel = out_channel
659
+
660
+ if use_attention:
661
+ attention.append(
662
+ CosmosCausalAttention(
663
+ num_attention_heads=1,
664
+ attention_head_dim=out_channel,
665
+ num_groups=1,
666
+ dropout=dropout,
667
+ processor=CosmosSpatialAttentionProcessor2_0(),
668
+ )
669
+ )
670
+ temp_attentions.append(
671
+ CosmosCausalAttention(
672
+ num_attention_heads=1,
673
+ attention_head_dim=out_channel,
674
+ num_groups=1,
675
+ dropout=dropout,
676
+ processor=CosmosTemporalAttentionProcessor2_0(),
677
+ )
678
+ )
679
+ else:
680
+ attention.append(None)
681
+ temp_attentions.append(None)
682
+
683
+ self.resnets = nn.ModuleList(resnets)
684
+ self.attentions = nn.ModuleList(attention)
685
+ self.temp_attentions = nn.ModuleList(temp_attentions)
686
+
687
+ self.upsamplers = None
688
+ if use_upsample:
689
+ self.upsamplers = nn.ModuleList([])
690
+ self.upsamplers.append(CosmosUpsample3d(out_channel, spatial_upsample, temporal_upsample))
691
+
692
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
693
+ for resnet, attention, temp_attention in zip(self.resnets, self.attentions, self.temp_attentions):
694
+ hidden_states = resnet(hidden_states)
695
+ if attention is not None:
696
+ hidden_states = attention(hidden_states)
697
+ if temp_attention is not None:
698
+ num_frames = hidden_states.size(2)
699
+ attention_mask = torch.tril(hidden_states.new_ones(num_frames, num_frames)).bool()
700
+ hidden_states = temp_attention(hidden_states, attention_mask)
701
+
702
+ if self.upsamplers is not None:
703
+ for upsampler in self.upsamplers:
704
+ hidden_states = upsampler(hidden_states)
705
+
706
+ return hidden_states
707
+
708
+
709
+ class CosmosEncoder3d(nn.Module):
710
+ def __init__(
711
+ self,
712
+ in_channels: int = 3,
713
+ out_channels: int = 16,
714
+ block_out_channels: Tuple[int, ...] = (128, 256, 512, 512),
715
+ num_resnet_blocks: int = 2,
716
+ attention_resolutions: Tuple[int, ...] = (32,),
717
+ resolution: int = 1024,
718
+ patch_size: int = 4,
719
+ patch_type: str = "haar",
720
+ dropout: float = 0.0,
721
+ spatial_compression_ratio: int = 8,
722
+ temporal_compression_ratio: int = 8,
723
+ ) -> None:
724
+ super().__init__()
725
+ inner_dim = in_channels * patch_size**3
726
+ num_spatial_layers = int(math.log2(spatial_compression_ratio)) - int(math.log2(patch_size))
727
+ num_temporal_layers = int(math.log2(temporal_compression_ratio)) - int(math.log2(patch_size))
728
+
729
+ # 1. Input patching & projection
730
+ self.patch_embed = CosmosPatchEmbed3d(patch_size, patch_type)
731
+
732
+ self.conv_in = CosmosConvProjection3d(inner_dim, block_out_channels[0])
733
+
734
+ # 2. Down blocks
735
+ current_resolution = resolution // patch_size
736
+ down_blocks = []
737
+ for i in range(len(block_out_channels) - 1):
738
+ in_channel = block_out_channels[i]
739
+ out_channel = block_out_channels[i + 1]
740
+
741
+ use_attention = current_resolution in attention_resolutions
742
+ spatial_downsample = temporal_downsample = False
743
+ if i < len(block_out_channels) - 2:
744
+ use_downsample = True
745
+ spatial_downsample = i < num_spatial_layers
746
+ temporal_downsample = i < num_temporal_layers
747
+ current_resolution = current_resolution // 2
748
+ else:
749
+ use_downsample = False
750
+
751
+ down_blocks.append(
752
+ CosmosDownBlock3d(
753
+ in_channel,
754
+ out_channel,
755
+ num_resnet_blocks,
756
+ dropout,
757
+ use_attention,
758
+ use_downsample,
759
+ spatial_downsample,
760
+ temporal_downsample,
761
+ )
762
+ )
763
+ self.down_blocks = nn.ModuleList(down_blocks)
764
+
765
+ # 3. Mid block
766
+ self.mid_block = CosmosMidBlock3d(block_out_channels[-1], num_layers=1, dropout=dropout, num_groups=1)
767
+
768
+ # 4. Output norm & projection
769
+ self.norm_out = CosmosCausalGroupNorm(block_out_channels[-1], num_groups=1)
770
+ self.conv_out = CosmosConvProjection3d(block_out_channels[-1], out_channels)
771
+
772
+ self.gradient_checkpointing = False
773
+
774
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
775
+ hidden_states = self.patch_embed(hidden_states)
776
+ hidden_states = self.conv_in(hidden_states)
777
+
778
+ if torch.is_grad_enabled() and self.gradient_checkpointing:
779
+ for block in self.down_blocks:
780
+ hidden_states = self._gradient_checkpointing_func(block, hidden_states)
781
+ hidden_states = self._gradient_checkpointing_func(self.mid_block, hidden_states)
782
+ else:
783
+ for block in self.down_blocks:
784
+ hidden_states = block(hidden_states)
785
+ hidden_states = self.mid_block(hidden_states)
786
+
787
+ hidden_states = self.norm_out(hidden_states)
788
+ hidden_states = F.silu(hidden_states)
789
+ hidden_states = self.conv_out(hidden_states)
790
+ return hidden_states
791
+
792
+
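+ # With the defaults (patch_size=4, 8x spatial and temporal compression, four block widths), the patch
+ # embed already contributes a factor of 4, so only log2(8) - log2(4) = 1 of the down blocks applies
+ # strided downsampling, and conv_in receives inner_dim = in_channels * patch_size**3 = 3 * 64 = 192
+ # channels. A hedged shape sketch (illustrative only):
+ #
+ #     encoder = CosmosEncoder3d()
+ #     latents = encoder(torch.randn(1, 3, 9, 64, 64))   # expected shape: [1, 16, 2, 8, 8]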
793
+ class CosmosDecoder3d(nn.Module):
794
+ def __init__(
795
+ self,
796
+ in_channels: int = 16,
797
+ out_channels: int = 3,
798
+ block_out_channels: Tuple[int, ...] = (128, 256, 512, 512),
799
+ num_resnet_blocks: int = 2,
800
+ attention_resolutions: Tuple[int, ...] = (32,),
801
+ resolution: int = 1024,
802
+ patch_size: int = 4,
803
+ patch_type: str = "haar",
804
+ dropout: float = 0.0,
805
+ spatial_compression_ratio: int = 8,
806
+ temporal_compression_ratio: int = 8,
807
+ ) -> None:
808
+ super().__init__()
809
+ inner_dim = out_channels * patch_size**3
810
+ num_spatial_layers = int(math.log2(spatial_compression_ratio)) - int(math.log2(patch_size))
811
+ num_temporal_layers = int(math.log2(temporal_compression_ratio)) - int(math.log2(patch_size))
812
+ reversed_block_out_channels = list(reversed(block_out_channels))
813
+
814
+ # 1. Input projection
815
+ self.conv_in = CosmosConvProjection3d(in_channels, reversed_block_out_channels[0])
816
+
817
+ # 2. Mid block
818
+ self.mid_block = CosmosMidBlock3d(reversed_block_out_channels[0], num_layers=1, dropout=dropout, num_groups=1)
819
+
820
+ # 3. Up blocks
821
+ current_resolution = (resolution // patch_size) // 2 ** (len(block_out_channels) - 2)
822
+ up_blocks = []
823
+ for i in range(len(block_out_channels) - 1):
824
+ in_channel = reversed_block_out_channels[i]
825
+ out_channel = reversed_block_out_channels[i + 1]
826
+
827
+ use_attention = current_resolution in attention_resolutions
828
+ spatial_upsample = temporal_upsample = False
829
+ if i < len(block_out_channels) - 2:
830
+ use_upsample = True
831
+ temporal_upsample = 0 < i < num_temporal_layers + 1
832
+ spatial_upsample = temporal_upsample or (
833
+ i < num_spatial_layers and num_spatial_layers > num_temporal_layers
834
+ )
835
+ current_resolution = current_resolution * 2
836
+ else:
837
+ use_upsample = False
838
+
839
+ up_blocks.append(
840
+ CosmosUpBlock3d(
841
+ in_channel,
842
+ out_channel,
843
+ num_resnet_blocks + 1,
844
+ dropout,
845
+ use_attention,
846
+ use_upsample,
847
+ spatial_upsample,
848
+ temporal_upsample,
849
+ )
850
+ )
851
+ self.up_blocks = nn.ModuleList(up_blocks)
852
+
853
+ # 4. Output norm & projection & unpatching
854
+ self.norm_out = CosmosCausalGroupNorm(reversed_block_out_channels[-1], num_groups=1)
855
+ self.conv_out = CosmosConvProjection3d(reversed_block_out_channels[-1], inner_dim)
856
+
857
+ self.unpatch_embed = CosmosUnpatcher3d(patch_size, patch_type)
858
+
859
+ self.gradient_checkpointing = False
860
+
861
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
862
+ hidden_states = self.conv_in(hidden_states)
863
+ hidden_states = self.mid_block(hidden_states)
864
+
865
+ for block in self.up_blocks:
866
+ if torch.is_grad_enabled() and self.gradient_checkpointing:
867
+ hidden_states = self._gradient_checkpointing_func(block, hidden_states)
868
+ else:
869
+ hidden_states = block(hidden_states)
870
+
871
+ hidden_states = self.norm_out(hidden_states)
872
+ hidden_states = F.silu(hidden_states)
873
+ hidden_states = self.conv_out(hidden_states)
874
+ hidden_states = self.unpatch_embed(hidden_states)
875
+ return hidden_states
876
+
877
+
878
+ class AutoencoderKLCosmos(ModelMixin, ConfigMixin):
879
+ r"""
880
+ Autoencoder used in [Cosmos](https://huggingface.co/papers/2501.03575).
881
+
882
+ Args:
883
+ in_channels (`int`, defaults to `3`):
884
+ Number of input channels.
885
+ out_channels (`int`, defaults to `3`):
886
+ Number of output channels.
887
+ latent_channels (`int`, defaults to `16`):
888
+ Number of latent channels.
889
+ encoder_block_out_channels (`Tuple[int, ...]`, defaults to `(128, 256, 512, 512)`):
890
+ Number of output channels for each encoder down block.
891
+ decode_block_out_channels (`Tuple[int, ...]`, defaults to `(256, 512, 512, 512)`):
892
+ Number of output channels for each decoder up block.
893
+ attention_resolutions (`Tuple[int, ...]`, defaults to `(32,)`):
894
+ List of image/video resolutions at which to apply attention.
895
+ resolution (`int`, defaults to `1024`):
896
+ Base image/video resolution used for computing whether a block should have attention layers.
897
+ num_layers (`int`, defaults to `2`):
898
+ Number of resnet blocks in each encoder/decoder block.
899
+ patch_size (`int`, defaults to `4`):
900
+ Patch size used for patching the input image/video.
901
+ patch_type (`str`, defaults to `haar`):
902
+ Patch type used for patching the input image/video. Can be either `haar` or `rearrange`.
903
+ scaling_factor (`float`, defaults to `1.0`):
904
+ The component-wise standard deviation of the trained latent space computed using the first batch of the
905
+ training set. This is used to scale the latent space to have unit variance when training the diffusion
906
+ model. The latents are scaled with the formula `z = z * scaling_factor` before being passed to the
907
+ diffusion model. When decoding, the latents are scaled back to the original scale with the formula: `z = 1
908
+ / scaling_factor * z`. For more details, refer to sections 4.3.2 and D.1 of the [High-Resolution Image
909
+ Synthesis with Latent Diffusion Models](https://huggingface.co/papers/2112.10752) paper. Not applicable in
910
+ Cosmos, but we default to 1.0 for consistency.
911
+ spatial_compression_ratio (`int`, defaults to `8`):
912
+ The spatial compression ratio to apply in the VAE. The number of downsample blocks is determined using
913
+ this.
914
+ temporal_compression_ratio (`int`, defaults to `8`):
915
+ The temporal compression ratio to apply in the VAE. The number of downsample blocks is determined using
916
+ this.
917
+ """
918
+
919
+ _supports_gradient_checkpointing = True
920
+
921
+ @register_to_config
922
+ def __init__(
923
+ self,
924
+ in_channels: int = 3,
925
+ out_channels: int = 3,
926
+ latent_channels: int = 16,
927
+ encoder_block_out_channels: Tuple[int, ...] = (128, 256, 512, 512),
928
+ decode_block_out_channels: Tuple[int, ...] = (256, 512, 512, 512),
929
+ attention_resolutions: Tuple[int, ...] = (32,),
930
+ resolution: int = 1024,
931
+ num_layers: int = 2,
932
+ patch_size: int = 4,
933
+ patch_type: str = "haar",
934
+ scaling_factor: float = 1.0,
935
+ spatial_compression_ratio: int = 8,
936
+ temporal_compression_ratio: int = 8,
937
+ latents_mean: Optional[List[float]] = LATENTS_MEAN,
938
+ latents_std: Optional[List[float]] = LATENTS_STD,
939
+ ) -> None:
940
+ super().__init__()
941
+
942
+ self.encoder = CosmosEncoder3d(
943
+ in_channels=in_channels,
944
+ out_channels=latent_channels,
945
+ block_out_channels=encoder_block_out_channels,
946
+ num_resnet_blocks=num_layers,
947
+ attention_resolutions=attention_resolutions,
948
+ resolution=resolution,
949
+ patch_size=patch_size,
950
+ patch_type=patch_type,
951
+ spatial_compression_ratio=spatial_compression_ratio,
952
+ temporal_compression_ratio=temporal_compression_ratio,
953
+ )
954
+ self.decoder = CosmosDecoder3d(
955
+ in_channels=latent_channels,
956
+ out_channels=out_channels,
957
+ block_out_channels=decode_block_out_channels,
958
+ num_resnet_blocks=num_layers,
959
+ attention_resolutions=attention_resolutions,
960
+ resolution=resolution,
961
+ patch_size=patch_size,
962
+ patch_type=patch_type,
963
+ spatial_compression_ratio=spatial_compression_ratio,
964
+ temporal_compression_ratio=temporal_compression_ratio,
965
+ )
966
+
967
+ self.quant_conv = CosmosCausalConv3d(latent_channels, latent_channels, kernel_size=1, padding=0)
968
+ self.post_quant_conv = CosmosCausalConv3d(latent_channels, latent_channels, kernel_size=1, padding=0)
969
+
970
+ # When decoding a batch of video latents at a time, one can save memory by slicing across the batch dimension
971
+ # to perform decoding of a single video latent at a time.
972
+ self.use_slicing = False
973
+
974
+ # When decoding spatially large video latents, the memory requirement is very high. By breaking the video latent
975
+ # frames spatially into smaller tiles and performing multiple forward passes for decoding, and then blending the
976
+ # intermediate tiles together, the memory requirement can be lowered.
977
+ self.use_tiling = False
978
+
979
+ # When decoding temporally long video latents, the memory requirement is very high. By decoding latent frames
980
+ # at a fixed frame batch size (based on `self.num_latent_frames_batch_size`), the memory requirement can be lowered.
981
+ self.use_framewise_encoding = False
982
+ self.use_framewise_decoding = False
983
+
984
+ # This can be configured based on the amount of GPU memory available.
985
+ # `16` for sample frames and `2` for latent frames are sensible defaults for consumer GPUs.
986
+ # Setting it to higher values results in higher memory usage.
987
+ self.num_sample_frames_batch_size = 16
988
+ self.num_latent_frames_batch_size = 2
989
+
990
+ # The minimal tile height and width for spatial tiling to be used
991
+ self.tile_sample_min_height = 512
992
+ self.tile_sample_min_width = 512
993
+ self.tile_sample_min_num_frames = 16
994
+
995
+ # The minimal distance between two spatial tiles
996
+ self.tile_sample_stride_height = 448
997
+ self.tile_sample_stride_width = 448
998
+ self.tile_sample_stride_num_frames = 8
999
+
1000
+ def enable_tiling(
1001
+ self,
1002
+ tile_sample_min_height: Optional[int] = None,
1003
+ tile_sample_min_width: Optional[int] = None,
1004
+ tile_sample_min_num_frames: Optional[int] = None,
1005
+ tile_sample_stride_height: Optional[float] = None,
1006
+ tile_sample_stride_width: Optional[float] = None,
1007
+ tile_sample_stride_num_frames: Optional[float] = None,
1008
+ ) -> None:
1009
+ r"""
1010
+ Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to
1011
+ compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow
1012
+ processing larger images.
1013
+
1014
+ Args:
1015
+ tile_sample_min_height (`int`, *optional*):
1016
+ The minimum height required for a sample to be separated into tiles across the height dimension.
1017
+ tile_sample_min_width (`int`, *optional*):
1018
+ The minimum width required for a sample to be separated into tiles across the width dimension.
1019
+ tile_sample_stride_height (`int`, *optional*):
1020
+ The minimum amount of overlap between two consecutive vertical tiles. This is to ensure that there are
1021
+ no tiling artifacts produced across the height dimension.
1022
+ tile_sample_stride_width (`int`, *optional*):
1023
+ The stride between two consecutive horizontal tiles. This is to ensure that there are no tiling
1024
+ artifacts produced across the width dimension.
1025
+ """
1026
+ self.use_tiling = True
1027
+ self.tile_sample_min_height = tile_sample_min_height or self.tile_sample_min_height
1028
+ self.tile_sample_min_width = tile_sample_min_width or self.tile_sample_min_width
1029
+ self.tile_sample_min_num_frames = tile_sample_min_num_frames or self.tile_sample_min_num_frames
1030
+ self.tile_sample_stride_height = tile_sample_stride_height or self.tile_sample_stride_height
1031
+ self.tile_sample_stride_width = tile_sample_stride_width or self.tile_sample_stride_width
1032
+ self.tile_sample_stride_num_frames = tile_sample_stride_num_frames or self.tile_sample_stride_num_frames
1033
+
1034
+ def disable_tiling(self) -> None:
1035
+ r"""
1036
+ Disable tiled VAE decoding. If `enable_tiling` was previously enabled, this method will go back to computing
1037
+ decoding in one step.
1038
+ """
1039
+ self.use_tiling = False
1040
+
1041
+ def enable_slicing(self) -> None:
1042
+ r"""
1043
+ Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to
1044
+ compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.
1045
+ """
1046
+ self.use_slicing = True
1047
+
1048
+ def disable_slicing(self) -> None:
1049
+ r"""
1050
+ Disable sliced VAE decoding. If `enable_slicing` was previously enabled, this method will go back to computing
1051
+ decoding in one step.
1052
+ """
1053
+ self.use_slicing = False
1054
+
1055
+ def _encode(self, x: torch.Tensor) -> torch.Tensor:
1056
+ x = self.encoder(x)
1057
+ enc = self.quant_conv(x)
1058
+ return enc
1059
+
1060
+ @apply_forward_hook
1061
+ def encode(self, x: torch.Tensor, return_dict: bool = True) -> torch.Tensor:
1062
+ if self.use_slicing and x.shape[0] > 1:
1063
+ encoded_slices = [self._encode(x_slice) for x_slice in x.split(1)]
1064
+ h = torch.cat(encoded_slices)
1065
+ else:
1066
+ h = self._encode(x)
1067
+
1068
+ posterior = IdentityDistribution(h)
1069
+
1070
+ if not return_dict:
1071
+ return (posterior,)
1072
+ return AutoencoderKLOutput(latent_dist=posterior)
1073
+
1074
+ def _decode(self, z: torch.Tensor, return_dict: bool = True) -> Union[DecoderOutput, Tuple[torch.Tensor]]:
1075
+ z = self.post_quant_conv(z)
1076
+ dec = self.decoder(z)
1077
+
1078
+ if not return_dict:
1079
+ return (dec,)
1080
+ return DecoderOutput(sample=dec)
1081
+
1082
+ @apply_forward_hook
1083
+ def decode(self, z: torch.Tensor, return_dict: bool = True) -> Union[DecoderOutput, Tuple[torch.Tensor]]:
1084
+ if self.use_slicing and z.shape[0] > 1:
1085
+ decoded_slices = [self._decode(z_slice).sample for z_slice in z.split(1)]
1086
+ decoded = torch.cat(decoded_slices)
1087
+ else:
1088
+ decoded = self._decode(z).sample
1089
+
1090
+ if not return_dict:
1091
+ return (decoded,)
1092
+ return DecoderOutput(sample=decoded)
1093
+
1094
+ def forward(
1095
+ self,
1096
+ sample: torch.Tensor,
1097
+ sample_posterior: bool = False,
1098
+ return_dict: bool = True,
1099
+ generator: Optional[torch.Generator] = None,
1100
+ ) -> Union[Tuple[torch.Tensor], DecoderOutput]:
1101
+ x = sample
1102
+ posterior = self.encode(x).latent_dist
1103
+ if sample_posterior:
1104
+ z = posterior.sample(generator=generator)
1105
+ else:
1106
+ z = posterior.mode()
1107
+ dec = self.decode(z).sample
1108
+ if not return_dict:
1109
+ return (dec,)
1110
+ return DecoderOutput(sample=dec)
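
A brief usage sketch (not part of the diff): it assumes an instance of the autoencoder class defined in the file above, and the tensor shape is illustrative only.

import torch

vae = ...  # hypothetical: an instance of the VAE defined in this file, e.g. loaded via `from_pretrained`
vae.enable_slicing()  # encode/decode one batch element at a time to reduce peak memory
vae.enable_tiling()   # additionally split large frames into overlapping spatial tiles

video = torch.randn(1, 3, 17, 512, 512)  # (batch, channels, frames, height, width)
with torch.no_grad():
    latents = vae.encode(video).latent_dist.mode()
    reconstruction = vae.decode(latents).sample
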
exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/models/autoencoders/autoencoder_kl_hunyuan_video.py ADDED
@@ -0,0 +1,1096 @@
1
+ # Copyright 2025 The Hunyuan Team and The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import Optional, Tuple, Union
16
+
17
+ import numpy as np
18
+ import torch
19
+ import torch.nn as nn
20
+ import torch.nn.functional as F
21
+ import torch.utils.checkpoint
22
+
23
+ from ...configuration_utils import ConfigMixin, register_to_config
24
+ from ...utils import logging
25
+ from ...utils.accelerate_utils import apply_forward_hook
26
+ from ..activations import get_activation
27
+ from ..attention_processor import Attention
28
+ from ..modeling_outputs import AutoencoderKLOutput
29
+ from ..modeling_utils import ModelMixin
30
+ from .vae import DecoderOutput, DiagonalGaussianDistribution
31
+
32
+
33
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
34
+
35
+
36
+ def prepare_causal_attention_mask(
37
+ num_frames: int, height_width: int, dtype: torch.dtype, device: torch.device, batch_size: Optional[int] = None
38
+ ) -> torch.Tensor:
39
+ indices = torch.arange(1, num_frames + 1, dtype=torch.int32, device=device)
40
+ indices_blocks = indices.repeat_interleave(height_width)
41
+ x, y = torch.meshgrid(indices_blocks, indices_blocks, indexing="xy")
42
+ mask = torch.where(x <= y, 0, -float("inf")).to(dtype=dtype)
43
+
44
+ if batch_size is not None:
45
+ mask = mask.unsqueeze(0).expand(batch_size, -1, -1)
46
+ return mask
47
+
48
+
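import torch

# Illustration (annotation, not part of the diff): `prepare_causal_attention_mask` builds a
# block-causal mask so that every token of frame t may attend to tokens of frames <= t but
# never to future frames. The shapes below are chosen only for demonstration.
mask = prepare_causal_attention_mask(num_frames=3, height_width=4, dtype=torch.float32, device=torch.device("cpu"))
assert mask.shape == (3 * 4, 3 * 4)  # one row/column per (frame, spatial position) token
# Allowed positions hold 0.0; positions that would peek at a future frame hold -inf.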
49
+ class HunyuanVideoCausalConv3d(nn.Module):
50
+ def __init__(
51
+ self,
52
+ in_channels: int,
53
+ out_channels: int,
54
+ kernel_size: Union[int, Tuple[int, int, int]] = 3,
55
+ stride: Union[int, Tuple[int, int, int]] = 1,
56
+ padding: Union[int, Tuple[int, int, int]] = 0,
57
+ dilation: Union[int, Tuple[int, int, int]] = 1,
58
+ bias: bool = True,
59
+ pad_mode: str = "replicate",
60
+ ) -> None:
61
+ super().__init__()
62
+
63
+ kernel_size = (kernel_size, kernel_size, kernel_size) if isinstance(kernel_size, int) else kernel_size
64
+
65
+ self.pad_mode = pad_mode
66
+ self.time_causal_padding = (
67
+ kernel_size[0] // 2,
68
+ kernel_size[0] // 2,
69
+ kernel_size[1] // 2,
70
+ kernel_size[1] // 2,
71
+ kernel_size[2] - 1,
72
+ 0,
73
+ )
74
+
75
+ self.conv = nn.Conv3d(in_channels, out_channels, kernel_size, stride, padding, dilation, bias=bias)
76
+
77
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
78
+ hidden_states = F.pad(hidden_states, self.time_causal_padding, mode=self.pad_mode)
79
+ return self.conv(hidden_states)
80
+
81
+
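import torch
import torch.nn.functional as F

# Illustration (annotation, not part of the diff): for kernel_size=3 the padding tuple above is
# (1, 1, 1, 1, 2, 0). F.pad reads it as (W_left, W_right, H_top, H_bottom, T_front, T_back), so the
# entire temporal context of kernel_size - 1 = 2 frames is prepended, which is what makes the
# convolution causal: output frame t never depends on input frames later than t.
x = torch.randn(1, 8, 5, 16, 16)  # (batch, channels, frames, height, width), hypothetical shape
x_padded = F.pad(x, (1, 1, 1, 1, 2, 0), mode="replicate")
assert x_padded.shape == (1, 8, 7, 18, 18)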
82
+ class HunyuanVideoUpsampleCausal3D(nn.Module):
83
+ def __init__(
84
+ self,
85
+ in_channels: int,
86
+ out_channels: Optional[int] = None,
87
+ kernel_size: int = 3,
88
+ stride: int = 1,
89
+ bias: bool = True,
90
+ upsample_factor: Tuple[float, float, float] = (2, 2, 2),
91
+ ) -> None:
92
+ super().__init__()
93
+
94
+ out_channels = out_channels or in_channels
95
+ self.upsample_factor = upsample_factor
96
+
97
+ self.conv = HunyuanVideoCausalConv3d(in_channels, out_channels, kernel_size, stride, bias=bias)
98
+
99
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
100
+ num_frames = hidden_states.size(2)
101
+
102
+ first_frame, other_frames = hidden_states.split((1, num_frames - 1), dim=2)
103
+ first_frame = F.interpolate(
104
+ first_frame.squeeze(2), scale_factor=self.upsample_factor[1:], mode="nearest"
105
+ ).unsqueeze(2)
106
+
107
+ if num_frames > 1:
108
+ # See: https://github.com/pytorch/pytorch/issues/81665
109
+ # Unless you have a version of pytorch where non-contiguous implementation of F.interpolate
110
+ # is fixed, this will raise either a runtime error, or fail silently with bad outputs.
111
+ # If you are encountering an error here, make sure to try running encoding/decoding with
112
+ # `vae.enable_tiling()` first. If that doesn't work, open an issue at:
113
+ # https://github.com/huggingface/diffusers/issues
114
+ other_frames = other_frames.contiguous()
115
+ other_frames = F.interpolate(other_frames, scale_factor=self.upsample_factor, mode="nearest")
116
+ hidden_states = torch.cat((first_frame, other_frames), dim=2)
117
+ else:
118
+ hidden_states = first_frame
119
+
120
+ hidden_states = self.conv(hidden_states)
121
+ return hidden_states
122
+
123
+
124
+ class HunyuanVideoDownsampleCausal3D(nn.Module):
125
+ def __init__(
126
+ self,
127
+ channels: int,
128
+ out_channels: Optional[int] = None,
129
+ padding: int = 1,
130
+ kernel_size: int = 3,
131
+ bias: bool = True,
132
+ stride=2,
133
+ ) -> None:
134
+ super().__init__()
135
+ out_channels = out_channels or channels
136
+
137
+ self.conv = HunyuanVideoCausalConv3d(channels, out_channels, kernel_size, stride, padding, bias=bias)
138
+
139
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
140
+ hidden_states = self.conv(hidden_states)
141
+ return hidden_states
142
+
143
+
144
+ class HunyuanVideoResnetBlockCausal3D(nn.Module):
145
+ def __init__(
146
+ self,
147
+ in_channels: int,
148
+ out_channels: Optional[int] = None,
149
+ dropout: float = 0.0,
150
+ groups: int = 32,
151
+ eps: float = 1e-6,
152
+ non_linearity: str = "swish",
153
+ ) -> None:
154
+ super().__init__()
155
+ out_channels = out_channels or in_channels
156
+
157
+ self.nonlinearity = get_activation(non_linearity)
158
+
159
+ self.norm1 = nn.GroupNorm(groups, in_channels, eps=eps, affine=True)
160
+ self.conv1 = HunyuanVideoCausalConv3d(in_channels, out_channels, 3, 1, 0)
161
+
162
+ self.norm2 = nn.GroupNorm(groups, out_channels, eps=eps, affine=True)
163
+ self.dropout = nn.Dropout(dropout)
164
+ self.conv2 = HunyuanVideoCausalConv3d(out_channels, out_channels, 3, 1, 0)
165
+
166
+ self.conv_shortcut = None
167
+ if in_channels != out_channels:
168
+ self.conv_shortcut = HunyuanVideoCausalConv3d(in_channels, out_channels, 1, 1, 0)
169
+
170
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
171
+ hidden_states = hidden_states.contiguous()
172
+ residual = hidden_states
173
+
174
+ hidden_states = self.norm1(hidden_states)
175
+ hidden_states = self.nonlinearity(hidden_states)
176
+ hidden_states = self.conv1(hidden_states)
177
+
178
+ hidden_states = self.norm2(hidden_states)
179
+ hidden_states = self.nonlinearity(hidden_states)
180
+ hidden_states = self.dropout(hidden_states)
181
+ hidden_states = self.conv2(hidden_states)
182
+
183
+ if self.conv_shortcut is not None:
184
+ residual = self.conv_shortcut(residual)
185
+
186
+ hidden_states = hidden_states + residual
187
+ return hidden_states
188
+
189
+
190
+ class HunyuanVideoMidBlock3D(nn.Module):
191
+ def __init__(
192
+ self,
193
+ in_channels: int,
194
+ dropout: float = 0.0,
195
+ num_layers: int = 1,
196
+ resnet_eps: float = 1e-6,
197
+ resnet_act_fn: str = "swish",
198
+ resnet_groups: int = 32,
199
+ add_attention: bool = True,
200
+ attention_head_dim: int = 1,
201
+ ) -> None:
202
+ super().__init__()
203
+ resnet_groups = resnet_groups if resnet_groups is not None else min(in_channels // 4, 32)
204
+ self.add_attention = add_attention
205
+
206
+ # There is always at least one resnet
207
+ resnets = [
208
+ HunyuanVideoResnetBlockCausal3D(
209
+ in_channels=in_channels,
210
+ out_channels=in_channels,
211
+ eps=resnet_eps,
212
+ groups=resnet_groups,
213
+ dropout=dropout,
214
+ non_linearity=resnet_act_fn,
215
+ )
216
+ ]
217
+ attentions = []
218
+
219
+ for _ in range(num_layers):
220
+ if self.add_attention:
221
+ attentions.append(
222
+ Attention(
223
+ in_channels,
224
+ heads=in_channels // attention_head_dim,
225
+ dim_head=attention_head_dim,
226
+ eps=resnet_eps,
227
+ norm_num_groups=resnet_groups,
228
+ residual_connection=True,
229
+ bias=True,
230
+ upcast_softmax=True,
231
+ _from_deprecated_attn_block=True,
232
+ )
233
+ )
234
+ else:
235
+ attentions.append(None)
236
+
237
+ resnets.append(
238
+ HunyuanVideoResnetBlockCausal3D(
239
+ in_channels=in_channels,
240
+ out_channels=in_channels,
241
+ eps=resnet_eps,
242
+ groups=resnet_groups,
243
+ dropout=dropout,
244
+ non_linearity=resnet_act_fn,
245
+ )
246
+ )
247
+
248
+ self.attentions = nn.ModuleList(attentions)
249
+ self.resnets = nn.ModuleList(resnets)
250
+
251
+ self.gradient_checkpointing = False
252
+
253
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
254
+ if torch.is_grad_enabled() and self.gradient_checkpointing:
255
+ hidden_states = self._gradient_checkpointing_func(self.resnets[0], hidden_states)
256
+
257
+ for attn, resnet in zip(self.attentions, self.resnets[1:]):
258
+ if attn is not None:
259
+ batch_size, num_channels, num_frames, height, width = hidden_states.shape
260
+ hidden_states = hidden_states.permute(0, 2, 3, 4, 1).flatten(1, 3)
261
+ attention_mask = prepare_causal_attention_mask(
262
+ num_frames, height * width, hidden_states.dtype, hidden_states.device, batch_size=batch_size
263
+ )
264
+ hidden_states = attn(hidden_states, attention_mask=attention_mask)
265
+ hidden_states = hidden_states.unflatten(1, (num_frames, height, width)).permute(0, 4, 1, 2, 3)
266
+
267
+ hidden_states = self._gradient_checkpointing_func(resnet, hidden_states)
268
+
269
+ else:
270
+ hidden_states = self.resnets[0](hidden_states)
271
+
272
+ for attn, resnet in zip(self.attentions, self.resnets[1:]):
273
+ if attn is not None:
274
+ batch_size, num_channels, num_frames, height, width = hidden_states.shape
275
+ hidden_states = hidden_states.permute(0, 2, 3, 4, 1).flatten(1, 3)
276
+ attention_mask = prepare_causal_attention_mask(
277
+ num_frames, height * width, hidden_states.dtype, hidden_states.device, batch_size=batch_size
278
+ )
279
+ hidden_states = attn(hidden_states, attention_mask=attention_mask)
280
+ hidden_states = hidden_states.unflatten(1, (num_frames, height, width)).permute(0, 4, 1, 2, 3)
281
+
282
+ hidden_states = resnet(hidden_states)
283
+
284
+ return hidden_states
285
+
286
+
287
+ class HunyuanVideoDownBlock3D(nn.Module):
288
+ def __init__(
289
+ self,
290
+ in_channels: int,
291
+ out_channels: int,
292
+ dropout: float = 0.0,
293
+ num_layers: int = 1,
294
+ resnet_eps: float = 1e-6,
295
+ resnet_act_fn: str = "swish",
296
+ resnet_groups: int = 32,
297
+ add_downsample: bool = True,
298
+ downsample_stride: int = 2,
299
+ downsample_padding: int = 1,
300
+ ) -> None:
301
+ super().__init__()
302
+ resnets = []
303
+
304
+ for i in range(num_layers):
305
+ in_channels = in_channels if i == 0 else out_channels
306
+ resnets.append(
307
+ HunyuanVideoResnetBlockCausal3D(
308
+ in_channels=in_channels,
309
+ out_channels=out_channels,
310
+ eps=resnet_eps,
311
+ groups=resnet_groups,
312
+ dropout=dropout,
313
+ non_linearity=resnet_act_fn,
314
+ )
315
+ )
316
+
317
+ self.resnets = nn.ModuleList(resnets)
318
+
319
+ if add_downsample:
320
+ self.downsamplers = nn.ModuleList(
321
+ [
322
+ HunyuanVideoDownsampleCausal3D(
323
+ out_channels,
324
+ out_channels=out_channels,
325
+ padding=downsample_padding,
326
+ stride=downsample_stride,
327
+ )
328
+ ]
329
+ )
330
+ else:
331
+ self.downsamplers = None
332
+
333
+ self.gradient_checkpointing = False
334
+
335
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
336
+ if torch.is_grad_enabled() and self.gradient_checkpointing:
337
+ for resnet in self.resnets:
338
+ hidden_states = self._gradient_checkpointing_func(resnet, hidden_states)
339
+ else:
340
+ for resnet in self.resnets:
341
+ hidden_states = resnet(hidden_states)
342
+
343
+ if self.downsamplers is not None:
344
+ for downsampler in self.downsamplers:
345
+ hidden_states = downsampler(hidden_states)
346
+
347
+ return hidden_states
348
+
349
+
350
+ class HunyuanVideoUpBlock3D(nn.Module):
351
+ def __init__(
352
+ self,
353
+ in_channels: int,
354
+ out_channels: int,
355
+ dropout: float = 0.0,
356
+ num_layers: int = 1,
357
+ resnet_eps: float = 1e-6,
358
+ resnet_act_fn: str = "swish",
359
+ resnet_groups: int = 32,
360
+ add_upsample: bool = True,
361
+ upsample_scale_factor: Tuple[int, int, int] = (2, 2, 2),
362
+ ) -> None:
363
+ super().__init__()
364
+ resnets = []
365
+
366
+ for i in range(num_layers):
367
+ input_channels = in_channels if i == 0 else out_channels
368
+
369
+ resnets.append(
370
+ HunyuanVideoResnetBlockCausal3D(
371
+ in_channels=input_channels,
372
+ out_channels=out_channels,
373
+ eps=resnet_eps,
374
+ groups=resnet_groups,
375
+ dropout=dropout,
376
+ non_linearity=resnet_act_fn,
377
+ )
378
+ )
379
+
380
+ self.resnets = nn.ModuleList(resnets)
381
+
382
+ if add_upsample:
383
+ self.upsamplers = nn.ModuleList(
384
+ [
385
+ HunyuanVideoUpsampleCausal3D(
386
+ out_channels,
387
+ out_channels=out_channels,
388
+ upsample_factor=upsample_scale_factor,
389
+ )
390
+ ]
391
+ )
392
+ else:
393
+ self.upsamplers = None
394
+
395
+ self.gradient_checkpointing = False
396
+
397
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
398
+ if torch.is_grad_enabled() and self.gradient_checkpointing:
399
+ for resnet in self.resnets:
400
+ hidden_states = self._gradient_checkpointing_func(resnet, hidden_states)
401
+
402
+ else:
403
+ for resnet in self.resnets:
404
+ hidden_states = resnet(hidden_states)
405
+
406
+ if self.upsamplers is not None:
407
+ for upsampler in self.upsamplers:
408
+ hidden_states = upsampler(hidden_states)
409
+
410
+ return hidden_states
411
+
412
+
413
+ class HunyuanVideoEncoder3D(nn.Module):
414
+ r"""
415
+ Causal encoder for 3D video-like data introduced in [Hunyuan Video](https://huggingface.co/papers/2412.03603).
416
+ """
417
+
418
+ def __init__(
419
+ self,
420
+ in_channels: int = 3,
421
+ out_channels: int = 3,
422
+ down_block_types: Tuple[str, ...] = (
423
+ "HunyuanVideoDownBlock3D",
424
+ "HunyuanVideoDownBlock3D",
425
+ "HunyuanVideoDownBlock3D",
426
+ "HunyuanVideoDownBlock3D",
427
+ ),
428
+ block_out_channels: Tuple[int, ...] = (128, 256, 512, 512),
429
+ layers_per_block: int = 2,
430
+ norm_num_groups: int = 32,
431
+ act_fn: str = "silu",
432
+ double_z: bool = True,
433
+ mid_block_add_attention=True,
434
+ temporal_compression_ratio: int = 4,
435
+ spatial_compression_ratio: int = 8,
436
+ ) -> None:
437
+ super().__init__()
438
+
439
+ self.conv_in = HunyuanVideoCausalConv3d(in_channels, block_out_channels[0], kernel_size=3, stride=1)
440
+ self.mid_block = None
441
+ self.down_blocks = nn.ModuleList([])
442
+
443
+ output_channel = block_out_channels[0]
444
+ for i, down_block_type in enumerate(down_block_types):
445
+ if down_block_type != "HunyuanVideoDownBlock3D":
446
+ raise ValueError(f"Unsupported down_block_type: {down_block_type}")
447
+
448
+ input_channel = output_channel
449
+ output_channel = block_out_channels[i]
450
+ is_final_block = i == len(block_out_channels) - 1
451
+ num_spatial_downsample_layers = int(np.log2(spatial_compression_ratio))
452
+ num_time_downsample_layers = int(np.log2(temporal_compression_ratio))
453
+
454
+ if temporal_compression_ratio == 4:
455
+ add_spatial_downsample = bool(i < num_spatial_downsample_layers)
456
+ add_time_downsample = bool(
457
+ i >= (len(block_out_channels) - 1 - num_time_downsample_layers) and not is_final_block
458
+ )
459
+ elif temporal_compression_ratio == 8:
460
+ add_spatial_downsample = bool(i < num_spatial_downsample_layers)
461
+ add_time_downsample = bool(i < num_time_downsample_layers)
462
+ else:
463
+ raise ValueError(f"Unsupported time_compression_ratio: {temporal_compression_ratio}")
464
+
465
+ downsample_stride_HW = (2, 2) if add_spatial_downsample else (1, 1)
466
+ downsample_stride_T = (2,) if add_time_downsample else (1,)
467
+ downsample_stride = tuple(downsample_stride_T + downsample_stride_HW)
468
+
469
+ down_block = HunyuanVideoDownBlock3D(
470
+ num_layers=layers_per_block,
471
+ in_channels=input_channel,
472
+ out_channels=output_channel,
473
+ add_downsample=bool(add_spatial_downsample or add_time_downsample),
474
+ resnet_eps=1e-6,
475
+ resnet_act_fn=act_fn,
476
+ resnet_groups=norm_num_groups,
477
+ downsample_stride=downsample_stride,
478
+ downsample_padding=0,
479
+ )
480
+
481
+ self.down_blocks.append(down_block)
482
+
483
+ self.mid_block = HunyuanVideoMidBlock3D(
484
+ in_channels=block_out_channels[-1],
485
+ resnet_eps=1e-6,
486
+ resnet_act_fn=act_fn,
487
+ attention_head_dim=block_out_channels[-1],
488
+ resnet_groups=norm_num_groups,
489
+ add_attention=mid_block_add_attention,
490
+ )
491
+
492
+ self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[-1], num_groups=norm_num_groups, eps=1e-6)
493
+ self.conv_act = nn.SiLU()
494
+
495
+ conv_out_channels = 2 * out_channels if double_z else out_channels
496
+ self.conv_out = HunyuanVideoCausalConv3d(block_out_channels[-1], conv_out_channels, kernel_size=3)
497
+
498
+ self.gradient_checkpointing = False
499
+
500
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
501
+ hidden_states = self.conv_in(hidden_states)
502
+
503
+ if torch.is_grad_enabled() and self.gradient_checkpointing:
504
+ for down_block in self.down_blocks:
505
+ hidden_states = self._gradient_checkpointing_func(down_block, hidden_states)
506
+
507
+ hidden_states = self._gradient_checkpointing_func(self.mid_block, hidden_states)
508
+ else:
509
+ for down_block in self.down_blocks:
510
+ hidden_states = down_block(hidden_states)
511
+
512
+ hidden_states = self.mid_block(hidden_states)
513
+
514
+ hidden_states = self.conv_norm_out(hidden_states)
515
+ hidden_states = self.conv_act(hidden_states)
516
+ hidden_states = self.conv_out(hidden_states)
517
+
518
+ return hidden_states
519
+
520
+
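# Illustration (annotation, not part of the diff): with the default spatial_compression_ratio=8 and
# temporal_compression_ratio=4, the down-block construction loop in HunyuanVideoEncoder3D.__init__
# above assigns the following downsample strides (T, H, W) to the four down blocks, yielding
# 2*2*2 = 8x spatial and 2*2 = 4x temporal compression overall:
#   block 0 -> (1, 2, 2)  spatial only
#   block 1 -> (2, 2, 2)  spatial + temporal
#   block 2 -> (2, 2, 2)  spatial + temporal
#   block 3 -> no downsampler (final block)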
521
+ class HunyuanVideoDecoder3D(nn.Module):
522
+ r"""
523
+ Causal decoder for 3D video-like data introduced in [Hunyuan Video](https://huggingface.co/papers/2412.03603).
524
+ """
525
+
526
+ def __init__(
527
+ self,
528
+ in_channels: int = 3,
529
+ out_channels: int = 3,
530
+ up_block_types: Tuple[str, ...] = (
531
+ "HunyuanVideoUpBlock3D",
532
+ "HunyuanVideoUpBlock3D",
533
+ "HunyuanVideoUpBlock3D",
534
+ "HunyuanVideoUpBlock3D",
535
+ ),
536
+ block_out_channels: Tuple[int, ...] = (128, 256, 512, 512),
537
+ layers_per_block: int = 2,
538
+ norm_num_groups: int = 32,
539
+ act_fn: str = "silu",
540
+ mid_block_add_attention=True,
541
+ time_compression_ratio: int = 4,
542
+ spatial_compression_ratio: int = 8,
543
+ ):
544
+ super().__init__()
545
+ self.layers_per_block = layers_per_block
546
+
547
+ self.conv_in = HunyuanVideoCausalConv3d(in_channels, block_out_channels[-1], kernel_size=3, stride=1)
548
+ self.up_blocks = nn.ModuleList([])
549
+
550
+ # mid
551
+ self.mid_block = HunyuanVideoMidBlock3D(
552
+ in_channels=block_out_channels[-1],
553
+ resnet_eps=1e-6,
554
+ resnet_act_fn=act_fn,
555
+ attention_head_dim=block_out_channels[-1],
556
+ resnet_groups=norm_num_groups,
557
+ add_attention=mid_block_add_attention,
558
+ )
559
+
560
+ # up
561
+ reversed_block_out_channels = list(reversed(block_out_channels))
562
+ output_channel = reversed_block_out_channels[0]
563
+ for i, up_block_type in enumerate(up_block_types):
564
+ if up_block_type != "HunyuanVideoUpBlock3D":
565
+ raise ValueError(f"Unsupported up_block_type: {up_block_type}")
566
+
567
+ prev_output_channel = output_channel
568
+ output_channel = reversed_block_out_channels[i]
569
+ is_final_block = i == len(block_out_channels) - 1
570
+ num_spatial_upsample_layers = int(np.log2(spatial_compression_ratio))
571
+ num_time_upsample_layers = int(np.log2(time_compression_ratio))
572
+
573
+ if time_compression_ratio == 4:
574
+ add_spatial_upsample = bool(i < num_spatial_upsample_layers)
575
+ add_time_upsample = bool(
576
+ i >= len(block_out_channels) - 1 - num_time_upsample_layers and not is_final_block
577
+ )
578
+ else:
579
+ raise ValueError(f"Unsupported time_compression_ratio: {time_compression_ratio}")
580
+
581
+ upsample_scale_factor_HW = (2, 2) if add_spatial_upsample else (1, 1)
582
+ upsample_scale_factor_T = (2,) if add_time_upsample else (1,)
583
+ upsample_scale_factor = tuple(upsample_scale_factor_T + upsample_scale_factor_HW)
584
+
585
+ up_block = HunyuanVideoUpBlock3D(
586
+ num_layers=self.layers_per_block + 1,
587
+ in_channels=prev_output_channel,
588
+ out_channels=output_channel,
589
+ add_upsample=bool(add_spatial_upsample or add_time_upsample),
590
+ upsample_scale_factor=upsample_scale_factor,
591
+ resnet_eps=1e-6,
592
+ resnet_act_fn=act_fn,
593
+ resnet_groups=norm_num_groups,
594
+ )
595
+
596
+ self.up_blocks.append(up_block)
597
+ prev_output_channel = output_channel
598
+
599
+ # out
600
+ self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=1e-6)
601
+ self.conv_act = nn.SiLU()
602
+ self.conv_out = HunyuanVideoCausalConv3d(block_out_channels[0], out_channels, kernel_size=3)
603
+
604
+ self.gradient_checkpointing = False
605
+
606
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
607
+ hidden_states = self.conv_in(hidden_states)
608
+
609
+ if torch.is_grad_enabled() and self.gradient_checkpointing:
610
+ hidden_states = self._gradient_checkpointing_func(self.mid_block, hidden_states)
611
+
612
+ for up_block in self.up_blocks:
613
+ hidden_states = self._gradient_checkpointing_func(up_block, hidden_states)
614
+ else:
615
+ hidden_states = self.mid_block(hidden_states)
616
+
617
+ for up_block in self.up_blocks:
618
+ hidden_states = up_block(hidden_states)
619
+
620
+ # post-process
621
+ hidden_states = self.conv_norm_out(hidden_states)
622
+ hidden_states = self.conv_act(hidden_states)
623
+ hidden_states = self.conv_out(hidden_states)
624
+
625
+ return hidden_states
626
+
627
+
628
+ class AutoencoderKLHunyuanVideo(ModelMixin, ConfigMixin):
629
+ r"""
630
+ A VAE model with KL loss for encoding videos into latents and decoding latent representations into videos.
631
+ Introduced in [HunyuanVideo](https://huggingface.co/papers/2412.03603).
632
+
633
+ This model inherits from [`ModelMixin`]. Check the superclass documentation for it's generic methods implemented
634
+ for all models (such as downloading or saving).
635
+ """
636
+
637
+ _supports_gradient_checkpointing = True
638
+
639
+ @register_to_config
640
+ def __init__(
641
+ self,
642
+ in_channels: int = 3,
643
+ out_channels: int = 3,
644
+ latent_channels: int = 16,
645
+ down_block_types: Tuple[str, ...] = (
646
+ "HunyuanVideoDownBlock3D",
647
+ "HunyuanVideoDownBlock3D",
648
+ "HunyuanVideoDownBlock3D",
649
+ "HunyuanVideoDownBlock3D",
650
+ ),
651
+ up_block_types: Tuple[str, ...] = (
652
+ "HunyuanVideoUpBlock3D",
653
+ "HunyuanVideoUpBlock3D",
654
+ "HunyuanVideoUpBlock3D",
655
+ "HunyuanVideoUpBlock3D",
656
+ ),
657
+ block_out_channels: Tuple[int] = (128, 256, 512, 512),
658
+ layers_per_block: int = 2,
659
+ act_fn: str = "silu",
660
+ norm_num_groups: int = 32,
661
+ scaling_factor: float = 0.476986,
662
+ spatial_compression_ratio: int = 8,
663
+ temporal_compression_ratio: int = 4,
664
+ mid_block_add_attention: bool = True,
665
+ ) -> None:
666
+ super().__init__()
667
+
668
+ self.time_compression_ratio = temporal_compression_ratio
669
+
670
+ self.encoder = HunyuanVideoEncoder3D(
671
+ in_channels=in_channels,
672
+ out_channels=latent_channels,
673
+ down_block_types=down_block_types,
674
+ block_out_channels=block_out_channels,
675
+ layers_per_block=layers_per_block,
676
+ norm_num_groups=norm_num_groups,
677
+ act_fn=act_fn,
678
+ double_z=True,
679
+ mid_block_add_attention=mid_block_add_attention,
680
+ temporal_compression_ratio=temporal_compression_ratio,
681
+ spatial_compression_ratio=spatial_compression_ratio,
682
+ )
683
+
684
+ self.decoder = HunyuanVideoDecoder3D(
685
+ in_channels=latent_channels,
686
+ out_channels=out_channels,
687
+ up_block_types=up_block_types,
688
+ block_out_channels=block_out_channels,
689
+ layers_per_block=layers_per_block,
690
+ norm_num_groups=norm_num_groups,
691
+ act_fn=act_fn,
692
+ time_compression_ratio=temporal_compression_ratio,
693
+ spatial_compression_ratio=spatial_compression_ratio,
694
+ mid_block_add_attention=mid_block_add_attention,
695
+ )
696
+
697
+ self.quant_conv = nn.Conv3d(2 * latent_channels, 2 * latent_channels, kernel_size=1)
698
+ self.post_quant_conv = nn.Conv3d(latent_channels, latent_channels, kernel_size=1)
699
+
700
+ self.spatial_compression_ratio = spatial_compression_ratio
701
+ self.temporal_compression_ratio = temporal_compression_ratio
702
+
703
+ # When decoding a batch of video latents at a time, one can save memory by slicing across the batch dimension
704
+ # to perform decoding of a single video latent at a time.
705
+ self.use_slicing = False
706
+
707
+ # When decoding spatially large video latents, the memory requirement is very high. By breaking the video latent
708
+ # frames spatially into smaller tiles and performing multiple forward passes for decoding, and then blending the
709
+ # intermediate tiles together, the memory requirement can be lowered.
710
+ self.use_tiling = False
711
+
712
+ # When decoding temporally long video latents, the memory requirement is very high. By decoding latent frames
713
+ # at a fixed frame batch size (based on `self.tile_sample_min_num_frames`), the memory requirement can be lowered.
714
+ self.use_framewise_encoding = True
715
+ self.use_framewise_decoding = True
716
+
717
+ # The minimal tile height and width for spatial tiling to be used
718
+ self.tile_sample_min_height = 256
719
+ self.tile_sample_min_width = 256
720
+ self.tile_sample_min_num_frames = 16
721
+
722
+ # The minimal distance between two spatial tiles
723
+ self.tile_sample_stride_height = 192
724
+ self.tile_sample_stride_width = 192
725
+ self.tile_sample_stride_num_frames = 12
726
+
727
+ def enable_tiling(
728
+ self,
729
+ tile_sample_min_height: Optional[int] = None,
730
+ tile_sample_min_width: Optional[int] = None,
731
+ tile_sample_min_num_frames: Optional[int] = None,
732
+ tile_sample_stride_height: Optional[float] = None,
733
+ tile_sample_stride_width: Optional[float] = None,
734
+ tile_sample_stride_num_frames: Optional[float] = None,
735
+ ) -> None:
736
+ r"""
737
+ Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to
738
+ compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow
739
+ processing larger images.
740
+
741
+ Args:
742
+ tile_sample_min_height (`int`, *optional*):
743
+ The minimum height required for a sample to be separated into tiles across the height dimension.
744
+ tile_sample_min_width (`int`, *optional*):
745
+ The minimum width required for a sample to be separated into tiles across the width dimension.
746
+ tile_sample_min_num_frames (`int`, *optional*):
747
+ The minimum number of frames required for a sample to be separated into tiles across the frame
748
+ dimension.
749
+ tile_sample_stride_height (`int`, *optional*):
750
+ The minimum amount of overlap between two consecutive vertical tiles. This is to ensure that there are
751
+ no tiling artifacts produced across the height dimension.
752
+ tile_sample_stride_width (`int`, *optional*):
753
+ The stride between two consecutive horizontal tiles. This is to ensure that there are no tiling
754
+ artifacts produced across the width dimension.
755
+ tile_sample_stride_num_frames (`int`, *optional*):
756
+ The stride between two consecutive frame tiles. This is to ensure that there are no tiling artifacts
757
+ produced across the frame dimension.
758
+ """
759
+ self.use_tiling = True
760
+ self.tile_sample_min_height = tile_sample_min_height or self.tile_sample_min_height
761
+ self.tile_sample_min_width = tile_sample_min_width or self.tile_sample_min_width
762
+ self.tile_sample_min_num_frames = tile_sample_min_num_frames or self.tile_sample_min_num_frames
763
+ self.tile_sample_stride_height = tile_sample_stride_height or self.tile_sample_stride_height
764
+ self.tile_sample_stride_width = tile_sample_stride_width or self.tile_sample_stride_width
765
+ self.tile_sample_stride_num_frames = tile_sample_stride_num_frames or self.tile_sample_stride_num_frames
766
+
767
+ def disable_tiling(self) -> None:
768
+ r"""
769
+ Disable tiled VAE decoding. If `enable_tiling` was previously enabled, this method will go back to computing
770
+ decoding in one step.
771
+ """
772
+ self.use_tiling = False
773
+
774
+ def enable_slicing(self) -> None:
775
+ r"""
776
+ Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to
777
+ compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.
778
+ """
779
+ self.use_slicing = True
780
+
781
+ def disable_slicing(self) -> None:
782
+ r"""
783
+ Disable sliced VAE decoding. If `enable_slicing` was previously enabled, this method will go back to computing
784
+ decoding in one step.
785
+ """
786
+ self.use_slicing = False
787
+
788
+ def _encode(self, x: torch.Tensor) -> torch.Tensor:
789
+ batch_size, num_channels, num_frames, height, width = x.shape
790
+
791
+ if self.use_framewise_encoding and num_frames > self.tile_sample_min_num_frames:
792
+ return self._temporal_tiled_encode(x)
793
+
794
+ if self.use_tiling and (width > self.tile_sample_min_width or height > self.tile_sample_min_height):
795
+ return self.tiled_encode(x)
796
+
797
+ x = self.encoder(x)
798
+ enc = self.quant_conv(x)
799
+ return enc
800
+
801
+ @apply_forward_hook
802
+ def encode(
803
+ self, x: torch.Tensor, return_dict: bool = True
804
+ ) -> Union[AutoencoderKLOutput, Tuple[DiagonalGaussianDistribution]]:
805
+ r"""
806
+ Encode a batch of images into latents.
807
+
808
+ Args:
809
+ x (`torch.Tensor`): Input batch of images.
810
+ return_dict (`bool`, *optional*, defaults to `True`):
811
+ Whether to return a [`~models.autoencoder_kl.AutoencoderKLOutput`] instead of a plain tuple.
812
+
813
+ Returns:
814
+ The latent representations of the encoded videos. If `return_dict` is True, a
815
+ [`~models.autoencoder_kl.AutoencoderKLOutput`] is returned, otherwise a plain `tuple` is returned.
816
+ """
817
+ if self.use_slicing and x.shape[0] > 1:
818
+ encoded_slices = [self._encode(x_slice) for x_slice in x.split(1)]
819
+ h = torch.cat(encoded_slices)
820
+ else:
821
+ h = self._encode(x)
822
+
823
+ posterior = DiagonalGaussianDistribution(h)
824
+
825
+ if not return_dict:
826
+ return (posterior,)
827
+ return AutoencoderKLOutput(latent_dist=posterior)
828
+
829
+ def _decode(self, z: torch.Tensor, return_dict: bool = True) -> Union[DecoderOutput, torch.Tensor]:
830
+ batch_size, num_channels, num_frames, height, width = z.shape
831
+ tile_latent_min_height = self.tile_sample_min_height // self.spatial_compression_ratio
832
+ tile_latent_min_width = self.tile_sample_min_width // self.spatial_compression_ratio
833
+ tile_latent_min_num_frames = self.tile_sample_min_num_frames // self.temporal_compression_ratio
834
+
835
+ if self.use_framewise_decoding and num_frames > tile_latent_min_num_frames:
836
+ return self._temporal_tiled_decode(z, return_dict=return_dict)
837
+
838
+ if self.use_tiling and (width > tile_latent_min_width or height > tile_latent_min_height):
839
+ return self.tiled_decode(z, return_dict=return_dict)
840
+
841
+ z = self.post_quant_conv(z)
842
+ dec = self.decoder(z)
843
+
844
+ if not return_dict:
845
+ return (dec,)
846
+
847
+ return DecoderOutput(sample=dec)
848
+
849
+ @apply_forward_hook
850
+ def decode(self, z: torch.Tensor, return_dict: bool = True) -> Union[DecoderOutput, torch.Tensor]:
851
+ r"""
852
+ Decode a batch of images.
853
+
854
+ Args:
855
+ z (`torch.Tensor`): Input batch of latent vectors.
856
+ return_dict (`bool`, *optional*, defaults to `True`):
857
+ Whether to return a [`~models.vae.DecoderOutput`] instead of a plain tuple.
858
+
859
+ Returns:
860
+ [`~models.vae.DecoderOutput`] or `tuple`:
861
+ If return_dict is True, a [`~models.vae.DecoderOutput`] is returned, otherwise a plain `tuple` is
862
+ returned.
863
+ """
864
+ if self.use_slicing and z.shape[0] > 1:
865
+ decoded_slices = [self._decode(z_slice).sample for z_slice in z.split(1)]
866
+ decoded = torch.cat(decoded_slices)
867
+ else:
868
+ decoded = self._decode(z).sample
869
+
870
+ if not return_dict:
871
+ return (decoded,)
872
+
873
+ return DecoderOutput(sample=decoded)
874
+
875
+ def blend_v(self, a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor:
876
+ blend_extent = min(a.shape[-2], b.shape[-2], blend_extent)
877
+ for y in range(blend_extent):
878
+ b[:, :, :, y, :] = a[:, :, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, :, y, :] * (
879
+ y / blend_extent
880
+ )
881
+ return b
882
+
883
+ def blend_h(self, a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor:
884
+ blend_extent = min(a.shape[-1], b.shape[-1], blend_extent)
885
+ for x in range(blend_extent):
886
+ b[:, :, :, :, x] = a[:, :, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, :, x] * (
887
+ x / blend_extent
888
+ )
889
+ return b
890
+
891
+ def blend_t(self, a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor:
892
+ blend_extent = min(a.shape[-3], b.shape[-3], blend_extent)
893
+ for x in range(blend_extent):
894
+ b[:, :, x, :, :] = a[:, :, -blend_extent + x, :, :] * (1 - x / blend_extent) + b[:, :, x, :, :] * (
895
+ x / blend_extent
896
+ )
897
+ return b
898
+
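import torch

# Illustration (annotation, not part of the diff): blend_v, blend_h and blend_t all apply the same
# linear cross-fade, shown here in 1D with hypothetical tensors. Across the overlap the weight of
# the previous tile falls from 1 to 0 while the weight of the current tile rises from 0 to 1.
a = torch.ones(8)   # tail of the previous tile
b = torch.zeros(8)  # head of the current tile
blend_extent = 4
for i in range(blend_extent):
    b[i] = a[-blend_extent + i] * (1 - i / blend_extent) + b[i] * (i / blend_extent)
# b[:blend_extent] is now [1.00, 0.75, 0.50, 0.25], so the seam between tiles is smoothed away.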
899
+ def tiled_encode(self, x: torch.Tensor) -> AutoencoderKLOutput:
900
+ r"""Encode a batch of images using a tiled encoder.
901
+
902
+ Args:
903
+ x (`torch.Tensor`): Input batch of videos.
904
+
905
+ Returns:
906
+ `torch.Tensor`:
907
+ The latent representation of the encoded videos.
908
+ """
909
+ batch_size, num_channels, num_frames, height, width = x.shape
910
+ latent_height = height // self.spatial_compression_ratio
911
+ latent_width = width // self.spatial_compression_ratio
912
+
913
+ tile_latent_min_height = self.tile_sample_min_height // self.spatial_compression_ratio
914
+ tile_latent_min_width = self.tile_sample_min_width // self.spatial_compression_ratio
915
+ tile_latent_stride_height = self.tile_sample_stride_height // self.spatial_compression_ratio
916
+ tile_latent_stride_width = self.tile_sample_stride_width // self.spatial_compression_ratio
917
+
918
+ blend_height = tile_latent_min_height - tile_latent_stride_height
919
+ blend_width = tile_latent_min_width - tile_latent_stride_width
920
+
921
+ # Split x into overlapping tiles and encode them separately.
922
+ # The tiles have an overlap to avoid seams between tiles.
923
+ rows = []
924
+ for i in range(0, height, self.tile_sample_stride_height):
925
+ row = []
926
+ for j in range(0, width, self.tile_sample_stride_width):
927
+ tile = x[:, :, :, i : i + self.tile_sample_min_height, j : j + self.tile_sample_min_width]
928
+ tile = self.encoder(tile)
929
+ tile = self.quant_conv(tile)
930
+ row.append(tile)
931
+ rows.append(row)
932
+
933
+ result_rows = []
934
+ for i, row in enumerate(rows):
935
+ result_row = []
936
+ for j, tile in enumerate(row):
937
+ # blend the above tile and the left tile
938
+ # to the current tile and add the current tile to the result row
939
+ if i > 0:
940
+ tile = self.blend_v(rows[i - 1][j], tile, blend_height)
941
+ if j > 0:
942
+ tile = self.blend_h(row[j - 1], tile, blend_width)
943
+ result_row.append(tile[:, :, :, :tile_latent_stride_height, :tile_latent_stride_width])
944
+ result_rows.append(torch.cat(result_row, dim=4))
945
+
946
+ enc = torch.cat(result_rows, dim=3)[:, :, :, :latent_height, :latent_width]
947
+ return enc
948
+
949
+ def tiled_decode(self, z: torch.Tensor, return_dict: bool = True) -> Union[DecoderOutput, torch.Tensor]:
950
+ r"""
951
+ Decode a batch of images using a tiled decoder.
952
+
953
+ Args:
954
+ z (`torch.Tensor`): Input batch of latent vectors.
955
+ return_dict (`bool`, *optional*, defaults to `True`):
956
+ Whether or not to return a [`~models.vae.DecoderOutput`] instead of a plain tuple.
957
+
958
+ Returns:
959
+ [`~models.vae.DecoderOutput`] or `tuple`:
960
+ If return_dict is True, a [`~models.vae.DecoderOutput`] is returned, otherwise a plain `tuple` is
961
+ returned.
962
+ """
963
+
964
+ batch_size, num_channels, num_frames, height, width = z.shape
965
+ sample_height = height * self.spatial_compression_ratio
966
+ sample_width = width * self.spatial_compression_ratio
967
+
968
+ tile_latent_min_height = self.tile_sample_min_height // self.spatial_compression_ratio
969
+ tile_latent_min_width = self.tile_sample_min_width // self.spatial_compression_ratio
970
+ tile_latent_stride_height = self.tile_sample_stride_height // self.spatial_compression_ratio
971
+ tile_latent_stride_width = self.tile_sample_stride_width // self.spatial_compression_ratio
972
+
973
+ blend_height = self.tile_sample_min_height - self.tile_sample_stride_height
974
+ blend_width = self.tile_sample_min_width - self.tile_sample_stride_width
975
+
976
+ # Split z into overlapping tiles and decode them separately.
977
+ # The tiles have an overlap to avoid seams between tiles.
978
+ rows = []
979
+ for i in range(0, height, tile_latent_stride_height):
980
+ row = []
981
+ for j in range(0, width, tile_latent_stride_width):
982
+ tile = z[:, :, :, i : i + tile_latent_min_height, j : j + tile_latent_min_width]
983
+ tile = self.post_quant_conv(tile)
984
+ decoded = self.decoder(tile)
985
+ row.append(decoded)
986
+ rows.append(row)
987
+
988
+ result_rows = []
989
+ for i, row in enumerate(rows):
990
+ result_row = []
991
+ for j, tile in enumerate(row):
992
+ # blend the above tile and the left tile
993
+ # to the current tile and add the current tile to the result row
994
+ if i > 0:
995
+ tile = self.blend_v(rows[i - 1][j], tile, blend_height)
996
+ if j > 0:
997
+ tile = self.blend_h(row[j - 1], tile, blend_width)
998
+ result_row.append(tile[:, :, :, : self.tile_sample_stride_height, : self.tile_sample_stride_width])
999
+ result_rows.append(torch.cat(result_row, dim=-1))
1000
+
1001
+ dec = torch.cat(result_rows, dim=3)[:, :, :, :sample_height, :sample_width]
1002
+
1003
+ if not return_dict:
1004
+ return (dec,)
1005
+ return DecoderOutput(sample=dec)
1006
+
1007
+ def _temporal_tiled_encode(self, x: torch.Tensor) -> AutoencoderKLOutput:
1008
+ batch_size, num_channels, num_frames, height, width = x.shape
1009
+ latent_num_frames = (num_frames - 1) // self.temporal_compression_ratio + 1
1010
+
1011
+ tile_latent_min_num_frames = self.tile_sample_min_num_frames // self.temporal_compression_ratio
1012
+ tile_latent_stride_num_frames = self.tile_sample_stride_num_frames // self.temporal_compression_ratio
1013
+ blend_num_frames = tile_latent_min_num_frames - tile_latent_stride_num_frames
1014
+
1015
+ row = []
1016
+ for i in range(0, num_frames, self.tile_sample_stride_num_frames):
1017
+ tile = x[:, :, i : i + self.tile_sample_min_num_frames + 1, :, :]
1018
+ if self.use_tiling and (height > self.tile_sample_min_height or width > self.tile_sample_min_width):
1019
+ tile = self.tiled_encode(tile)
1020
+ else:
1021
+ tile = self.encoder(tile)
1022
+ tile = self.quant_conv(tile)
1023
+ if i > 0:
1024
+ tile = tile[:, :, 1:, :, :]
1025
+ row.append(tile)
1026
+
1027
+ result_row = []
1028
+ for i, tile in enumerate(row):
1029
+ if i > 0:
1030
+ tile = self.blend_t(row[i - 1], tile, blend_num_frames)
1031
+ result_row.append(tile[:, :, :tile_latent_stride_num_frames, :, :])
1032
+ else:
1033
+ result_row.append(tile[:, :, : tile_latent_stride_num_frames + 1, :, :])
1034
+
1035
+ enc = torch.cat(result_row, dim=2)[:, :, :latent_num_frames]
1036
+ return enc
1037
+
1038
+ def _temporal_tiled_decode(self, z: torch.Tensor, return_dict: bool = True) -> Union[DecoderOutput, torch.Tensor]:
1039
+ batch_size, num_channels, num_frames, height, width = z.shape
1040
+ num_sample_frames = (num_frames - 1) * self.temporal_compression_ratio + 1
1041
+
1042
+ tile_latent_min_height = self.tile_sample_min_height // self.spatial_compression_ratio
1043
+ tile_latent_min_width = self.tile_sample_min_width // self.spatial_compression_ratio
1044
+ tile_latent_min_num_frames = self.tile_sample_min_num_frames // self.temporal_compression_ratio
1045
+ tile_latent_stride_num_frames = self.tile_sample_stride_num_frames // self.temporal_compression_ratio
1046
+ blend_num_frames = self.tile_sample_min_num_frames - self.tile_sample_stride_num_frames
1047
+
1048
+ row = []
1049
+ for i in range(0, num_frames, tile_latent_stride_num_frames):
1050
+ tile = z[:, :, i : i + tile_latent_min_num_frames + 1, :, :]
1051
+ if self.use_tiling and (tile.shape[-1] > tile_latent_min_width or tile.shape[-2] > tile_latent_min_height):
1052
+ decoded = self.tiled_decode(tile, return_dict=True).sample
1053
+ else:
1054
+ tile = self.post_quant_conv(tile)
1055
+ decoded = self.decoder(tile)
1056
+ if i > 0:
1057
+ decoded = decoded[:, :, 1:, :, :]
1058
+ row.append(decoded)
1059
+
1060
+ result_row = []
1061
+ for i, tile in enumerate(row):
1062
+ if i > 0:
1063
+ tile = self.blend_t(row[i - 1], tile, blend_num_frames)
1064
+ result_row.append(tile[:, :, : self.tile_sample_stride_num_frames, :, :])
1065
+ else:
1066
+ result_row.append(tile[:, :, : self.tile_sample_stride_num_frames + 1, :, :])
1067
+
1068
+ dec = torch.cat(result_row, dim=2)[:, :, :num_sample_frames]
1069
+
1070
+ if not return_dict:
1071
+ return (dec,)
1072
+ return DecoderOutput(sample=dec)
1073
+
1074
+ def forward(
1075
+ self,
1076
+ sample: torch.Tensor,
1077
+ sample_posterior: bool = False,
1078
+ return_dict: bool = True,
1079
+ generator: Optional[torch.Generator] = None,
1080
+ ) -> Union[DecoderOutput, torch.Tensor]:
1081
+ r"""
1082
+ Args:
1083
+ sample (`torch.Tensor`): Input sample.
1084
+ sample_posterior (`bool`, *optional*, defaults to `False`):
1085
+ Whether to sample from the posterior.
1086
+ return_dict (`bool`, *optional*, defaults to `True`):
1087
+ Whether or not to return a [`DecoderOutput`] instead of a plain tuple.
1088
+ """
1089
+ x = sample
1090
+ posterior = self.encode(x).latent_dist
1091
+ if sample_posterior:
1092
+ z = posterior.sample(generator=generator)
1093
+ else:
1094
+ z = posterior.mode()
1095
+ dec = self.decode(z, return_dict=return_dict)
1096
+ return dec
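
A short usage sketch (not part of the diff): tiled decoding of a long, high-resolution latent with the switches defined above. The checkpoint id, dtype and shapes are illustrative assumptions.

import torch
from diffusers import AutoencoderKLHunyuanVideo

vae = AutoencoderKLHunyuanVideo.from_pretrained(
    "hunyuanvideo-community/HunyuanVideo", subfolder="vae", torch_dtype=torch.float16  # illustrative checkpoint id
).to("cuda")
vae.enable_tiling()  # spatial tiling on top of the frame-wise decoding enabled by default

latents = torch.randn(1, 16, 9, 64, 64, dtype=torch.float16, device="cuda")  # (B, C, T, H, W) latent, illustrative
with torch.no_grad():
    frames = vae.decode(latents).sample  # roughly (1, 3, 33, 512, 512) after 4x temporal / 8x spatial upsampling
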
exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/models/autoencoders/autoencoder_kl_ltx.py ADDED
@@ -0,0 +1,1557 @@
1
+ # Copyright 2025 The Lightricks team and The HuggingFace Team.
2
+ # All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ from typing import Optional, Tuple, Union
17
+
18
+ import torch
19
+ import torch.nn as nn
20
+
21
+ from ...configuration_utils import ConfigMixin, register_to_config
22
+ from ...loaders import FromOriginalModelMixin
23
+ from ...utils.accelerate_utils import apply_forward_hook
24
+ from ..activations import get_activation
25
+ from ..embeddings import PixArtAlphaCombinedTimestepSizeEmbeddings
26
+ from ..modeling_outputs import AutoencoderKLOutput
27
+ from ..modeling_utils import ModelMixin
28
+ from ..normalization import RMSNorm
29
+ from .vae import DecoderOutput, DiagonalGaussianDistribution
30
+
31
+
32
+ class LTXVideoCausalConv3d(nn.Module):
33
+ def __init__(
34
+ self,
35
+ in_channels: int,
36
+ out_channels: int,
37
+ kernel_size: Union[int, Tuple[int, int, int]] = 3,
38
+ stride: Union[int, Tuple[int, int, int]] = 1,
39
+ dilation: Union[int, Tuple[int, int, int]] = 1,
40
+ groups: int = 1,
41
+ padding_mode: str = "zeros",
42
+ is_causal: bool = True,
43
+ ):
44
+ super().__init__()
45
+
46
+ self.in_channels = in_channels
47
+ self.out_channels = out_channels
48
+ self.is_causal = is_causal
49
+ self.kernel_size = kernel_size if isinstance(kernel_size, tuple) else (kernel_size, kernel_size, kernel_size)
50
+
51
+ dilation = dilation if isinstance(dilation, tuple) else (dilation, 1, 1)
52
+ stride = stride if isinstance(stride, tuple) else (stride, stride, stride)
53
+ height_pad = self.kernel_size[1] // 2
54
+ width_pad = self.kernel_size[2] // 2
55
+ padding = (0, height_pad, width_pad)
56
+
57
+ self.conv = nn.Conv3d(
58
+ in_channels,
59
+ out_channels,
60
+ self.kernel_size,
61
+ stride=stride,
62
+ dilation=dilation,
63
+ groups=groups,
64
+ padding=padding,
65
+ padding_mode=padding_mode,
66
+ )
67
+
68
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
69
+ time_kernel_size = self.kernel_size[0]
70
+
71
+ if self.is_causal:
72
+ pad_left = hidden_states[:, :, :1, :, :].repeat((1, 1, time_kernel_size - 1, 1, 1))
73
+ hidden_states = torch.concatenate([pad_left, hidden_states], dim=2)
74
+ else:
75
+ pad_left = hidden_states[:, :, :1, :, :].repeat((1, 1, (time_kernel_size - 1) // 2, 1, 1))
76
+ pad_right = hidden_states[:, :, -1:, :, :].repeat((1, 1, (time_kernel_size - 1) // 2, 1, 1))
77
+ hidden_states = torch.concatenate([pad_left, hidden_states, pad_right], dim=2)
78
+
79
+ hidden_states = self.conv(hidden_states)
80
+ return hidden_states
81
+
82
+
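import torch

# Illustration (annotation, not part of the diff): with a temporal kernel size of 3 and is_causal=True,
# the first frame is replicated kernel_size[0] - 1 = 2 times and prepended, so each output frame only
# reads inputs at or before its own time step. Spatial padding is handled by nn.Conv3d itself.
x = torch.randn(1, 4, 5, 8, 8)                # (batch, channels, frames, height, width), hypothetical
pad_left = x[:, :, :1].repeat(1, 1, 2, 1, 1)  # two copies of the first frame
x_causal = torch.cat([pad_left, x], dim=2)    # shape (1, 4, 7, 8, 8); fed to the convolution above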
83
+ class LTXVideoResnetBlock3d(nn.Module):
84
+ r"""
85
+ A 3D ResNet block used in the LTXVideo model.
86
+
87
+ Args:
88
+ in_channels (`int`):
89
+ Number of input channels.
90
+ out_channels (`int`, *optional*):
91
+ Number of output channels. If None, defaults to `in_channels`.
92
+ dropout (`float`, defaults to `0.0`):
93
+ Dropout rate.
94
+ eps (`float`, defaults to `1e-6`):
95
+ Epsilon value for normalization layers.
96
+ elementwise_affine (`bool`, defaults to `False`):
97
+ Whether to enable elementwise affinity in the normalization layers.
98
+ non_linearity (`str`, defaults to `"swish"`):
99
+ Activation function to use.
100
+ conv_shortcut (bool, defaults to `False`):
101
+ Whether or not to use a convolution shortcut.
102
+ """
103
+
104
+ def __init__(
105
+ self,
106
+ in_channels: int,
107
+ out_channels: Optional[int] = None,
108
+ dropout: float = 0.0,
109
+ eps: float = 1e-6,
110
+ elementwise_affine: bool = False,
111
+ non_linearity: str = "swish",
112
+ is_causal: bool = True,
113
+ inject_noise: bool = False,
114
+ timestep_conditioning: bool = False,
115
+ ) -> None:
116
+ super().__init__()
117
+
118
+ out_channels = out_channels or in_channels
119
+
120
+ self.nonlinearity = get_activation(non_linearity)
121
+
122
+ self.norm1 = RMSNorm(in_channels, eps=1e-8, elementwise_affine=elementwise_affine)
123
+ self.conv1 = LTXVideoCausalConv3d(
124
+ in_channels=in_channels, out_channels=out_channels, kernel_size=3, is_causal=is_causal
125
+ )
126
+
127
+ self.norm2 = RMSNorm(out_channels, eps=1e-8, elementwise_affine=elementwise_affine)
128
+ self.dropout = nn.Dropout(dropout)
129
+ self.conv2 = LTXVideoCausalConv3d(
130
+ in_channels=out_channels, out_channels=out_channels, kernel_size=3, is_causal=is_causal
131
+ )
132
+
133
+ self.norm3 = None
134
+ self.conv_shortcut = None
135
+ if in_channels != out_channels:
136
+ self.norm3 = nn.LayerNorm(in_channels, eps=eps, elementwise_affine=True, bias=True)
137
+ self.conv_shortcut = LTXVideoCausalConv3d(
138
+ in_channels=in_channels, out_channels=out_channels, kernel_size=1, stride=1, is_causal=is_causal
139
+ )
140
+
141
+ self.per_channel_scale1 = None
142
+ self.per_channel_scale2 = None
143
+ if inject_noise:
144
+ self.per_channel_scale1 = nn.Parameter(torch.zeros(in_channels, 1, 1))
145
+ self.per_channel_scale2 = nn.Parameter(torch.zeros(in_channels, 1, 1))
146
+
147
+ self.scale_shift_table = None
148
+ if timestep_conditioning:
149
+ self.scale_shift_table = nn.Parameter(torch.randn(4, in_channels) / in_channels**0.5)
150
+
151
+ def forward(
152
+ self, inputs: torch.Tensor, temb: Optional[torch.Tensor] = None, generator: Optional[torch.Generator] = None
153
+ ) -> torch.Tensor:
154
+ hidden_states = inputs
155
+
156
+ hidden_states = self.norm1(hidden_states.movedim(1, -1)).movedim(-1, 1)
157
+
158
+ if self.scale_shift_table is not None:
159
+ temb = temb.unflatten(1, (4, -1)) + self.scale_shift_table[None, ..., None, None, None]
160
+ shift_1, scale_1, shift_2, scale_2 = temb.unbind(dim=1)
161
+ hidden_states = hidden_states * (1 + scale_1) + shift_1
162
+
163
+ hidden_states = self.nonlinearity(hidden_states)
164
+ hidden_states = self.conv1(hidden_states)
165
+
166
+ if self.per_channel_scale1 is not None:
167
+ spatial_shape = hidden_states.shape[-2:]
168
+ spatial_noise = torch.randn(
169
+ spatial_shape, generator=generator, device=hidden_states.device, dtype=hidden_states.dtype
170
+ )[None]
171
+ hidden_states = hidden_states + (spatial_noise * self.per_channel_scale1)[None, :, None, ...]
172
+
173
+ hidden_states = self.norm2(hidden_states.movedim(1, -1)).movedim(-1, 1)
174
+
175
+ if self.scale_shift_table is not None:
176
+ hidden_states = hidden_states * (1 + scale_2) + shift_2
177
+
178
+ hidden_states = self.nonlinearity(hidden_states)
179
+ hidden_states = self.dropout(hidden_states)
180
+ hidden_states = self.conv2(hidden_states)
181
+
182
+ if self.per_channel_scale2 is not None:
183
+ spatial_shape = hidden_states.shape[-2:]
184
+ spatial_noise = torch.randn(
185
+ spatial_shape, generator=generator, device=hidden_states.device, dtype=hidden_states.dtype
186
+ )[None]
187
+ hidden_states = hidden_states + (spatial_noise * self.per_channel_scale2)[None, :, None, ...]
188
+
189
+ if self.norm3 is not None:
190
+ inputs = self.norm3(inputs.movedim(1, -1)).movedim(-1, 1)
191
+
192
+ if self.conv_shortcut is not None:
193
+ inputs = self.conv_shortcut(inputs)
194
+
195
+ hidden_states = hidden_states + inputs
196
+ return hidden_states
197
+
198
+
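# Illustrative sketch (not part of the diff): the scale/shift timestep conditioning applied by
# LTXVideoResnetBlock3d above when `timestep_conditioning=True`. Shapes are arbitrary examples.
import torch

batch, channels = 2, 32
hidden = torch.randn(batch, channels, 4, 8, 8)            # (B, C, T, H, W)
temb = torch.randn(batch, 4 * channels, 1, 1, 1)          # packed (shift_1, scale_1, shift_2, scale_2)
scale_shift_table = torch.randn(4, channels) / channels**0.5

temb = temb.unflatten(1, (4, -1)) + scale_shift_table[None, ..., None, None, None]
shift_1, scale_1, shift_2, scale_2 = temb.unbind(dim=1)   # each is (B, C, 1, 1, 1)
hidden = hidden * (1 + scale_1) + shift_1                 # applied after norm1; scale_2/shift_2 after norm2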
199
+ class LTXVideoDownsampler3d(nn.Module):
200
+ def __init__(
201
+ self,
202
+ in_channels: int,
203
+ out_channels: int,
204
+ stride: Union[int, Tuple[int, int, int]] = 1,
205
+ is_causal: bool = True,
206
+ padding_mode: str = "zeros",
207
+ ) -> None:
208
+ super().__init__()
209
+
210
+ self.stride = stride if isinstance(stride, tuple) else (stride, stride, stride)
211
+ self.group_size = (in_channels * stride[0] * stride[1] * stride[2]) // out_channels
212
+
213
+ out_channels = out_channels // (self.stride[0] * self.stride[1] * self.stride[2])
214
+
215
+ self.conv = LTXVideoCausalConv3d(
216
+ in_channels=in_channels,
217
+ out_channels=out_channels,
218
+ kernel_size=3,
219
+ stride=1,
220
+ is_causal=is_causal,
221
+ padding_mode=padding_mode,
222
+ )
223
+
224
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
225
+ hidden_states = torch.cat([hidden_states[:, :, : self.stride[0] - 1], hidden_states], dim=2)
226
+
227
+ residual = (
228
+ hidden_states.unflatten(4, (-1, self.stride[2]))
229
+ .unflatten(3, (-1, self.stride[1]))
230
+ .unflatten(2, (-1, self.stride[0]))
231
+ )
232
+ residual = residual.permute(0, 1, 3, 5, 7, 2, 4, 6).flatten(1, 4)
233
+ residual = residual.unflatten(1, (-1, self.group_size))
234
+ residual = residual.mean(dim=2)
235
+
236
+ hidden_states = self.conv(hidden_states)
237
+ hidden_states = (
238
+ hidden_states.unflatten(4, (-1, self.stride[2]))
239
+ .unflatten(3, (-1, self.stride[1]))
240
+ .unflatten(2, (-1, self.stride[0]))
241
+ )
242
+ hidden_states = hidden_states.permute(0, 1, 3, 5, 7, 2, 4, 6).flatten(1, 4)
243
+ hidden_states = hidden_states + residual
244
+
245
+ return hidden_states
246
+
247
+
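# Illustrative sketch (not part of the diff): the space/time-to-channel rearrangement performed by
# LTXVideoDownsampler3d above, with arbitrary example sizes and stride (1, 2, 2).
import torch

stride = (1, 2, 2)
x = torch.randn(1, 4, 3, 8, 8)  # (B, C, T, H, W)
x = x.unflatten(4, (-1, stride[2])).unflatten(3, (-1, stride[1])).unflatten(2, (-1, stride[0]))
x = x.permute(0, 1, 3, 5, 7, 2, 4, 6).flatten(1, 4)
print(x.shape)  # torch.Size([1, 16, 3, 4, 4]); channels grow by stride[0] * stride[1] * stride[2]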
248
+ class LTXVideoUpsampler3d(nn.Module):
249
+ def __init__(
250
+ self,
251
+ in_channels: int,
252
+ stride: Union[int, Tuple[int, int, int]] = 1,
253
+ is_causal: bool = True,
254
+ residual: bool = False,
255
+ upscale_factor: int = 1,
256
+ padding_mode: str = "zeros",
257
+ ) -> None:
258
+ super().__init__()
259
+
260
+ self.stride = stride if isinstance(stride, tuple) else (stride, stride, stride)
261
+ self.residual = residual
262
+ self.upscale_factor = upscale_factor
263
+
264
+ out_channels = (in_channels * stride[0] * stride[1] * stride[2]) // upscale_factor
265
+
266
+ self.conv = LTXVideoCausalConv3d(
267
+ in_channels=in_channels,
268
+ out_channels=out_channels,
269
+ kernel_size=3,
270
+ stride=1,
271
+ is_causal=is_causal,
272
+ padding_mode=padding_mode,
273
+ )
274
+
275
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
276
+ batch_size, num_channels, num_frames, height, width = hidden_states.shape
277
+
278
+ if self.residual:
279
+ residual = hidden_states.reshape(
280
+ batch_size, -1, self.stride[0], self.stride[1], self.stride[2], num_frames, height, width
281
+ )
282
+ residual = residual.permute(0, 1, 5, 2, 6, 3, 7, 4).flatten(6, 7).flatten(4, 5).flatten(2, 3)
283
+ repeats = (self.stride[0] * self.stride[1] * self.stride[2]) // self.upscale_factor
284
+ residual = residual.repeat(1, repeats, 1, 1, 1)
285
+ residual = residual[:, :, self.stride[0] - 1 :]
286
+
287
+ hidden_states = self.conv(hidden_states)
288
+ hidden_states = hidden_states.reshape(
289
+ batch_size, -1, self.stride[0], self.stride[1], self.stride[2], num_frames, height, width
290
+ )
291
+ hidden_states = hidden_states.permute(0, 1, 5, 2, 6, 3, 7, 4).flatten(6, 7).flatten(4, 5).flatten(2, 3)
292
+ hidden_states = hidden_states[:, :, self.stride[0] - 1 :]
293
+
294
+ if self.residual:
295
+ hidden_states = hidden_states + residual
296
+
297
+ return hidden_states
298
+
299
+
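# Illustrative sketch (not part of the diff): the channel-to-space rearrangement performed by
# LTXVideoUpsampler3d above, with arbitrary example sizes and stride (2, 2, 2).
import torch

stride = (2, 2, 2)
b, c, t, h, w = 1, 32, 3, 4, 4
x = torch.randn(b, c, t, h, w)  # channels carry a stride[0] * stride[1] * stride[2] factor
x = x.reshape(b, -1, stride[0], stride[1], stride[2], t, h, w)
x = x.permute(0, 1, 5, 2, 6, 3, 7, 4).flatten(6, 7).flatten(4, 5).flatten(2, 3)
print(x.shape)  # torch.Size([1, 4, 6, 8, 8]); each of T, H, W grows by its stride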
300
+ class LTXVideoDownBlock3D(nn.Module):
301
+ r"""
302
+ Down block used in the LTXVideo model.
303
+
304
+ Args:
305
+ in_channels (`int`):
306
+ Number of input channels.
307
+ out_channels (`int`, *optional*):
308
+ Number of output channels. If None, defaults to `in_channels`.
309
+ num_layers (`int`, defaults to `1`):
310
+ Number of resnet layers.
311
+ dropout (`float`, defaults to `0.0`):
312
+ Dropout rate.
313
+ resnet_eps (`float`, defaults to `1e-6`):
314
+ Epsilon value for normalization layers.
315
+ resnet_act_fn (`str`, defaults to `"swish"`):
316
+ Activation function to use.
317
+ spatio_temporal_scale (`bool`, defaults to `True`):
+ Whether or not to add a spatio-temporal downsampling layer. If not used, the output dimensions (spatial and temporal) are the same as the input dimensions.
320
+ is_causal (`bool`, defaults to `True`):
321
+ Whether this layer behaves causally (future frames depend only on past frames) or not.
322
+ """
323
+
324
+ _supports_gradient_checkpointing = True
325
+
326
+ def __init__(
327
+ self,
328
+ in_channels: int,
329
+ out_channels: Optional[int] = None,
330
+ num_layers: int = 1,
331
+ dropout: float = 0.0,
332
+ resnet_eps: float = 1e-6,
333
+ resnet_act_fn: str = "swish",
334
+ spatio_temporal_scale: bool = True,
335
+ is_causal: bool = True,
336
+ ):
337
+ super().__init__()
338
+
339
+ out_channels = out_channels or in_channels
340
+
341
+ resnets = []
342
+ for _ in range(num_layers):
343
+ resnets.append(
344
+ LTXVideoResnetBlock3d(
345
+ in_channels=in_channels,
346
+ out_channels=in_channels,
347
+ dropout=dropout,
348
+ eps=resnet_eps,
349
+ non_linearity=resnet_act_fn,
350
+ is_causal=is_causal,
351
+ )
352
+ )
353
+ self.resnets = nn.ModuleList(resnets)
354
+
355
+ self.downsamplers = None
356
+ if spatio_temporal_scale:
357
+ self.downsamplers = nn.ModuleList(
358
+ [
359
+ LTXVideoCausalConv3d(
360
+ in_channels=in_channels,
361
+ out_channels=in_channels,
362
+ kernel_size=3,
363
+ stride=(2, 2, 2),
364
+ is_causal=is_causal,
365
+ )
366
+ ]
367
+ )
368
+
369
+ self.conv_out = None
370
+ if in_channels != out_channels:
371
+ self.conv_out = LTXVideoResnetBlock3d(
372
+ in_channels=in_channels,
373
+ out_channels=out_channels,
374
+ dropout=dropout,
375
+ eps=resnet_eps,
376
+ non_linearity=resnet_act_fn,
377
+ is_causal=is_causal,
378
+ )
379
+
380
+ self.gradient_checkpointing = False
381
+
382
+ def forward(
383
+ self,
384
+ hidden_states: torch.Tensor,
385
+ temb: Optional[torch.Tensor] = None,
386
+ generator: Optional[torch.Generator] = None,
387
+ ) -> torch.Tensor:
388
+ r"""Forward method of the `LTXVideoDownBlock3D` class."""
389
+
390
+ for i, resnet in enumerate(self.resnets):
391
+ if torch.is_grad_enabled() and self.gradient_checkpointing:
392
+ hidden_states = self._gradient_checkpointing_func(resnet, hidden_states, temb, generator)
393
+ else:
394
+ hidden_states = resnet(hidden_states, temb, generator)
395
+
396
+ if self.downsamplers is not None:
397
+ for downsampler in self.downsamplers:
398
+ hidden_states = downsampler(hidden_states)
399
+
400
+ if self.conv_out is not None:
401
+ hidden_states = self.conv_out(hidden_states, temb, generator)
402
+
403
+ return hidden_states
404
+
405
+
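# Illustrative sketch (not part of the diff): the gradient-checkpointing dispatch used by the blocks
# above, expressed with torch.utils.checkpoint directly (diffusers routes this through an internal
# `_gradient_checkpointing_func` helper).
import torch
from torch.utils.checkpoint import checkpoint

def run_resnets(resnets, hidden_states, gradient_checkpointing=False):
    for resnet in resnets:
        if torch.is_grad_enabled() and gradient_checkpointing:
            # Recompute activations during the backward pass to trade compute for memory.
            hidden_states = checkpoint(resnet, hidden_states, use_reentrant=False)
        else:
            hidden_states = resnet(hidden_states)
    return hidden_states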
406
+ class LTXVideo095DownBlock3D(nn.Module):
407
+ r"""
408
+ Down block used in the LTXVideo model.
409
+
410
+ Args:
411
+ in_channels (`int`):
412
+ Number of input channels.
413
+ out_channels (`int`, *optional*):
414
+ Number of output channels. If None, defaults to `in_channels`.
415
+ num_layers (`int`, defaults to `1`):
416
+ Number of resnet layers.
417
+ dropout (`float`, defaults to `0.0`):
418
+ Dropout rate.
419
+ resnet_eps (`float`, defaults to `1e-6`):
420
+ Epsilon value for normalization layers.
421
+ resnet_act_fn (`str`, defaults to `"swish"`):
422
+ Activation function to use.
423
+ spatio_temporal_scale (`bool`, defaults to `True`):
+ Whether or not to add a downsampling layer (see `downsample_type`). If not used, the output dimensions are the same as the input dimensions.
426
+ is_causal (`bool`, defaults to `True`):
427
+ Whether this layer behaves causally (future frames depend only on past frames) or not.
428
+ """
429
+
430
+ _supports_gradient_checkpointing = True
431
+
432
+ def __init__(
433
+ self,
434
+ in_channels: int,
435
+ out_channels: Optional[int] = None,
436
+ num_layers: int = 1,
437
+ dropout: float = 0.0,
438
+ resnet_eps: float = 1e-6,
439
+ resnet_act_fn: str = "swish",
440
+ spatio_temporal_scale: bool = True,
441
+ is_causal: bool = True,
442
+ downsample_type: str = "conv",
443
+ ):
444
+ super().__init__()
445
+
446
+ out_channels = out_channels or in_channels
447
+
448
+ resnets = []
449
+ for _ in range(num_layers):
450
+ resnets.append(
451
+ LTXVideoResnetBlock3d(
452
+ in_channels=in_channels,
453
+ out_channels=in_channels,
454
+ dropout=dropout,
455
+ eps=resnet_eps,
456
+ non_linearity=resnet_act_fn,
457
+ is_causal=is_causal,
458
+ )
459
+ )
460
+ self.resnets = nn.ModuleList(resnets)
461
+
462
+ self.downsamplers = None
463
+ if spatio_temporal_scale:
464
+ self.downsamplers = nn.ModuleList()
465
+
466
+ if downsample_type == "conv":
467
+ self.downsamplers.append(
468
+ LTXVideoCausalConv3d(
469
+ in_channels=in_channels,
470
+ out_channels=in_channels,
471
+ kernel_size=3,
472
+ stride=(2, 2, 2),
473
+ is_causal=is_causal,
474
+ )
475
+ )
476
+ elif downsample_type == "spatial":
477
+ self.downsamplers.append(
478
+ LTXVideoDownsampler3d(
479
+ in_channels=in_channels, out_channels=out_channels, stride=(1, 2, 2), is_causal=is_causal
480
+ )
481
+ )
482
+ elif downsample_type == "temporal":
483
+ self.downsamplers.append(
484
+ LTXVideoDownsampler3d(
485
+ in_channels=in_channels, out_channels=out_channels, stride=(2, 1, 1), is_causal=is_causal
486
+ )
487
+ )
488
+ elif downsample_type == "spatiotemporal":
489
+ self.downsamplers.append(
490
+ LTXVideoDownsampler3d(
491
+ in_channels=in_channels, out_channels=out_channels, stride=(2, 2, 2), is_causal=is_causal
492
+ )
493
+ )
494
+
495
+ self.gradient_checkpointing = False
496
+
497
+ def forward(
498
+ self,
499
+ hidden_states: torch.Tensor,
500
+ temb: Optional[torch.Tensor] = None,
501
+ generator: Optional[torch.Generator] = None,
502
+ ) -> torch.Tensor:
503
+ r"""Forward method of the `LTXVideo095DownBlock3D` class."""
504
+
505
+ for i, resnet in enumerate(self.resnets):
506
+ if torch.is_grad_enabled() and self.gradient_checkpointing:
507
+ hidden_states = self._gradient_checkpointing_func(resnet, hidden_states, temb, generator)
508
+ else:
509
+ hidden_states = resnet(hidden_states, temb, generator)
510
+
511
+ if self.downsamplers is not None:
512
+ for downsampler in self.downsamplers:
513
+ hidden_states = downsampler(hidden_states)
514
+
515
+ return hidden_states
516
+
517
+
518
+ # Adapted from diffusers.models.autoencoders.autoencoder_kl_cogvideox.CogVideoMidBlock3d
519
+ class LTXVideoMidBlock3d(nn.Module):
520
+ r"""
521
+ A middle block used in the LTXVideo model.
522
+
523
+ Args:
524
+ in_channels (`int`):
525
+ Number of input channels.
526
+ num_layers (`int`, defaults to `1`):
527
+ Number of resnet layers.
528
+ dropout (`float`, defaults to `0.0`):
529
+ Dropout rate.
530
+ resnet_eps (`float`, defaults to `1e-6`):
531
+ Epsilon value for normalization layers.
532
+ resnet_act_fn (`str`, defaults to `"swish"`):
533
+ Activation function to use.
534
+ is_causal (`bool`, defaults to `True`):
535
+ Whether this layer behaves causally (future frames depend only on past frames) or not.
536
+ """
537
+
538
+ _supports_gradient_checkpointing = True
539
+
540
+ def __init__(
541
+ self,
542
+ in_channels: int,
543
+ num_layers: int = 1,
544
+ dropout: float = 0.0,
545
+ resnet_eps: float = 1e-6,
546
+ resnet_act_fn: str = "swish",
547
+ is_causal: bool = True,
548
+ inject_noise: bool = False,
549
+ timestep_conditioning: bool = False,
550
+ ) -> None:
551
+ super().__init__()
552
+
553
+ self.time_embedder = None
554
+ if timestep_conditioning:
555
+ self.time_embedder = PixArtAlphaCombinedTimestepSizeEmbeddings(in_channels * 4, 0)
556
+
557
+ resnets = []
558
+ for _ in range(num_layers):
559
+ resnets.append(
560
+ LTXVideoResnetBlock3d(
561
+ in_channels=in_channels,
562
+ out_channels=in_channels,
563
+ dropout=dropout,
564
+ eps=resnet_eps,
565
+ non_linearity=resnet_act_fn,
566
+ is_causal=is_causal,
567
+ inject_noise=inject_noise,
568
+ timestep_conditioning=timestep_conditioning,
569
+ )
570
+ )
571
+ self.resnets = nn.ModuleList(resnets)
572
+
573
+ self.gradient_checkpointing = False
574
+
575
+ def forward(
576
+ self,
577
+ hidden_states: torch.Tensor,
578
+ temb: Optional[torch.Tensor] = None,
579
+ generator: Optional[torch.Generator] = None,
580
+ ) -> torch.Tensor:
581
+ r"""Forward method of the `LTXVideoMidBlock3d` class."""
582
+
583
+ if self.time_embedder is not None:
584
+ temb = self.time_embedder(
585
+ timestep=temb.flatten(),
586
+ resolution=None,
587
+ aspect_ratio=None,
588
+ batch_size=hidden_states.size(0),
589
+ hidden_dtype=hidden_states.dtype,
590
+ )
591
+ temb = temb.view(hidden_states.size(0), -1, 1, 1, 1)
592
+
593
+ for i, resnet in enumerate(self.resnets):
594
+ if torch.is_grad_enabled() and self.gradient_checkpointing:
595
+ hidden_states = self._gradient_checkpointing_func(resnet, hidden_states, temb, generator)
596
+ else:
597
+ hidden_states = resnet(hidden_states, temb, generator)
598
+
599
+ return hidden_states
600
+
601
+
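# Illustrative sketch (not part of the diff): how a scalar timestep is embedded once and broadcast
# over the 5D activations before the per-channel scale/shift above. The linear layer is a
# hypothetical stand-in for the PixArt-style embedder, not the actual implementation.
import torch
import torch.nn as nn

batch, channels = 2, 32
hidden = torch.randn(batch, channels, 4, 8, 8)
embedder = nn.Linear(1, channels * 4)      # hypothetical stand-in for the timestep embedder
timestep = torch.rand(batch, 1)
temb = embedder(timestep)                  # (B, 4 * C)
temb = temb.view(batch, -1, 1, 1, 1)       # broadcastable over (T, H, W)
assert temb.shape == (batch, 4 * channels, 1, 1, 1)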
602
+ class LTXVideoUpBlock3d(nn.Module):
603
+ r"""
604
+ Up block used in the LTXVideo model.
605
+
606
+ Args:
607
+ in_channels (`int`):
608
+ Number of input channels.
609
+ out_channels (`int`, *optional*):
610
+ Number of output channels. If None, defaults to `in_channels`.
611
+ num_layers (`int`, defaults to `1`):
612
+ Number of resnet layers.
613
+ dropout (`float`, defaults to `0.0`):
614
+ Dropout rate.
615
+ resnet_eps (`float`, defaults to `1e-6`):
616
+ Epsilon value for normalization layers.
617
+ resnet_act_fn (`str`, defaults to `"swish"`):
618
+ Activation function to use.
619
+ spatio_temporal_scale (`bool`, defaults to `True`):
+ Whether or not to add a spatio-temporal upsampling layer. If not used, the output dimensions are the same as the input dimensions.
622
+ is_causal (`bool`, defaults to `True`):
623
+ Whether this layer behaves causally (future frames depend only on past frames) or not.
624
+ """
625
+
626
+ _supports_gradient_checkpointing = True
627
+
628
+ def __init__(
629
+ self,
630
+ in_channels: int,
631
+ out_channels: Optional[int] = None,
632
+ num_layers: int = 1,
633
+ dropout: float = 0.0,
634
+ resnet_eps: float = 1e-6,
635
+ resnet_act_fn: str = "swish",
636
+ spatio_temporal_scale: bool = True,
637
+ is_causal: bool = True,
638
+ inject_noise: bool = False,
639
+ timestep_conditioning: bool = False,
640
+ upsample_residual: bool = False,
641
+ upscale_factor: int = 1,
642
+ ):
643
+ super().__init__()
644
+
645
+ out_channels = out_channels or in_channels
646
+
647
+ self.time_embedder = None
648
+ if timestep_conditioning:
649
+ self.time_embedder = PixArtAlphaCombinedTimestepSizeEmbeddings(in_channels * 4, 0)
650
+
651
+ self.conv_in = None
652
+ if in_channels != out_channels:
653
+ self.conv_in = LTXVideoResnetBlock3d(
654
+ in_channels=in_channels,
655
+ out_channels=out_channels,
656
+ dropout=dropout,
657
+ eps=resnet_eps,
658
+ non_linearity=resnet_act_fn,
659
+ is_causal=is_causal,
660
+ inject_noise=inject_noise,
661
+ timestep_conditioning=timestep_conditioning,
662
+ )
663
+
664
+ self.upsamplers = None
665
+ if spatio_temporal_scale:
666
+ self.upsamplers = nn.ModuleList(
667
+ [
668
+ LTXVideoUpsampler3d(
669
+ out_channels * upscale_factor,
670
+ stride=(2, 2, 2),
671
+ is_causal=is_causal,
672
+ residual=upsample_residual,
673
+ upscale_factor=upscale_factor,
674
+ )
675
+ ]
676
+ )
677
+
678
+ resnets = []
679
+ for _ in range(num_layers):
680
+ resnets.append(
681
+ LTXVideoResnetBlock3d(
682
+ in_channels=out_channels,
683
+ out_channels=out_channels,
684
+ dropout=dropout,
685
+ eps=resnet_eps,
686
+ non_linearity=resnet_act_fn,
687
+ is_causal=is_causal,
688
+ inject_noise=inject_noise,
689
+ timestep_conditioning=timestep_conditioning,
690
+ )
691
+ )
692
+ self.resnets = nn.ModuleList(resnets)
693
+
694
+ self.gradient_checkpointing = False
695
+
696
+ def forward(
697
+ self,
698
+ hidden_states: torch.Tensor,
699
+ temb: Optional[torch.Tensor] = None,
700
+ generator: Optional[torch.Generator] = None,
701
+ ) -> torch.Tensor:
702
+ if self.conv_in is not None:
703
+ hidden_states = self.conv_in(hidden_states, temb, generator)
704
+
705
+ if self.time_embedder is not None:
706
+ temb = self.time_embedder(
707
+ timestep=temb.flatten(),
708
+ resolution=None,
709
+ aspect_ratio=None,
710
+ batch_size=hidden_states.size(0),
711
+ hidden_dtype=hidden_states.dtype,
712
+ )
713
+ temb = temb.view(hidden_states.size(0), -1, 1, 1, 1)
714
+
715
+ if self.upsamplers is not None:
716
+ for upsampler in self.upsamplers:
717
+ hidden_states = upsampler(hidden_states)
718
+
719
+ for i, resnet in enumerate(self.resnets):
720
+ if torch.is_grad_enabled() and self.gradient_checkpointing:
721
+ hidden_states = self._gradient_checkpointing_func(resnet, hidden_states, temb, generator)
722
+ else:
723
+ hidden_states = resnet(hidden_states, temb, generator)
724
+
725
+ return hidden_states
726
+
727
+
728
+ class LTXVideoEncoder3d(nn.Module):
729
+ r"""
730
+ The `LTXVideoEncoder3d` layer of a variational autoencoder that encodes input video samples to its latent
731
+ representation.
732
+
733
+ Args:
734
+ in_channels (`int`, defaults to 3):
735
+ Number of input channels.
736
+ out_channels (`int`, defaults to 128):
737
+ Number of latent channels.
738
+ block_out_channels (`Tuple[int, ...]`, defaults to `(128, 256, 512, 512)`):
739
+ The number of output channels for each block.
740
+ spatio_temporal_scaling (`Tuple[bool, ...]`, defaults to `(True, True, True, False)`):
741
+ Whether a block should contain spatio-temporal downscaling layers or not.
742
+ layers_per_block (`Tuple[int, ...]`, defaults to `(4, 3, 3, 3, 4)`):
743
+ The number of layers per block.
744
+ patch_size (`int`, defaults to `4`):
745
+ The size of spatial patches.
746
+ patch_size_t (`int`, defaults to `1`):
747
+ The size of temporal patches.
748
+ resnet_norm_eps (`float`, defaults to `1e-6`):
749
+ Epsilon value for ResNet normalization layers.
750
+ is_causal (`bool`, defaults to `True`):
751
+ Whether this layer behaves causally (future frames depend only on past frames) or not.
752
+ """
753
+
754
+ def __init__(
755
+ self,
756
+ in_channels: int = 3,
757
+ out_channels: int = 128,
758
+ block_out_channels: Tuple[int, ...] = (128, 256, 512, 512),
759
+ down_block_types: Tuple[str, ...] = (
760
+ "LTXVideoDownBlock3D",
761
+ "LTXVideoDownBlock3D",
762
+ "LTXVideoDownBlock3D",
763
+ "LTXVideoDownBlock3D",
764
+ ),
765
+ spatio_temporal_scaling: Tuple[bool, ...] = (True, True, True, False),
766
+ layers_per_block: Tuple[int, ...] = (4, 3, 3, 3, 4),
767
+ downsample_type: Tuple[str, ...] = ("conv", "conv", "conv", "conv"),
768
+ patch_size: int = 4,
769
+ patch_size_t: int = 1,
770
+ resnet_norm_eps: float = 1e-6,
771
+ is_causal: bool = True,
772
+ ):
773
+ super().__init__()
774
+
775
+ self.patch_size = patch_size
776
+ self.patch_size_t = patch_size_t
777
+ self.in_channels = in_channels * patch_size**2
778
+
779
+ output_channel = block_out_channels[0]
780
+
781
+ self.conv_in = LTXVideoCausalConv3d(
782
+ in_channels=self.in_channels,
783
+ out_channels=output_channel,
784
+ kernel_size=3,
785
+ stride=1,
786
+ is_causal=is_causal,
787
+ )
788
+
789
+ # down blocks
790
+ is_ltx_095 = down_block_types[-1] == "LTXVideo095DownBlock3D"
791
+ num_block_out_channels = len(block_out_channels) - (1 if is_ltx_095 else 0)
792
+ self.down_blocks = nn.ModuleList([])
793
+ for i in range(num_block_out_channels):
794
+ input_channel = output_channel
795
+ if not is_ltx_095:
796
+ output_channel = block_out_channels[i + 1] if i + 1 < num_block_out_channels else block_out_channels[i]
797
+ else:
798
+ output_channel = block_out_channels[i + 1]
799
+
800
+ if down_block_types[i] == "LTXVideoDownBlock3D":
801
+ down_block = LTXVideoDownBlock3D(
802
+ in_channels=input_channel,
803
+ out_channels=output_channel,
804
+ num_layers=layers_per_block[i],
805
+ resnet_eps=resnet_norm_eps,
806
+ spatio_temporal_scale=spatio_temporal_scaling[i],
807
+ is_causal=is_causal,
808
+ )
809
+ elif down_block_types[i] == "LTXVideo095DownBlock3D":
810
+ down_block = LTXVideo095DownBlock3D(
811
+ in_channels=input_channel,
812
+ out_channels=output_channel,
813
+ num_layers=layers_per_block[i],
814
+ resnet_eps=resnet_norm_eps,
815
+ spatio_temporal_scale=spatio_temporal_scaling[i],
816
+ is_causal=is_causal,
817
+ downsample_type=downsample_type[i],
818
+ )
819
+ else:
820
+ raise ValueError(f"Unknown down block type: {down_block_types[i]}")
821
+
822
+ self.down_blocks.append(down_block)
823
+
824
+ # mid block
825
+ self.mid_block = LTXVideoMidBlock3d(
826
+ in_channels=output_channel,
827
+ num_layers=layers_per_block[-1],
828
+ resnet_eps=resnet_norm_eps,
829
+ is_causal=is_causal,
830
+ )
831
+
832
+ # out
833
+ self.norm_out = RMSNorm(out_channels, eps=1e-8, elementwise_affine=False)
834
+ self.conv_act = nn.SiLU()
835
+ self.conv_out = LTXVideoCausalConv3d(
836
+ in_channels=output_channel, out_channels=out_channels + 1, kernel_size=3, stride=1, is_causal=is_causal
837
+ )
838
+
839
+ self.gradient_checkpointing = False
840
+
841
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
842
+ r"""The forward method of the `LTXVideoEncoder3d` class."""
843
+
844
+ p = self.patch_size
845
+ p_t = self.patch_size_t
846
+
847
+ batch_size, num_channels, num_frames, height, width = hidden_states.shape
848
+ post_patch_num_frames = num_frames // p_t
849
+ post_patch_height = height // p
850
+ post_patch_width = width // p
851
+
852
+ hidden_states = hidden_states.reshape(
853
+ batch_size, num_channels, post_patch_num_frames, p_t, post_patch_height, p, post_patch_width, p
854
+ )
855
+ # Thanks for driving me insane with the weird patching order :(
856
+ hidden_states = hidden_states.permute(0, 1, 3, 7, 5, 2, 4, 6).flatten(1, 4)
857
+ hidden_states = self.conv_in(hidden_states)
858
+
859
+ if torch.is_grad_enabled() and self.gradient_checkpointing:
860
+ for down_block in self.down_blocks:
861
+ hidden_states = self._gradient_checkpointing_func(down_block, hidden_states)
862
+
863
+ hidden_states = self._gradient_checkpointing_func(self.mid_block, hidden_states)
864
+ else:
865
+ for down_block in self.down_blocks:
866
+ hidden_states = down_block(hidden_states)
867
+
868
+ hidden_states = self.mid_block(hidden_states)
869
+
870
+ hidden_states = self.norm_out(hidden_states.movedim(1, -1)).movedim(-1, 1)
871
+ hidden_states = self.conv_act(hidden_states)
872
+ hidden_states = self.conv_out(hidden_states)
873
+
874
+ last_channel = hidden_states[:, -1:]
875
+ last_channel = last_channel.repeat(1, hidden_states.size(1) - 2, 1, 1, 1)
876
+ hidden_states = torch.cat([hidden_states, last_channel], dim=1)
877
+
878
+ return hidden_states
879
+
880
+
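# Illustrative sketch (not part of the diff): the patchification the encoder applies before
# `conv_in`, with arbitrary example sizes (patch_size=4, patch_size_t=1).
import torch

p, p_t = 4, 1
x = torch.randn(1, 3, 8, 64, 64)  # (B, C, F, H, W)
b, c, f, h, w = x.shape
x = x.reshape(b, c, f // p_t, p_t, h // p, p, w // p, p)
x = x.permute(0, 1, 3, 7, 5, 2, 4, 6).flatten(1, 4)
print(x.shape)  # torch.Size([1, 48, 8, 16, 16]); 3 * 1 * 4 * 4 channels, H and W shrink by 4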
881
+ class LTXVideoDecoder3d(nn.Module):
882
+ r"""
883
+ The `LTXVideoDecoder3d` layer of a variational autoencoder that decodes its latent representation into an output
884
+ sample.
885
+
886
+ Args:
887
+ in_channels (`int`, defaults to 128):
888
+ Number of latent channels.
889
+ out_channels (`int`, defaults to 3):
890
+ Number of output channels.
891
+ block_out_channels (`Tuple[int, ...]`, defaults to `(128, 256, 512, 512)`):
892
+ The number of output channels for each block.
893
+ spatio_temporal_scaling (`Tuple[bool, ...]`, defaults to `(True, True, True, False)`):
894
+ Whether a block should contain spatio-temporal upscaling layers or not.
895
+ layers_per_block (`Tuple[int, ...]`, defaults to `(4, 3, 3, 3, 4)`):
896
+ The number of layers per block.
897
+ patch_size (`int`, defaults to `4`):
898
+ The size of spatial patches.
899
+ patch_size_t (`int`, defaults to `1`):
900
+ The size of temporal patches.
901
+ resnet_norm_eps (`float`, defaults to `1e-6`):
902
+ Epsilon value for ResNet normalization layers.
903
+ is_causal (`bool`, defaults to `False`):
904
+ Whether this layer behaves causally (future frames depend only on past frames) or not.
905
+ timestep_conditioning (`bool`, defaults to `False`):
906
+ Whether to condition the model on timesteps.
907
+ """
908
+
909
+ def __init__(
910
+ self,
911
+ in_channels: int = 128,
912
+ out_channels: int = 3,
913
+ block_out_channels: Tuple[int, ...] = (128, 256, 512, 512),
914
+ spatio_temporal_scaling: Tuple[bool, ...] = (True, True, True, False),
915
+ layers_per_block: Tuple[int, ...] = (4, 3, 3, 3, 4),
916
+ patch_size: int = 4,
917
+ patch_size_t: int = 1,
918
+ resnet_norm_eps: float = 1e-6,
919
+ is_causal: bool = False,
920
+ inject_noise: Tuple[bool, ...] = (False, False, False, False),
921
+ timestep_conditioning: bool = False,
922
+ upsample_residual: Tuple[bool, ...] = (False, False, False, False),
923
+ upsample_factor: Tuple[int, ...] = (1, 1, 1, 1),
924
+ ) -> None:
925
+ super().__init__()
926
+
927
+ self.patch_size = patch_size
928
+ self.patch_size_t = patch_size_t
929
+ self.out_channels = out_channels * patch_size**2
930
+
931
+ block_out_channels = tuple(reversed(block_out_channels))
932
+ spatio_temporal_scaling = tuple(reversed(spatio_temporal_scaling))
933
+ layers_per_block = tuple(reversed(layers_per_block))
934
+ inject_noise = tuple(reversed(inject_noise))
935
+ upsample_residual = tuple(reversed(upsample_residual))
936
+ upsample_factor = tuple(reversed(upsample_factor))
937
+ output_channel = block_out_channels[0]
938
+
939
+ self.conv_in = LTXVideoCausalConv3d(
940
+ in_channels=in_channels, out_channels=output_channel, kernel_size=3, stride=1, is_causal=is_causal
941
+ )
942
+
943
+ self.mid_block = LTXVideoMidBlock3d(
944
+ in_channels=output_channel,
945
+ num_layers=layers_per_block[0],
946
+ resnet_eps=resnet_norm_eps,
947
+ is_causal=is_causal,
948
+ inject_noise=inject_noise[0],
949
+ timestep_conditioning=timestep_conditioning,
950
+ )
951
+
952
+ # up blocks
953
+ num_block_out_channels = len(block_out_channels)
954
+ self.up_blocks = nn.ModuleList([])
955
+ for i in range(num_block_out_channels):
956
+ input_channel = output_channel // upsample_factor[i]
957
+ output_channel = block_out_channels[i] // upsample_factor[i]
958
+
959
+ up_block = LTXVideoUpBlock3d(
960
+ in_channels=input_channel,
961
+ out_channels=output_channel,
962
+ num_layers=layers_per_block[i + 1],
963
+ resnet_eps=resnet_norm_eps,
964
+ spatio_temporal_scale=spatio_temporal_scaling[i],
965
+ is_causal=is_causal,
966
+ inject_noise=inject_noise[i + 1],
967
+ timestep_conditioning=timestep_conditioning,
968
+ upsample_residual=upsample_residual[i],
969
+ upscale_factor=upsample_factor[i],
970
+ )
971
+
972
+ self.up_blocks.append(up_block)
973
+
974
+ # out
975
+ self.norm_out = RMSNorm(out_channels, eps=1e-8, elementwise_affine=False)
976
+ self.conv_act = nn.SiLU()
977
+ self.conv_out = LTXVideoCausalConv3d(
978
+ in_channels=output_channel, out_channels=self.out_channels, kernel_size=3, stride=1, is_causal=is_causal
979
+ )
980
+
981
+ # timestep embedding
982
+ self.time_embedder = None
983
+ self.scale_shift_table = None
984
+ self.timestep_scale_multiplier = None
985
+ if timestep_conditioning:
986
+ self.timestep_scale_multiplier = nn.Parameter(torch.tensor(1000.0, dtype=torch.float32))
987
+ self.time_embedder = PixArtAlphaCombinedTimestepSizeEmbeddings(output_channel * 2, 0)
988
+ self.scale_shift_table = nn.Parameter(torch.randn(2, output_channel) / output_channel**0.5)
989
+
990
+ self.gradient_checkpointing = False
991
+
992
+ def forward(self, hidden_states: torch.Tensor, temb: Optional[torch.Tensor] = None) -> torch.Tensor:
993
+ hidden_states = self.conv_in(hidden_states)
994
+
995
+ if self.timestep_scale_multiplier is not None:
996
+ temb = temb * self.timestep_scale_multiplier
997
+
998
+ if torch.is_grad_enabled() and self.gradient_checkpointing:
999
+ hidden_states = self._gradient_checkpointing_func(self.mid_block, hidden_states, temb)
1000
+
1001
+ for up_block in self.up_blocks:
1002
+ hidden_states = self._gradient_checkpointing_func(up_block, hidden_states, temb)
1003
+ else:
1004
+ hidden_states = self.mid_block(hidden_states, temb)
1005
+
1006
+ for up_block in self.up_blocks:
1007
+ hidden_states = up_block(hidden_states, temb)
1008
+
1009
+ hidden_states = self.norm_out(hidden_states.movedim(1, -1)).movedim(-1, 1)
1010
+
1011
+ if self.time_embedder is not None:
1012
+ temb = self.time_embedder(
1013
+ timestep=temb.flatten(),
1014
+ resolution=None,
1015
+ aspect_ratio=None,
1016
+ batch_size=hidden_states.size(0),
1017
+ hidden_dtype=hidden_states.dtype,
1018
+ )
1019
+ temb = temb.view(hidden_states.size(0), -1, 1, 1, 1).unflatten(1, (2, -1))
1020
+ temb = temb + self.scale_shift_table[None, ..., None, None, None]
1021
+ shift, scale = temb.unbind(dim=1)
1022
+ hidden_states = hidden_states * (1 + scale) + shift
1023
+
1024
+ hidden_states = self.conv_act(hidden_states)
1025
+ hidden_states = self.conv_out(hidden_states)
1026
+
1027
+ p = self.patch_size
1028
+ p_t = self.patch_size_t
1029
+
1030
+ batch_size, num_channels, num_frames, height, width = hidden_states.shape
1031
+ hidden_states = hidden_states.reshape(batch_size, -1, p_t, p, p, num_frames, height, width)
1032
+ hidden_states = hidden_states.permute(0, 1, 5, 2, 6, 4, 7, 3).flatten(6, 7).flatten(4, 5).flatten(2, 3)
1033
+
1034
+ return hidden_states
1035
+
1036
+
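# Illustrative sketch (not part of the diff): the inverse (un-patchify) rearrangement the decoder
# applies after `conv_out`, matching the encoder sketch earlier (arbitrary example sizes).
import torch

p, p_t = 4, 1
y = torch.randn(1, 48, 8, 16, 16)  # (B, out_channels * p_t * p * p, F, H, W)
b, _, f, h, w = y.shape
y = y.reshape(b, -1, p_t, p, p, f, h, w)
y = y.permute(0, 1, 5, 2, 6, 4, 7, 3).flatten(6, 7).flatten(4, 5).flatten(2, 3)
print(y.shape)  # torch.Size([1, 3, 8, 64, 64])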
1037
+ class AutoencoderKLLTXVideo(ModelMixin, ConfigMixin, FromOriginalModelMixin):
1038
+ r"""
1039
+ A VAE model with KL loss for encoding images into latents and decoding latent representations into images. Used in
1040
+ [LTX](https://huggingface.co/Lightricks/LTX-Video).
1041
+
1042
+ This model inherits from [`ModelMixin`]. Check the superclass documentation for its generic methods implemented
1043
+ for all models (such as downloading or saving).
1044
+
1045
+ Args:
1046
+ in_channels (`int`, defaults to `3`):
1047
+ Number of input channels.
1048
+ out_channels (`int`, defaults to `3`):
1049
+ Number of output channels.
1050
+ latent_channels (`int`, defaults to `128`):
1051
+ Number of latent channels.
1052
+ block_out_channels (`Tuple[int, ...]`, defaults to `(128, 256, 512, 512)`):
1053
+ The number of output channels for each block.
1054
+ spatio_temporal_scaling (`Tuple[bool, ...]`, defaults to `(True, True, True, False)`):
1055
+ Whether a block should contain spatio-temporal downscaling or not.
1056
+ layers_per_block (`Tuple[int, ...]`, defaults to `(4, 3, 3, 3, 4)`):
1057
+ The number of layers per block.
1058
+ patch_size (`int`, defaults to `4`):
1059
+ The size of spatial patches.
1060
+ patch_size_t (`int`, defaults to `1`):
1061
+ The size of temporal patches.
1062
+ resnet_norm_eps (`float`, defaults to `1e-6`):
1063
+ Epsilon value for ResNet normalization layers.
1064
+ scaling_factor (`float`, *optional*, defaults to `1.0`):
1065
+ The component-wise standard deviation of the trained latent space computed using the first batch of the
1066
+ training set. This is used to scale the latent space to have unit variance when training the diffusion
1067
+ model. The latents are scaled with the formula `z = z * scaling_factor` before being passed to the
1068
+ diffusion model. When decoding, the latents are scaled back to the original scale with the formula: `z = 1
1069
+ / scaling_factor * z`. For more details, refer to sections 4.3.2 and D.1 of the [High-Resolution Image
1070
+ Synthesis with Latent Diffusion Models](https://huggingface.co/papers/2112.10752) paper.
1071
+ encoder_causal (`bool`, defaults to `True`):
1072
+ Whether the encoder should behave causally (future frames depend only on past frames) or not.
1073
+ decoder_causal (`bool`, defaults to `False`):
1074
+ Whether the decoder should behave causally (future frames depend only on past frames) or not.
1075
+ """
1076
+
1077
+ _supports_gradient_checkpointing = True
1078
+
1079
+ @register_to_config
1080
+ def __init__(
1081
+ self,
1082
+ in_channels: int = 3,
1083
+ out_channels: int = 3,
1084
+ latent_channels: int = 128,
1085
+ block_out_channels: Tuple[int, ...] = (128, 256, 512, 512),
1086
+ down_block_types: Tuple[str, ...] = (
1087
+ "LTXVideoDownBlock3D",
1088
+ "LTXVideoDownBlock3D",
1089
+ "LTXVideoDownBlock3D",
1090
+ "LTXVideoDownBlock3D",
1091
+ ),
1092
+ decoder_block_out_channels: Tuple[int, ...] = (128, 256, 512, 512),
1093
+ layers_per_block: Tuple[int, ...] = (4, 3, 3, 3, 4),
1094
+ decoder_layers_per_block: Tuple[int, ...] = (4, 3, 3, 3, 4),
1095
+ spatio_temporal_scaling: Tuple[bool, ...] = (True, True, True, False),
1096
+ decoder_spatio_temporal_scaling: Tuple[bool, ...] = (True, True, True, False),
1097
+ decoder_inject_noise: Tuple[bool, ...] = (False, False, False, False, False),
1098
+ downsample_type: Tuple[str, ...] = ("conv", "conv", "conv", "conv"),
1099
+ upsample_residual: Tuple[bool, ...] = (False, False, False, False),
1100
+ upsample_factor: Tuple[int, ...] = (1, 1, 1, 1),
1101
+ timestep_conditioning: bool = False,
1102
+ patch_size: int = 4,
1103
+ patch_size_t: int = 1,
1104
+ resnet_norm_eps: float = 1e-6,
1105
+ scaling_factor: float = 1.0,
1106
+ encoder_causal: bool = True,
1107
+ decoder_causal: bool = False,
1108
+ spatial_compression_ratio: int = None,
1109
+ temporal_compression_ratio: int = None,
1110
+ ) -> None:
1111
+ super().__init__()
1112
+
1113
+ self.encoder = LTXVideoEncoder3d(
1114
+ in_channels=in_channels,
1115
+ out_channels=latent_channels,
1116
+ block_out_channels=block_out_channels,
1117
+ down_block_types=down_block_types,
1118
+ spatio_temporal_scaling=spatio_temporal_scaling,
1119
+ layers_per_block=layers_per_block,
1120
+ downsample_type=downsample_type,
1121
+ patch_size=patch_size,
1122
+ patch_size_t=patch_size_t,
1123
+ resnet_norm_eps=resnet_norm_eps,
1124
+ is_causal=encoder_causal,
1125
+ )
1126
+ self.decoder = LTXVideoDecoder3d(
1127
+ in_channels=latent_channels,
1128
+ out_channels=out_channels,
1129
+ block_out_channels=decoder_block_out_channels,
1130
+ spatio_temporal_scaling=decoder_spatio_temporal_scaling,
1131
+ layers_per_block=decoder_layers_per_block,
1132
+ patch_size=patch_size,
1133
+ patch_size_t=patch_size_t,
1134
+ resnet_norm_eps=resnet_norm_eps,
1135
+ is_causal=decoder_causal,
1136
+ timestep_conditioning=timestep_conditioning,
1137
+ inject_noise=decoder_inject_noise,
1138
+ upsample_residual=upsample_residual,
1139
+ upsample_factor=upsample_factor,
1140
+ )
1141
+
1142
+ latents_mean = torch.zeros((latent_channels,), requires_grad=False)
1143
+ latents_std = torch.ones((latent_channels,), requires_grad=False)
1144
+ self.register_buffer("latents_mean", latents_mean, persistent=True)
1145
+ self.register_buffer("latents_std", latents_std, persistent=True)
1146
+
1147
+ self.spatial_compression_ratio = (
1148
+ patch_size * 2 ** sum(spatio_temporal_scaling)
1149
+ if spatial_compression_ratio is None
1150
+ else spatial_compression_ratio
1151
+ )
1152
+ self.temporal_compression_ratio = (
1153
+ patch_size_t * 2 ** sum(spatio_temporal_scaling)
1154
+ if temporal_compression_ratio is None
1155
+ else temporal_compression_ratio
1156
+ )
1157
+
1158
+ # When decoding a batch of video latents at a time, one can save memory by slicing across the batch dimension
1159
+ # to perform decoding of a single video latent at a time.
1160
+ self.use_slicing = False
1161
+
1162
+ # When decoding spatially large video latents, the memory requirement is very high. By breaking the video latent
1163
+ # frames spatially into smaller tiles and performing multiple forward passes for decoding, and then blending the
1164
+ # intermediate tiles together, the memory requirement can be lowered.
1165
+ self.use_tiling = False
1166
+
1167
+ # When decoding temporally long video latents, the memory requirement is very high. By decoding latent frames
1168
+ # at a fixed frame batch size (based on `self.num_latent_frames_batch_sizes`), the memory requirement can be lowered.
1169
+ self.use_framewise_encoding = False
1170
+ self.use_framewise_decoding = False
1171
+
1172
+ # This can be configured based on the amount of GPU memory available.
1173
+ # `16` for sample frames and `2` for latent frames are sensible defaults for consumer GPUs.
1174
+ # Setting it to higher values results in higher memory usage.
1175
+ self.num_sample_frames_batch_size = 16
1176
+ self.num_latent_frames_batch_size = 2
1177
+
1178
+ # The minimal tile height and width for spatial tiling to be used
1179
+ self.tile_sample_min_height = 512
1180
+ self.tile_sample_min_width = 512
1181
+ self.tile_sample_min_num_frames = 16
1182
+
1183
+ # The minimal distance between two spatial tiles
1184
+ self.tile_sample_stride_height = 448
1185
+ self.tile_sample_stride_width = 448
1186
+ self.tile_sample_stride_num_frames = 8
1187
+
1188
+ def enable_tiling(
1189
+ self,
1190
+ tile_sample_min_height: Optional[int] = None,
1191
+ tile_sample_min_width: Optional[int] = None,
1192
+ tile_sample_min_num_frames: Optional[int] = None,
1193
+ tile_sample_stride_height: Optional[float] = None,
1194
+ tile_sample_stride_width: Optional[float] = None,
1195
+ tile_sample_stride_num_frames: Optional[float] = None,
1196
+ ) -> None:
1197
+ r"""
1198
+ Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to
1199
+ compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow
1200
+ processing larger images.
1201
+
1202
+ Args:
1203
+ tile_sample_min_height (`int`, *optional*):
1204
+ The minimum height required for a sample to be separated into tiles across the height dimension.
1205
+ tile_sample_min_width (`int`, *optional*):
1206
+ The minimum width required for a sample to be separated into tiles across the width dimension.
1207
+ tile_sample_stride_height (`int`, *optional*):
1208
+ The stride between two consecutive vertical tiles. This is to ensure that there are no tiling
+ artifacts produced across the height dimension.
1210
+ tile_sample_stride_width (`int`, *optional*):
1211
+ The stride between two consecutive horizontal tiles. This is to ensure that there are no tiling
1212
+ artifacts produced across the width dimension.
1213
+ """
1214
+ self.use_tiling = True
1215
+ self.tile_sample_min_height = tile_sample_min_height or self.tile_sample_min_height
1216
+ self.tile_sample_min_width = tile_sample_min_width or self.tile_sample_min_width
1217
+ self.tile_sample_min_num_frames = tile_sample_min_num_frames or self.tile_sample_min_num_frames
1218
+ self.tile_sample_stride_height = tile_sample_stride_height or self.tile_sample_stride_height
1219
+ self.tile_sample_stride_width = tile_sample_stride_width or self.tile_sample_stride_width
1220
+ self.tile_sample_stride_num_frames = tile_sample_stride_num_frames or self.tile_sample_stride_num_frames
1221
+
1222
+ def disable_tiling(self) -> None:
1223
+ r"""
1224
+ Disable tiled VAE decoding. If `enable_tiling` was previously enabled, this method will go back to computing
1225
+ decoding in one step.
1226
+ """
1227
+ self.use_tiling = False
1228
+
1229
+ def enable_slicing(self) -> None:
1230
+ r"""
1231
+ Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to
1232
+ compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.
1233
+ """
1234
+ self.use_slicing = True
1235
+
1236
+ def disable_slicing(self) -> None:
1237
+ r"""
1238
+ Disable sliced VAE decoding. If `enable_slicing` was previously enabled, this method will go back to computing
1239
+ decoding in one step.
1240
+ """
1241
+ self.use_slicing = False
1242
+
1243
+ def _encode(self, x: torch.Tensor) -> torch.Tensor:
1244
+ batch_size, num_channels, num_frames, height, width = x.shape
1245
+
1246
+ if self.use_framewise_decoding and num_frames > self.tile_sample_min_num_frames:
1247
+ return self._temporal_tiled_encode(x)
1248
+
1249
+ if self.use_tiling and (width > self.tile_sample_min_width or height > self.tile_sample_min_height):
1250
+ return self.tiled_encode(x)
1251
+
1252
+ enc = self.encoder(x)
1253
+
1254
+ return enc
1255
+
1256
+ @apply_forward_hook
1257
+ def encode(
1258
+ self, x: torch.Tensor, return_dict: bool = True
1259
+ ) -> Union[AutoencoderKLOutput, Tuple[DiagonalGaussianDistribution]]:
1260
+ """
1261
+ Encode a batch of images into latents.
1262
+
1263
+ Args:
1264
+ x (`torch.Tensor`): Input batch of images.
1265
+ return_dict (`bool`, *optional*, defaults to `True`):
1266
+ Whether to return a [`~models.autoencoder_kl.AutoencoderKLOutput`] instead of a plain tuple.
1267
+
1268
+ Returns:
1269
+ The latent representations of the encoded videos. If `return_dict` is True, a
1270
+ [`~models.autoencoder_kl.AutoencoderKLOutput`] is returned, otherwise a plain `tuple` is returned.
1271
+ """
1272
+ if self.use_slicing and x.shape[0] > 1:
1273
+ encoded_slices = [self._encode(x_slice) for x_slice in x.split(1)]
1274
+ h = torch.cat(encoded_slices)
1275
+ else:
1276
+ h = self._encode(x)
1277
+ posterior = DiagonalGaussianDistribution(h)
1278
+
1279
+ if not return_dict:
1280
+ return (posterior,)
1281
+ return AutoencoderKLOutput(latent_dist=posterior)
1282
+
1283
+ def _decode(
1284
+ self, z: torch.Tensor, temb: Optional[torch.Tensor] = None, return_dict: bool = True
1285
+ ) -> Union[DecoderOutput, torch.Tensor]:
1286
+ batch_size, num_channels, num_frames, height, width = z.shape
1287
+ tile_latent_min_height = self.tile_sample_min_height // self.spatial_compression_ratio
1288
+ tile_latent_min_width = self.tile_sample_min_width // self.spatial_compression_ratio
1289
+ tile_latent_min_num_frames = self.tile_sample_min_num_frames // self.temporal_compression_ratio
1290
+
1291
+ if self.use_framewise_decoding and num_frames > tile_latent_min_num_frames:
1292
+ return self._temporal_tiled_decode(z, temb, return_dict=return_dict)
1293
+
1294
+ if self.use_tiling and (width > tile_latent_min_width or height > tile_latent_min_height):
1295
+ return self.tiled_decode(z, temb, return_dict=return_dict)
1296
+
1297
+ dec = self.decoder(z, temb)
1298
+
1299
+ if not return_dict:
1300
+ return (dec,)
1301
+
1302
+ return DecoderOutput(sample=dec)
1303
+
1304
+ @apply_forward_hook
1305
+ def decode(
1306
+ self, z: torch.Tensor, temb: Optional[torch.Tensor] = None, return_dict: bool = True
1307
+ ) -> Union[DecoderOutput, torch.Tensor]:
1308
+ """
1309
+ Decode a batch of images.
1310
+
1311
+ Args:
1312
+ z (`torch.Tensor`): Input batch of latent vectors.
1313
+ return_dict (`bool`, *optional*, defaults to `True`):
1314
+ Whether to return a [`~models.vae.DecoderOutput`] instead of a plain tuple.
1315
+
1316
+ Returns:
1317
+ [`~models.vae.DecoderOutput`] or `tuple`:
1318
+ If return_dict is True, a [`~models.vae.DecoderOutput`] is returned, otherwise a plain `tuple` is
1319
+ returned.
1320
+ """
1321
+ if self.use_slicing and z.shape[0] > 1:
1322
+ if temb is not None:
1323
+ decoded_slices = [
1324
+ self._decode(z_slice, t_slice).sample for z_slice, t_slice in zip(z.split(1), temb.split(1))
1325
+ ]
1326
+ else:
1327
+ decoded_slices = [self._decode(z_slice).sample for z_slice in z.split(1)]
1328
+ decoded = torch.cat(decoded_slices)
1329
+ else:
1330
+ decoded = self._decode(z, temb).sample
1331
+
1332
+ if not return_dict:
1333
+ return (decoded,)
1334
+
1335
+ return DecoderOutput(sample=decoded)
1336
+
1337
+ def blend_v(self, a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor:
1338
+ blend_extent = min(a.shape[3], b.shape[3], blend_extent)
1339
+ for y in range(blend_extent):
1340
+ b[:, :, :, y, :] = a[:, :, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, :, y, :] * (
1341
+ y / blend_extent
1342
+ )
1343
+ return b
1344
+
1345
+ def blend_h(self, a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor:
1346
+ blend_extent = min(a.shape[4], b.shape[4], blend_extent)
1347
+ for x in range(blend_extent):
1348
+ b[:, :, :, :, x] = a[:, :, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, :, x] * (
1349
+ x / blend_extent
1350
+ )
1351
+ return b
1352
+
1353
+ def blend_t(self, a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor:
1354
+ blend_extent = min(a.shape[-3], b.shape[-3], blend_extent)
1355
+ for x in range(blend_extent):
1356
+ b[:, :, x, :, :] = a[:, :, -blend_extent + x, :, :] * (1 - x / blend_extent) + b[:, :, x, :, :] * (
1357
+ x / blend_extent
1358
+ )
1359
+ return b
1360
+
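# Illustrative sketch (not part of the diff): the linear cross-fade implemented by
# blend_v/blend_h/blend_t above, shown standalone for the height dimension.
import torch

def blend_rows(a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor:
    blend_extent = min(a.shape[3], b.shape[3], blend_extent)
    for y in range(blend_extent):
        w = y / blend_extent
        b[:, :, :, y, :] = a[:, :, :, -blend_extent + y, :] * (1 - w) + b[:, :, :, y, :] * w
    return b

top = torch.zeros(1, 3, 2, 8, 8)
bottom = torch.ones(1, 3, 2, 8, 8)
blended = blend_rows(top, bottom, blend_extent=4)  # the first 4 rows fade from `top` toward `bottom`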
1361
+ def tiled_encode(self, x: torch.Tensor) -> torch.Tensor:
1362
+ r"""Encode a batch of images using a tiled encoder.
1363
+
1364
+ Args:
1365
+ x (`torch.Tensor`): Input batch of videos.
1366
+
1367
+ Returns:
1368
+ `torch.Tensor`:
1369
+ The latent representation of the encoded videos.
1370
+ """
1371
+ batch_size, num_channels, num_frames, height, width = x.shape
1372
+ latent_height = height // self.spatial_compression_ratio
1373
+ latent_width = width // self.spatial_compression_ratio
1374
+
1375
+ tile_latent_min_height = self.tile_sample_min_height // self.spatial_compression_ratio
1376
+ tile_latent_min_width = self.tile_sample_min_width // self.spatial_compression_ratio
1377
+ tile_latent_stride_height = self.tile_sample_stride_height // self.spatial_compression_ratio
1378
+ tile_latent_stride_width = self.tile_sample_stride_width // self.spatial_compression_ratio
1379
+
1380
+ blend_height = tile_latent_min_height - tile_latent_stride_height
1381
+ blend_width = tile_latent_min_width - tile_latent_stride_width
1382
+
1383
+ # Split x into overlapping tiles and encode them separately.
1384
+ # The tiles have an overlap to avoid seams between tiles.
1385
+ rows = []
1386
+ for i in range(0, height, self.tile_sample_stride_height):
1387
+ row = []
1388
+ for j in range(0, width, self.tile_sample_stride_width):
1389
+ time = self.encoder(
1390
+ x[:, :, :, i : i + self.tile_sample_min_height, j : j + self.tile_sample_min_width]
1391
+ )
1392
+
1393
+ row.append(time)
1394
+ rows.append(row)
1395
+
1396
+ result_rows = []
1397
+ for i, row in enumerate(rows):
1398
+ result_row = []
1399
+ for j, tile in enumerate(row):
1400
+ # blend the above tile and the left tile
1401
+ # to the current tile and add the current tile to the result row
1402
+ if i > 0:
1403
+ tile = self.blend_v(rows[i - 1][j], tile, blend_height)
1404
+ if j > 0:
1405
+ tile = self.blend_h(row[j - 1], tile, blend_width)
1406
+ result_row.append(tile[:, :, :, :tile_latent_stride_height, :tile_latent_stride_width])
1407
+ result_rows.append(torch.cat(result_row, dim=4))
1408
+
1409
+ enc = torch.cat(result_rows, dim=3)[:, :, :, :latent_height, :latent_width]
1410
+ return enc
1411
+
1412
+ def tiled_decode(
1413
+ self, z: torch.Tensor, temb: Optional[torch.Tensor], return_dict: bool = True
1414
+ ) -> Union[DecoderOutput, torch.Tensor]:
1415
+ r"""
1416
+ Decode a batch of images using a tiled decoder.
1417
+
1418
+ Args:
1419
+ z (`torch.Tensor`): Input batch of latent vectors.
1420
+ return_dict (`bool`, *optional*, defaults to `True`):
1421
+ Whether or not to return a [`~models.vae.DecoderOutput`] instead of a plain tuple.
1422
+
1423
+ Returns:
1424
+ [`~models.vae.DecoderOutput`] or `tuple`:
1425
+ If return_dict is True, a [`~models.vae.DecoderOutput`] is returned, otherwise a plain `tuple` is
1426
+ returned.
1427
+ """
1428
+
1429
+ batch_size, num_channels, num_frames, height, width = z.shape
1430
+ sample_height = height * self.spatial_compression_ratio
1431
+ sample_width = width * self.spatial_compression_ratio
1432
+
1433
+ tile_latent_min_height = self.tile_sample_min_height // self.spatial_compression_ratio
1434
+ tile_latent_min_width = self.tile_sample_min_width // self.spatial_compression_ratio
1435
+ tile_latent_stride_height = self.tile_sample_stride_height // self.spatial_compression_ratio
1436
+ tile_latent_stride_width = self.tile_sample_stride_width // self.spatial_compression_ratio
1437
+
1438
+ blend_height = self.tile_sample_min_height - self.tile_sample_stride_height
1439
+ blend_width = self.tile_sample_min_width - self.tile_sample_stride_width
1440
+
1441
+ # Split z into overlapping tiles and decode them separately.
1442
+ # The tiles have an overlap to avoid seams between tiles.
1443
+ rows = []
1444
+ for i in range(0, height, tile_latent_stride_height):
1445
+ row = []
1446
+ for j in range(0, width, tile_latent_stride_width):
1447
+ time = self.decoder(z[:, :, :, i : i + tile_latent_min_height, j : j + tile_latent_min_width], temb)
1448
+
1449
+ row.append(time)
1450
+ rows.append(row)
1451
+
1452
+ result_rows = []
1453
+ for i, row in enumerate(rows):
1454
+ result_row = []
1455
+ for j, tile in enumerate(row):
1456
+ # blend the above tile and the left tile
1457
+ # to the current tile and add the current tile to the result row
1458
+ if i > 0:
1459
+ tile = self.blend_v(rows[i - 1][j], tile, blend_height)
1460
+ if j > 0:
1461
+ tile = self.blend_h(row[j - 1], tile, blend_width)
1462
+ result_row.append(tile[:, :, :, : self.tile_sample_stride_height, : self.tile_sample_stride_width])
1463
+ result_rows.append(torch.cat(result_row, dim=4))
1464
+
1465
+ dec = torch.cat(result_rows, dim=3)[:, :, :, :sample_height, :sample_width]
1466
+
1467
+ if not return_dict:
1468
+ return (dec,)
1469
+
1470
+ return DecoderOutput(sample=dec)
1471
+
1472
+ def _temporal_tiled_encode(self, x: torch.Tensor) -> AutoencoderKLOutput:
1473
+ batch_size, num_channels, num_frames, height, width = x.shape
1474
+ latent_num_frames = (num_frames - 1) // self.temporal_compression_ratio + 1
1475
+
1476
+ tile_latent_min_num_frames = self.tile_sample_min_num_frames // self.temporal_compression_ratio
1477
+ tile_latent_stride_num_frames = self.tile_sample_stride_num_frames // self.temporal_compression_ratio
1478
+ blend_num_frames = tile_latent_min_num_frames - tile_latent_stride_num_frames
1479
+
1480
+ row = []
1481
+ for i in range(0, num_frames, self.tile_sample_stride_num_frames):
1482
+ tile = x[:, :, i : i + self.tile_sample_min_num_frames + 1, :, :]
1483
+ if self.use_tiling and (height > self.tile_sample_min_height or width > self.tile_sample_min_width):
1484
+ tile = self.tiled_encode(tile)
1485
+ else:
1486
+ tile = self.encoder(tile)
1487
+ if i > 0:
1488
+ tile = tile[:, :, 1:, :, :]
1489
+ row.append(tile)
1490
+
1491
+ result_row = []
1492
+ for i, tile in enumerate(row):
1493
+ if i > 0:
1494
+ tile = self.blend_t(row[i - 1], tile, blend_num_frames)
1495
+ result_row.append(tile[:, :, :tile_latent_stride_num_frames, :, :])
1496
+ else:
1497
+ result_row.append(tile[:, :, : tile_latent_stride_num_frames + 1, :, :])
1498
+
1499
+ enc = torch.cat(result_row, dim=2)[:, :, :latent_num_frames]
1500
+ return enc
1501
+
1502
+ def _temporal_tiled_decode(
1503
+ self, z: torch.Tensor, temb: Optional[torch.Tensor], return_dict: bool = True
1504
+ ) -> Union[DecoderOutput, torch.Tensor]:
1505
+ batch_size, num_channels, num_frames, height, width = z.shape
1506
+ num_sample_frames = (num_frames - 1) * self.temporal_compression_ratio + 1
1507
+
1508
+ tile_latent_min_height = self.tile_sample_min_height // self.spatial_compression_ratio
1509
+ tile_latent_min_width = self.tile_sample_min_width // self.spatial_compression_ratio
1510
+ tile_latent_min_num_frames = self.tile_sample_min_num_frames // self.temporal_compression_ratio
1511
+ tile_latent_stride_num_frames = self.tile_sample_stride_num_frames // self.temporal_compression_ratio
1512
+ blend_num_frames = self.tile_sample_min_num_frames - self.tile_sample_stride_num_frames
1513
+
1514
+ row = []
1515
+ for i in range(0, num_frames, tile_latent_stride_num_frames):
1516
+ tile = z[:, :, i : i + tile_latent_min_num_frames + 1, :, :]
1517
+ if self.use_tiling and (tile.shape[-1] > tile_latent_min_width or tile.shape[-2] > tile_latent_min_height):
1518
+ decoded = self.tiled_decode(tile, temb, return_dict=True).sample
1519
+ else:
1520
+ decoded = self.decoder(tile, temb)
1521
+ if i > 0:
1522
+ decoded = decoded[:, :, :-1, :, :]
1523
+ row.append(decoded)
1524
+
1525
+ result_row = []
1526
+ for i, tile in enumerate(row):
1527
+ if i > 0:
1528
+ tile = self.blend_t(row[i - 1], tile, blend_num_frames)
1529
+ tile = tile[:, :, : self.tile_sample_stride_num_frames, :, :]
1530
+ result_row.append(tile)
1531
+ else:
1532
+ result_row.append(tile[:, :, : self.tile_sample_stride_num_frames + 1, :, :])
1533
+
1534
+ dec = torch.cat(result_row, dim=2)[:, :, :num_sample_frames]
1535
+
1536
+ if not return_dict:
1537
+ return (dec,)
1538
+ return DecoderOutput(sample=dec)
1539
+
1540
+ def forward(
1541
+ self,
1542
+ sample: torch.Tensor,
1543
+ temb: Optional[torch.Tensor] = None,
1544
+ sample_posterior: bool = False,
1545
+ return_dict: bool = True,
1546
+ generator: Optional[torch.Generator] = None,
1547
+ ) -> Union[DecoderOutput, Tuple[torch.Tensor]]:
1548
+ x = sample
1549
+ posterior = self.encode(x).latent_dist
1550
+ if sample_posterior:
1551
+ z = posterior.sample(generator=generator)
1552
+ else:
1553
+ z = posterior.mode()
1554
+ dec = self.decode(z, temb)
1555
+ if not return_dict:
1556
+ return (dec.sample,)
1557
+ return dec
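# Illustrative sketch (not part of the diff): loading this VAE through the public diffusers API and
# round-tripping a short clip. The checkpoint id and subfolder are assumptions; substitute the
# repository you actually use.
import torch
from diffusers import AutoencoderKLLTXVideo

vae = AutoencoderKLLTXVideo.from_pretrained("Lightricks/LTX-Video", subfolder="vae", torch_dtype=torch.float32)
vae.enable_tiling()  # optional: trades speed for lower peak memory on large frames

video = torch.randn(1, 3, 9, 256, 256)  # (B, C, F, H, W); F is typically 8k + 1 frames
with torch.no_grad():
    latents = vae.encode(video).latent_dist.sample()
    reconstruction = vae.decode(latents, return_dict=False)[0]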
exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/models/autoencoders/autoencoder_kl_magvit.py ADDED
@@ -0,0 +1,1094 @@
1
+ # Copyright 2025 The EasyAnimate team and The HuggingFace Team.
2
+ # All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ import math
17
+ from typing import Optional, Tuple, Union
18
+
19
+ import torch
20
+ import torch.nn as nn
21
+ import torch.nn.functional as F
22
+
23
+ from ...configuration_utils import ConfigMixin, register_to_config
24
+ from ...utils import logging
25
+ from ...utils.accelerate_utils import apply_forward_hook
26
+ from ..activations import get_activation
27
+ from ..modeling_outputs import AutoencoderKLOutput
28
+ from ..modeling_utils import ModelMixin
29
+ from .vae import DecoderOutput, DiagonalGaussianDistribution
30
+
31
+
32
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
33
+
34
+
35
+ class EasyAnimateCausalConv3d(nn.Conv3d):
36
+ def __init__(
37
+ self,
38
+ in_channels: int,
39
+ out_channels: int,
40
+ kernel_size: Union[int, Tuple[int, ...]] = 3,
41
+ stride: Union[int, Tuple[int, ...]] = 1,
42
+ padding: Union[int, Tuple[int, ...]] = 1,
43
+ dilation: Union[int, Tuple[int, ...]] = 1,
44
+ groups: int = 1,
45
+ bias: bool = True,
46
+ padding_mode: str = "zeros",
47
+ ):
48
+ # Ensure kernel_size, stride, and dilation are tuples of length 3
49
+ kernel_size = kernel_size if isinstance(kernel_size, tuple) else (kernel_size,) * 3
50
+ assert len(kernel_size) == 3, f"Kernel size must be a 3-tuple, got {kernel_size} instead."
51
+
52
+ stride = stride if isinstance(stride, tuple) else (stride,) * 3
53
+ assert len(stride) == 3, f"Stride must be a 3-tuple, got {stride} instead."
54
+
55
+ dilation = dilation if isinstance(dilation, tuple) else (dilation,) * 3
56
+ assert len(dilation) == 3, f"Dilation must be a 3-tuple, got {dilation} instead."
57
+
58
+ # Unpack kernel size, stride, and dilation for temporal, height, and width dimensions
59
+ t_ks, h_ks, w_ks = kernel_size
60
+ self.t_stride, h_stride, w_stride = stride
61
+ t_dilation, h_dilation, w_dilation = dilation
62
+
63
+ # Calculate padding for temporal dimension to maintain causality
64
+ t_pad = (t_ks - 1) * t_dilation
65
+
66
+ # Calculate padding for height and width dimensions based on the padding parameter
67
+ if padding is None:
68
+ h_pad = math.ceil(((h_ks - 1) * h_dilation + (1 - h_stride)) / 2)
69
+ w_pad = math.ceil(((w_ks - 1) * w_dilation + (1 - w_stride)) / 2)
70
+ elif isinstance(padding, int):
71
+ h_pad = w_pad = padding
72
+ else:
73
+ raise NotImplementedError("Only `int` or `None` padding is supported.")
74
+
75
+ # Store temporal padding and initialize flags and previous features cache
76
+ self.temporal_padding = t_pad
77
+ self.temporal_padding_origin = math.ceil(((t_ks - 1) * w_dilation + (1 - w_stride)) / 2)
78
+
79
+ self.prev_features = None
80
+
81
+ # Initialize the parent class with modified padding
82
+ super().__init__(
83
+ in_channels=in_channels,
84
+ out_channels=out_channels,
85
+ kernel_size=kernel_size,
86
+ stride=stride,
87
+ dilation=dilation,
88
+ padding=(0, h_pad, w_pad),
89
+ groups=groups,
90
+ bias=bias,
91
+ padding_mode=padding_mode,
92
+ )
93
+
94
+ def _clear_conv_cache(self):
95
+ del self.prev_features
96
+ self.prev_features = None
97
+
98
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
99
+ # Ensure input tensor is of the correct type
100
+ dtype = hidden_states.dtype
101
+ if self.prev_features is None:
102
+ # Pad the input tensor in the temporal dimension to maintain causality
103
+ hidden_states = F.pad(
104
+ hidden_states,
105
+ pad=(0, 0, 0, 0, self.temporal_padding, 0),
106
+ mode="replicate", # TODO: check if this is necessary
107
+ )
108
+ hidden_states = hidden_states.to(dtype=dtype)
109
+
110
+ # Clear cache before processing and store previous features for causality
111
+ self._clear_conv_cache()
112
+ self.prev_features = hidden_states[:, :, -self.temporal_padding :].clone()
113
+
114
+ # Process the input tensor in chunks along the temporal dimension
115
+ num_frames = hidden_states.size(2)
116
+ outputs = []
117
+ i = 0
118
+ while i + self.temporal_padding + 1 <= num_frames:
119
+ out = super().forward(hidden_states[:, :, i : i + self.temporal_padding + 1])
120
+ i += self.t_stride
121
+ outputs.append(out)
122
+ return torch.concat(outputs, 2)
123
+ else:
124
+ # Concatenate previous features with the input tensor for continuous temporal processing
125
+ if self.t_stride == 2:
126
+ hidden_states = torch.concat(
127
+ [self.prev_features[:, :, -(self.temporal_padding - 1) :], hidden_states], dim=2
128
+ )
129
+ else:
130
+ hidden_states = torch.concat([self.prev_features, hidden_states], dim=2)
131
+ hidden_states = hidden_states.to(dtype=dtype)
132
+
133
+ # Clear cache and update previous features
134
+ self._clear_conv_cache()
135
+ self.prev_features = hidden_states[:, :, -self.temporal_padding :].clone()
136
+
137
+ # Process the concatenated tensor in chunks along the temporal dimension
138
+ num_frames = hidden_states.size(2)
139
+ outputs = []
140
+ i = 0
141
+ while i + self.temporal_padding + 1 <= num_frames:
142
+ out = super().forward(hidden_states[:, :, i : i + self.temporal_padding + 1])
143
+ i += self.t_stride
144
+ outputs.append(out)
145
+ return torch.concat(outputs, 2)
146
+
147
+
148
+ class EasyAnimateResidualBlock3D(nn.Module):
149
+ def __init__(
150
+ self,
151
+ in_channels: int,
152
+ out_channels: int,
153
+ non_linearity: str = "silu",
154
+ norm_num_groups: int = 32,
155
+ norm_eps: float = 1e-6,
156
+ spatial_group_norm: bool = True,
157
+ dropout: float = 0.0,
158
+ output_scale_factor: float = 1.0,
159
+ ):
160
+ super().__init__()
161
+
162
+ self.output_scale_factor = output_scale_factor
163
+
164
+ # Group normalization for input tensor
165
+ self.norm1 = nn.GroupNorm(
166
+ num_groups=norm_num_groups,
167
+ num_channels=in_channels,
168
+ eps=norm_eps,
169
+ affine=True,
170
+ )
171
+ self.nonlinearity = get_activation(non_linearity)
172
+ self.conv1 = EasyAnimateCausalConv3d(in_channels, out_channels, kernel_size=3)
173
+
174
+ self.norm2 = nn.GroupNorm(num_groups=norm_num_groups, num_channels=out_channels, eps=norm_eps, affine=True)
175
+ self.dropout = nn.Dropout(dropout)
176
+ self.conv2 = EasyAnimateCausalConv3d(out_channels, out_channels, kernel_size=3)
177
+
178
+ if in_channels != out_channels:
179
+ self.shortcut = nn.Conv3d(in_channels, out_channels, kernel_size=1)
180
+ else:
181
+ self.shortcut = nn.Identity()
182
+
183
+ self.spatial_group_norm = spatial_group_norm
184
+
185
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
186
+ shortcut = self.shortcut(hidden_states)
187
+
188
+ if self.spatial_group_norm:
189
+ batch_size = hidden_states.size(0)
190
+ hidden_states = hidden_states.permute(0, 2, 1, 3, 4).flatten(0, 1) # [B, C, T, H, W] -> [B * T, C, H, W]
191
+ hidden_states = self.norm1(hidden_states)
192
+ hidden_states = hidden_states.unflatten(0, (batch_size, -1)).permute(
193
+ 0, 2, 1, 3, 4
194
+ ) # [B * T, C, H, W] -> [B, C, T, H, W]
195
+ else:
196
+ hidden_states = self.norm1(hidden_states)
197
+
198
+ hidden_states = self.nonlinearity(hidden_states)
199
+ hidden_states = self.conv1(hidden_states)
200
+
201
+ if self.spatial_group_norm:
202
+ batch_size = hidden_states.size(0)
203
+ hidden_states = hidden_states.permute(0, 2, 1, 3, 4).flatten(0, 1) # [B, C, T, H, W] -> [B * T, C, H, W]
204
+ hidden_states = self.norm2(hidden_states)
205
+ hidden_states = hidden_states.unflatten(0, (batch_size, -1)).permute(
206
+ 0, 2, 1, 3, 4
207
+ ) # [B * T, C, H, W] -> [B, C, T, H, W]
208
+ else:
209
+ hidden_states = self.norm2(hidden_states)
210
+
211
+ hidden_states = self.nonlinearity(hidden_states)
212
+ hidden_states = self.dropout(hidden_states)
213
+ hidden_states = self.conv2(hidden_states)
214
+
215
+ return (hidden_states + shortcut) / self.output_scale_factor
216
+
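+ # Note (illustrative): the `spatial_group_norm` branch flattens [B, C, T, H, W] to
+ # [B * T, C, H, W] so GroupNorm statistics are computed per frame instead of over the whole
+ # clip, e.g. with hypothetical tensors:
+ #   x = torch.randn(2, 32, 5, 16, 16)
+ #   per_frame = nn.GroupNorm(8, 32)(x.permute(0, 2, 1, 3, 4).flatten(0, 1))  # stats over (C/G, H, W)
+ #   per_clip = nn.GroupNorm(8, 32)(x)                                        # stats over (C/G, T, H, W)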
217
+
218
+ class EasyAnimateDownsampler3D(nn.Module):
219
+ def __init__(self, in_channels: int, out_channels: int, kernel_size: int = 3, stride: tuple = (2, 2, 2)):
220
+ super().__init__()
221
+
222
+ self.conv = EasyAnimateCausalConv3d(
223
+ in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=0
224
+ )
225
+
226
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
227
+ hidden_states = F.pad(hidden_states, (0, 1, 0, 1))
228
+ hidden_states = self.conv(hidden_states)
229
+ return hidden_states
230
+
231
+
232
+ class EasyAnimateUpsampler3D(nn.Module):
233
+ def __init__(
234
+ self,
235
+ in_channels: int,
236
+ out_channels: int,
237
+ kernel_size: int = 3,
238
+ temporal_upsample: bool = False,
239
+ spatial_group_norm: bool = True,
240
+ ):
241
+ super().__init__()
242
+ out_channels = out_channels or in_channels
243
+
244
+ self.temporal_upsample = temporal_upsample
245
+ self.spatial_group_norm = spatial_group_norm
246
+
247
+ self.conv = EasyAnimateCausalConv3d(
248
+ in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size
249
+ )
250
+ self.prev_features = None
251
+
252
+ def _clear_conv_cache(self):
253
+ del self.prev_features
254
+ self.prev_features = None
255
+
256
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
257
+ hidden_states = F.interpolate(hidden_states, scale_factor=(1, 2, 2), mode="nearest")
258
+ hidden_states = self.conv(hidden_states)
259
+
260
+ if self.temporal_upsample:
261
+ if self.prev_features is None:
262
+ self.prev_features = hidden_states
263
+ else:
264
+ hidden_states = F.interpolate(
265
+ hidden_states,
266
+ scale_factor=(2, 1, 1),
267
+ mode="trilinear" if not self.spatial_group_norm else "nearest",
268
+ )
269
+ return hidden_states
270
+
271
+
272
+ class EasyAnimateDownBlock3D(nn.Module):
273
+ def __init__(
274
+ self,
275
+ in_channels: int,
276
+ out_channels: int,
277
+ num_layers: int = 1,
278
+ act_fn: str = "silu",
279
+ norm_num_groups: int = 32,
280
+ norm_eps: float = 1e-6,
281
+ spatial_group_norm: bool = True,
282
+ dropout: float = 0.0,
283
+ output_scale_factor: float = 1.0,
284
+ add_downsample: bool = True,
285
+ add_temporal_downsample: bool = True,
286
+ ):
287
+ super().__init__()
288
+
289
+ self.convs = nn.ModuleList([])
290
+ for i in range(num_layers):
291
+ in_channels = in_channels if i == 0 else out_channels
292
+ self.convs.append(
293
+ EasyAnimateResidualBlock3D(
294
+ in_channels=in_channels,
295
+ out_channels=out_channels,
296
+ non_linearity=act_fn,
297
+ norm_num_groups=norm_num_groups,
298
+ norm_eps=norm_eps,
299
+ spatial_group_norm=spatial_group_norm,
300
+ dropout=dropout,
301
+ output_scale_factor=output_scale_factor,
302
+ )
303
+ )
304
+
305
+ if add_downsample and add_temporal_downsample:
306
+ self.downsampler = EasyAnimateDownsampler3D(out_channels, out_channels, kernel_size=3, stride=(2, 2, 2))
307
+ self.spatial_downsample_factor = 2
308
+ self.temporal_downsample_factor = 2
309
+ elif add_downsample and not add_temporal_downsample:
310
+ self.downsampler = EasyAnimateDownsampler3D(out_channels, out_channels, kernel_size=3, stride=(1, 2, 2))
311
+ self.spatial_downsample_factor = 2
312
+ self.temporal_downsample_factor = 1
313
+ else:
314
+ self.downsampler = None
315
+ self.spatial_downsample_factor = 1
316
+ self.temporal_downsample_factor = 1
317
+
318
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
319
+ for conv in self.convs:
320
+ hidden_states = conv(hidden_states)
321
+ if self.downsampler is not None:
322
+ hidden_states = self.downsampler(hidden_states)
323
+ return hidden_states
324
+
325
+
326
+ class EasyAnimateUpBlock3d(nn.Module):
327
+ def __init__(
328
+ self,
329
+ in_channels: int,
330
+ out_channels: int,
331
+ num_layers: int = 1,
332
+ act_fn: str = "silu",
333
+ norm_num_groups: int = 32,
334
+ norm_eps: float = 1e-6,
335
+ spatial_group_norm: bool = False,
336
+ dropout: float = 0.0,
337
+ output_scale_factor: float = 1.0,
338
+ add_upsample: bool = True,
339
+ add_temporal_upsample: bool = True,
340
+ ):
341
+ super().__init__()
342
+
343
+ self.convs = nn.ModuleList([])
344
+ for i in range(num_layers):
345
+ in_channels = in_channels if i == 0 else out_channels
346
+ self.convs.append(
347
+ EasyAnimateResidualBlock3D(
348
+ in_channels=in_channels,
349
+ out_channels=out_channels,
350
+ non_linearity=act_fn,
351
+ norm_num_groups=norm_num_groups,
352
+ norm_eps=norm_eps,
353
+ spatial_group_norm=spatial_group_norm,
354
+ dropout=dropout,
355
+ output_scale_factor=output_scale_factor,
356
+ )
357
+ )
358
+
359
+ if add_upsample:
360
+ self.upsampler = EasyAnimateUpsampler3D(
361
+ in_channels,
362
+ in_channels,
363
+ temporal_upsample=add_temporal_upsample,
364
+ spatial_group_norm=spatial_group_norm,
365
+ )
366
+ else:
367
+ self.upsampler = None
368
+
369
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
370
+ for conv in self.convs:
371
+ hidden_states = conv(hidden_states)
372
+ if self.upsampler is not None:
373
+ hidden_states = self.upsampler(hidden_states)
374
+ return hidden_states
375
+
376
+
377
+ class EasyAnimateMidBlock3d(nn.Module):
378
+ def __init__(
379
+ self,
380
+ in_channels: int,
381
+ num_layers: int = 1,
382
+ act_fn: str = "silu",
383
+ norm_num_groups: int = 32,
384
+ norm_eps: float = 1e-6,
385
+ spatial_group_norm: bool = True,
386
+ dropout: float = 0.0,
387
+ output_scale_factor: float = 1.0,
388
+ ):
389
+ super().__init__()
390
+
391
+ norm_num_groups = norm_num_groups if norm_num_groups is not None else min(in_channels // 4, 32)
392
+
393
+ self.convs = nn.ModuleList(
394
+ [
395
+ EasyAnimateResidualBlock3D(
396
+ in_channels=in_channels,
397
+ out_channels=in_channels,
398
+ non_linearity=act_fn,
399
+ norm_num_groups=norm_num_groups,
400
+ norm_eps=norm_eps,
401
+ spatial_group_norm=spatial_group_norm,
402
+ dropout=dropout,
403
+ output_scale_factor=output_scale_factor,
404
+ )
405
+ ]
406
+ )
407
+
408
+ for _ in range(num_layers - 1):
409
+ self.convs.append(
410
+ EasyAnimateResidualBlock3D(
411
+ in_channels=in_channels,
412
+ out_channels=in_channels,
413
+ non_linearity=act_fn,
414
+ norm_num_groups=norm_num_groups,
415
+ norm_eps=norm_eps,
416
+ spatial_group_norm=spatial_group_norm,
417
+ dropout=dropout,
418
+ output_scale_factor=output_scale_factor,
419
+ )
420
+ )
421
+
422
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
423
+ hidden_states = self.convs[0](hidden_states)
424
+ for resnet in self.convs[1:]:
425
+ hidden_states = resnet(hidden_states)
426
+ return hidden_states
427
+
428
+
429
+ class EasyAnimateEncoder(nn.Module):
430
+ r"""
431
+ Causal encoder for 3D video-like data used in [EasyAnimate](https://huggingface.co/papers/2405.18991).
432
+ """
433
+
434
+ _supports_gradient_checkpointing = True
435
+
436
+ def __init__(
437
+ self,
438
+ in_channels: int = 3,
439
+ out_channels: int = 8,
440
+ down_block_types: Tuple[str, ...] = (
441
+ "SpatialDownBlock3D",
442
+ "SpatialTemporalDownBlock3D",
443
+ "SpatialTemporalDownBlock3D",
444
+ "SpatialTemporalDownBlock3D",
445
+ ),
446
+ block_out_channels: Tuple[int, ...] = [128, 256, 512, 512],
447
+ layers_per_block: int = 2,
448
+ norm_num_groups: int = 32,
449
+ act_fn: str = "silu",
450
+ double_z: bool = True,
451
+ spatial_group_norm: bool = False,
452
+ ):
453
+ super().__init__()
454
+
455
+ # 1. Input convolution
456
+ self.conv_in = EasyAnimateCausalConv3d(in_channels, block_out_channels[0], kernel_size=3)
457
+
458
+ # 2. Down blocks
459
+ self.down_blocks = nn.ModuleList([])
460
+ output_channels = block_out_channels[0]
461
+ for i, down_block_type in enumerate(down_block_types):
462
+ input_channels = output_channels
463
+ output_channels = block_out_channels[i]
464
+ is_final_block = i == len(block_out_channels) - 1
465
+ if down_block_type == "SpatialDownBlock3D":
466
+ down_block = EasyAnimateDownBlock3D(
467
+ in_channels=input_channels,
468
+ out_channels=output_channels,
469
+ num_layers=layers_per_block,
470
+ act_fn=act_fn,
471
+ norm_num_groups=norm_num_groups,
472
+ norm_eps=1e-6,
473
+ spatial_group_norm=spatial_group_norm,
474
+ add_downsample=not is_final_block,
475
+ add_temporal_downsample=False,
476
+ )
477
+ elif down_block_type == "SpatialTemporalDownBlock3D":
478
+ down_block = EasyAnimateDownBlock3D(
479
+ in_channels=input_channels,
480
+ out_channels=output_channels,
481
+ num_layers=layers_per_block,
482
+ act_fn=act_fn,
483
+ norm_num_groups=norm_num_groups,
484
+ norm_eps=1e-6,
485
+ spatial_group_norm=spatial_group_norm,
486
+ add_downsample=not is_final_block,
487
+ add_temporal_downsample=True,
488
+ )
489
+ else:
490
+ raise ValueError(f"Unknown up block type: {down_block_type}")
491
+ self.down_blocks.append(down_block)
492
+
493
+ # 3. Middle block
494
+ self.mid_block = EasyAnimateMidBlock3d(
495
+ in_channels=block_out_channels[-1],
496
+ num_layers=layers_per_block,
497
+ act_fn=act_fn,
498
+ spatial_group_norm=spatial_group_norm,
499
+ norm_num_groups=norm_num_groups,
500
+ norm_eps=1e-6,
501
+ dropout=0,
502
+ output_scale_factor=1,
503
+ )
504
+
505
+ # 4. Output normalization & convolution
506
+ self.spatial_group_norm = spatial_group_norm
507
+ self.conv_norm_out = nn.GroupNorm(
508
+ num_channels=block_out_channels[-1],
509
+ num_groups=norm_num_groups,
510
+ eps=1e-6,
511
+ )
512
+ self.conv_act = get_activation(act_fn)
513
+
514
+ # Initialize the output convolution layer
515
+ conv_out_channels = 2 * out_channels if double_z else out_channels
516
+ self.conv_out = EasyAnimateCausalConv3d(block_out_channels[-1], conv_out_channels, kernel_size=3)
517
+
518
+ self.gradient_checkpointing = False
519
+
520
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
521
+ # hidden_states: (B, C, T, H, W)
522
+ hidden_states = self.conv_in(hidden_states)
523
+
524
+ for down_block in self.down_blocks:
525
+ if torch.is_grad_enabled() and self.gradient_checkpointing:
526
+ hidden_states = self._gradient_checkpointing_func(down_block, hidden_states)
527
+ else:
528
+ hidden_states = down_block(hidden_states)
529
+
530
+ hidden_states = self.mid_block(hidden_states)
531
+
532
+ if self.spatial_group_norm:
533
+ batch_size = hidden_states.size(0)
534
+ hidden_states = hidden_states.permute(0, 2, 1, 3, 4).flatten(0, 1)
535
+ hidden_states = self.conv_norm_out(hidden_states)
536
+ hidden_states = hidden_states.unflatten(0, (batch_size, -1)).permute(0, 2, 1, 3, 4)
537
+ else:
538
+ hidden_states = self.conv_norm_out(hidden_states)
539
+
540
+ hidden_states = self.conv_act(hidden_states)
541
+ hidden_states = self.conv_out(hidden_states)
542
+ return hidden_states
543
+
544
+
545
+ class EasyAnimateDecoder(nn.Module):
546
+ r"""
547
+ Causal decoder for 3D video-like data used in [EasyAnimate](https://huggingface.co/papers/2405.18991).
548
+ """
549
+
550
+ _supports_gradient_checkpointing = True
551
+
552
+ def __init__(
553
+ self,
554
+ in_channels: int = 8,
555
+ out_channels: int = 3,
556
+ up_block_types: Tuple[str, ...] = (
557
+ "SpatialUpBlock3D",
558
+ "SpatialTemporalUpBlock3D",
559
+ "SpatialTemporalUpBlock3D",
560
+ "SpatialTemporalUpBlock3D",
561
+ ),
562
+ block_out_channels: Tuple[int, ...] = [128, 256, 512, 512],
563
+ layers_per_block: int = 2,
564
+ norm_num_groups: int = 32,
565
+ act_fn: str = "silu",
566
+ spatial_group_norm: bool = False,
567
+ ):
568
+ super().__init__()
569
+
570
+ # 1. Input convolution
571
+ self.conv_in = EasyAnimateCausalConv3d(in_channels, block_out_channels[-1], kernel_size=3)
572
+
573
+ # 2. Middle block
574
+ self.mid_block = EasyAnimateMidBlock3d(
575
+ in_channels=block_out_channels[-1],
576
+ num_layers=layers_per_block,
577
+ act_fn=act_fn,
578
+ norm_num_groups=norm_num_groups,
579
+ norm_eps=1e-6,
580
+ dropout=0,
581
+ output_scale_factor=1,
582
+ )
583
+
584
+ # 3. Up blocks
585
+ self.up_blocks = nn.ModuleList([])
586
+ reversed_block_out_channels = list(reversed(block_out_channels))
587
+ output_channels = reversed_block_out_channels[0]
588
+ for i, up_block_type in enumerate(up_block_types):
589
+ input_channels = output_channels
590
+ output_channels = reversed_block_out_channels[i]
591
+ is_final_block = i == len(block_out_channels) - 1
592
+
593
+ # Create and append up block to up_blocks
594
+ if up_block_type == "SpatialUpBlock3D":
595
+ up_block = EasyAnimateUpBlock3d(
596
+ in_channels=input_channels,
597
+ out_channels=output_channels,
598
+ num_layers=layers_per_block + 1,
599
+ act_fn=act_fn,
600
+ norm_num_groups=norm_num_groups,
601
+ norm_eps=1e-6,
602
+ spatial_group_norm=spatial_group_norm,
603
+ add_upsample=not is_final_block,
604
+ add_temporal_upsample=False,
605
+ )
606
+ elif up_block_type == "SpatialTemporalUpBlock3D":
607
+ up_block = EasyAnimateUpBlock3d(
608
+ in_channels=input_channels,
609
+ out_channels=output_channels,
610
+ num_layers=layers_per_block + 1,
611
+ act_fn=act_fn,
612
+ norm_num_groups=norm_num_groups,
613
+ norm_eps=1e-6,
614
+ spatial_group_norm=spatial_group_norm,
615
+ add_upsample=not is_final_block,
616
+ add_temporal_upsample=True,
617
+ )
618
+ else:
619
+ raise ValueError(f"Unknown up block type: {up_block_type}")
620
+ self.up_blocks.append(up_block)
621
+
622
+ # Output normalization and activation
623
+ self.spatial_group_norm = spatial_group_norm
624
+ self.conv_norm_out = nn.GroupNorm(
625
+ num_channels=block_out_channels[0],
626
+ num_groups=norm_num_groups,
627
+ eps=1e-6,
628
+ )
629
+ self.conv_act = get_activation(act_fn)
630
+
631
+ # Output convolution layer
632
+ self.conv_out = EasyAnimateCausalConv3d(block_out_channels[0], out_channels, kernel_size=3)
633
+
634
+ self.gradient_checkpointing = False
635
+
636
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
637
+ # hidden_states: (B, C, T, H, W)
638
+ hidden_states = self.conv_in(hidden_states)
639
+
640
+ if torch.is_grad_enabled() and self.gradient_checkpointing:
641
+ hidden_states = self._gradient_checkpointing_func(self.mid_block, hidden_states)
642
+ else:
643
+ hidden_states = self.mid_block(hidden_states)
644
+
645
+ for up_block in self.up_blocks:
646
+ if torch.is_grad_enabled() and self.gradient_checkpointing:
647
+ hidden_states = self._gradient_checkpointing_func(up_block, hidden_states)
648
+ else:
649
+ hidden_states = up_block(hidden_states)
650
+
651
+ if self.spatial_group_norm:
652
+ batch_size = hidden_states.size(0)
653
+ hidden_states = hidden_states.permute(0, 2, 1, 3, 4).flatten(0, 1) # [B, C, T, H, W] -> [B * T, C, H, W]
654
+ hidden_states = self.conv_norm_out(hidden_states)
655
+ hidden_states = hidden_states.unflatten(0, (batch_size, -1)).permute(
656
+ 0, 2, 1, 3, 4
657
+ ) # [B * T, C, H, W] -> [B, C, T, H, W]
658
+ else:
659
+ hidden_states = self.conv_norm_out(hidden_states)
660
+
661
+ hidden_states = self.conv_act(hidden_states)
662
+ hidden_states = self.conv_out(hidden_states)
663
+ return hidden_states
664
+
665
+
666
+ class AutoencoderKLMagvit(ModelMixin, ConfigMixin):
667
+ r"""
668
+ A VAE model with KL loss for encoding images into latents and decoding latent representations into images. This
669
+ model is used in [EasyAnimate](https://huggingface.co/papers/2405.18991).
670
+
671
+ This model inherits from [`ModelMixin`]. Check the superclass documentation for its generic methods implemented
672
+ for all models (such as downloading or saving).
673
+ """
674
+
675
+ _supports_gradient_checkpointing = True
676
+
677
+ @register_to_config
678
+ def __init__(
679
+ self,
680
+ in_channels: int = 3,
681
+ latent_channels: int = 16,
682
+ out_channels: int = 3,
683
+ block_out_channels: Tuple[int, ...] = [128, 256, 512, 512],
684
+ down_block_types: Tuple[str, ...] = [
685
+ "SpatialDownBlock3D",
686
+ "SpatialTemporalDownBlock3D",
687
+ "SpatialTemporalDownBlock3D",
688
+ "SpatialTemporalDownBlock3D",
689
+ ],
690
+ up_block_types: Tuple[str, ...] = [
691
+ "SpatialUpBlock3D",
692
+ "SpatialTemporalUpBlock3D",
693
+ "SpatialTemporalUpBlock3D",
694
+ "SpatialTemporalUpBlock3D",
695
+ ],
696
+ layers_per_block: int = 2,
697
+ act_fn: str = "silu",
698
+ norm_num_groups: int = 32,
699
+ scaling_factor: float = 0.7125,
700
+ spatial_group_norm: bool = True,
701
+ ):
702
+ super().__init__()
703
+
704
+ # Initialize the encoder
705
+ self.encoder = EasyAnimateEncoder(
706
+ in_channels=in_channels,
707
+ out_channels=latent_channels,
708
+ down_block_types=down_block_types,
709
+ block_out_channels=block_out_channels,
710
+ layers_per_block=layers_per_block,
711
+ norm_num_groups=norm_num_groups,
712
+ act_fn=act_fn,
713
+ double_z=True,
714
+ spatial_group_norm=spatial_group_norm,
715
+ )
716
+
717
+ # Initialize the decoder
718
+ self.decoder = EasyAnimateDecoder(
719
+ in_channels=latent_channels,
720
+ out_channels=out_channels,
721
+ up_block_types=up_block_types,
722
+ block_out_channels=block_out_channels,
723
+ layers_per_block=layers_per_block,
724
+ norm_num_groups=norm_num_groups,
725
+ act_fn=act_fn,
726
+ spatial_group_norm=spatial_group_norm,
727
+ )
728
+
729
+ # Initialize convolution layers for quantization and post-quantization
730
+ self.quant_conv = nn.Conv3d(2 * latent_channels, 2 * latent_channels, kernel_size=1)
731
+ self.post_quant_conv = nn.Conv3d(latent_channels, latent_channels, kernel_size=1)
732
+
733
+ self.spatial_compression_ratio = 2 ** (len(block_out_channels) - 1)
734
+ self.temporal_compression_ratio = 2 ** (len(block_out_channels) - 2)
735
+
736
+ # When decoding a batch of video latents at a time, one can save memory by slicing across the batch dimension
737
+ # to perform decoding of a single video latent at a time.
738
+ self.use_slicing = False
739
+
740
+ # When decoding spatially large video latents, the memory requirement is very high. By breaking the video latent
741
+ # frames spatially into smaller tiles and performing multiple forward passes for decoding, and then blending the
742
+ # intermediate tiles together, the memory requirement can be lowered.
743
+ self.use_tiling = False
744
+
745
+ # When decoding temporally long video latents, the memory requirement is very high. By decoding latent frames
746
+ # at a fixed frame batch size (based on `self.num_latent_frames_batch_size`), the memory requirement can be lowered.
747
+ self.use_framewise_encoding = False
748
+ self.use_framewise_decoding = False
749
+
750
+ # Assign mini-batch sizes for encoder and decoder
751
+ self.num_sample_frames_batch_size = 4
752
+ self.num_latent_frames_batch_size = 1
753
+
754
+ # The minimal tile height and width for spatial tiling to be used
755
+ self.tile_sample_min_height = 512
756
+ self.tile_sample_min_width = 512
757
+ self.tile_sample_min_num_frames = 4
758
+
759
+ # The minimal distance between two spatial tiles
760
+ self.tile_sample_stride_height = 448
761
+ self.tile_sample_stride_width = 448
762
+ self.tile_sample_stride_num_frames = 8
763
+
764
+ def _clear_conv_cache(self):
765
+ # Clear cache for convolutional layers if needed
766
+ for name, module in self.named_modules():
767
+ if isinstance(module, EasyAnimateCausalConv3d):
768
+ module._clear_conv_cache()
769
+ if isinstance(module, EasyAnimateUpsampler3D):
770
+ module._clear_conv_cache()
771
+
772
+ def enable_tiling(
773
+ self,
774
+ tile_sample_min_height: Optional[int] = None,
775
+ tile_sample_min_width: Optional[int] = None,
776
+ tile_sample_min_num_frames: Optional[int] = None,
777
+ tile_sample_stride_height: Optional[float] = None,
778
+ tile_sample_stride_width: Optional[float] = None,
779
+ tile_sample_stride_num_frames: Optional[float] = None,
780
+ ) -> None:
781
+ r"""
782
+ Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to
783
+ compute decoding and encoding in several steps. This is useful for saving a large amount of memory and for
784
+ processing larger images.
785
+
786
+ Args:
787
+ tile_sample_min_height (`int`, *optional*):
788
+ The minimum height required for a sample to be separated into tiles across the height dimension.
789
+ tile_sample_min_width (`int`, *optional*):
790
+ The minimum width required for a sample to be separated into tiles across the width dimension.
791
+ tile_sample_stride_height (`int`, *optional*):
792
+ The minimum amount of overlap between two consecutive vertical tiles. This is to ensure that there are
793
+ no tiling artifacts produced across the height dimension.
794
+ tile_sample_stride_width (`int`, *optional*):
795
+ The stride between two consecutive horizontal tiles. This is to ensure that there are no tiling
796
+ artifacts produced across the width dimension.
797
+ """
798
+ self.use_tiling = True
799
+ self.use_framewise_decoding = True
800
+ self.use_framewise_encoding = True
801
+ self.tile_sample_min_height = tile_sample_min_height or self.tile_sample_min_height
802
+ self.tile_sample_min_width = tile_sample_min_width or self.tile_sample_min_width
803
+ self.tile_sample_min_num_frames = tile_sample_min_num_frames or self.tile_sample_min_num_frames
804
+ self.tile_sample_stride_height = tile_sample_stride_height or self.tile_sample_stride_height
805
+ self.tile_sample_stride_width = tile_sample_stride_width or self.tile_sample_stride_width
806
+ self.tile_sample_stride_num_frames = tile_sample_stride_num_frames or self.tile_sample_stride_num_frames
807
+
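+ # Usage sketch (illustrative; `vae` is an AutoencoderKLMagvit instance and `video` a
+ # [B, 3, T, H, W] tensor, both hypothetical):
+ #   vae.enable_tiling()                                 # or pass custom tile sizes/strides
+ #   latents = vae.encode(video).latent_dist.sample()
+ #   frames = vae.decode(latents).sample
+ #   vae.disable_tiling()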
808
+ def disable_tiling(self) -> None:
809
+ r"""
810
+ Disable tiled VAE decoding. If `enable_tiling` was previously enabled, this method will go back to computing
811
+ decoding in one step.
812
+ """
813
+ self.use_tiling = False
814
+
815
+ def enable_slicing(self) -> None:
816
+ r"""
817
+ Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to
818
+ compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.
819
+ """
820
+ self.use_slicing = True
821
+
822
+ def disable_slicing(self) -> None:
823
+ r"""
824
+ Disable sliced VAE decoding. If `enable_slicing` was previously enabled, this method will go back to computing
825
+ decoding in one step.
826
+ """
827
+ self.use_slicing = False
828
+
829
+ @apply_forward_hook
830
+ def _encode(
831
+ self, x: torch.Tensor, return_dict: bool = True
832
+ ) -> Union[AutoencoderKLOutput, Tuple[DiagonalGaussianDistribution]]:
833
+ """
834
+ Encode a batch of images into latents.
835
+
836
+ Args:
837
+ x (`torch.Tensor`): Input batch of images.
838
+ return_dict (`bool`, *optional*, defaults to `True`):
839
+ Whether to return a [`~models.autoencoder_kl.AutoencoderKLOutput`] instead of a plain tuple.
840
+
841
+ Returns:
842
+ The latent representations of the encoded images. If `return_dict` is True, a
843
+ [`~models.autoencoder_kl.AutoencoderKLOutput`] is returned, otherwise a plain `tuple` is returned.
844
+ """
845
+ if self.use_tiling and (x.shape[-1] > self.tile_sample_min_width or x.shape[-2] > self.tile_sample_min_height):
846
+ return self.tiled_encode(x, return_dict=return_dict)
847
+
848
+ first_frames = self.encoder(x[:, :, :1, :, :])
849
+ h = [first_frames]
850
+ for i in range(1, x.shape[2], self.num_sample_frames_batch_size):
851
+ next_frames = self.encoder(x[:, :, i : i + self.num_sample_frames_batch_size, :, :])
852
+ h.append(next_frames)
853
+ h = torch.cat(h, dim=2)
854
+ moments = self.quant_conv(h)
855
+
856
+ self._clear_conv_cache()
857
+ return moments
858
+
859
+ @apply_forward_hook
860
+ def encode(
861
+ self, x: torch.Tensor, return_dict: bool = True
862
+ ) -> Union[AutoencoderKLOutput, Tuple[DiagonalGaussianDistribution]]:
863
+ """
864
+ Encode a batch of images into latents.
865
+
866
+ Args:
867
+ x (`torch.Tensor`): Input batch of images.
868
+ return_dict (`bool`, *optional*, defaults to `True`):
869
+ Whether to return a [`~models.autoencoder_kl.AutoencoderKLOutput`] instead of a plain tuple.
870
+
871
+ Returns:
872
+ The latent representations of the encoded videos. If `return_dict` is True, a
873
+ [`~models.autoencoder_kl.AutoencoderKLOutput`] is returned, otherwise a plain `tuple` is returned.
874
+ """
875
+ if self.use_slicing and x.shape[0] > 1:
876
+ encoded_slices = [self._encode(x_slice) for x_slice in x.split(1)]
877
+ h = torch.cat(encoded_slices)
878
+ else:
879
+ h = self._encode(x)
880
+
881
+ posterior = DiagonalGaussianDistribution(h)
882
+
883
+ if not return_dict:
884
+ return (posterior,)
885
+ return AutoencoderKLOutput(latent_dist=posterior)
886
+
887
+ def _decode(self, z: torch.Tensor, return_dict: bool = True) -> Union[DecoderOutput, torch.Tensor]:
888
+ batch_size, num_channels, num_frames, height, width = z.shape
889
+ tile_latent_min_height = self.tile_sample_min_height // self.spatial_compression_ratio
890
+ tile_latent_min_width = self.tile_sample_min_width // self.spatial_compression_ratio
891
+
892
+ if self.use_tiling and (z.shape[-1] > tile_latent_min_width or z.shape[-2] > tile_latent_min_height):
893
+ return self.tiled_decode(z, return_dict=return_dict)
894
+
895
+ z = self.post_quant_conv(z)
896
+
897
+ # Process the first frame and save the result
898
+ first_frames = self.decoder(z[:, :, :1, :, :])
899
+ # Initialize the list to store the processed frames, starting with the first frame
900
+ dec = [first_frames]
901
+ # Process the remaining frames in batches of size `num_latent_frames_batch_size`
902
+ for i in range(1, z.shape[2], self.num_latent_frames_batch_size):
903
+ next_frames = self.decoder(z[:, :, i : i + self.num_latent_frames_batch_size, :, :])
904
+ dec.append(next_frames)
905
+ # Concatenate all processed frames along the temporal (frame) dimension
906
+ dec = torch.cat(dec, dim=2)
907
+
908
+ if not return_dict:
909
+ return (dec,)
910
+
911
+ return DecoderOutput(sample=dec)
912
+
913
+ @apply_forward_hook
914
+ def decode(self, z: torch.Tensor, return_dict: bool = True) -> Union[DecoderOutput, torch.Tensor]:
915
+ """
916
+ Decode a batch of images.
917
+
918
+ Args:
919
+ z (`torch.Tensor`): Input batch of latent vectors.
920
+ return_dict (`bool`, *optional*, defaults to `True`):
921
+ Whether to return a [`~models.vae.DecoderOutput`] instead of a plain tuple.
922
+
923
+ Returns:
924
+ [`~models.vae.DecoderOutput`] or `tuple`:
925
+ If return_dict is True, a [`~models.vae.DecoderOutput`] is returned, otherwise a plain `tuple` is
926
+ returned.
927
+ """
928
+ if self.use_slicing and z.shape[0] > 1:
929
+ decoded_slices = [self._decode(z_slice).sample for z_slice in z.split(1)]
930
+ decoded = torch.cat(decoded_slices)
931
+ else:
932
+ decoded = self._decode(z).sample
933
+
934
+ self._clear_conv_cache()
935
+ if not return_dict:
936
+ return (decoded,)
937
+ return DecoderOutput(sample=decoded)
938
+
939
+ def blend_v(self, a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor:
940
+ blend_extent = min(a.shape[3], b.shape[3], blend_extent)
941
+ for y in range(blend_extent):
942
+ b[:, :, :, y, :] = a[:, :, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, :, y, :] * (
943
+ y / blend_extent
944
+ )
945
+ return b
946
+
947
+ def blend_h(self, a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor:
948
+ blend_extent = min(a.shape[4], b.shape[4], blend_extent)
949
+ for x in range(blend_extent):
950
+ b[:, :, :, :, x] = a[:, :, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, :, x] * (
951
+ x / blend_extent
952
+ )
953
+ return b
954
+
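+ # Blending sketch (illustrative): over an overlap of `blend_extent` rows/columns the two tiles
+ # are linearly cross-faded,
+ #   b[k] = a[-blend_extent + k] * (1 - k / blend_extent) + b[k] * (k / blend_extent),
+ # so position k = 0 comes entirely from the previous tile and the new tile's weight approaches 1
+ # at the far edge of the overlap, hiding the seam between tiles.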
955
+ def tiled_encode(self, x: torch.Tensor, return_dict: bool = True) -> AutoencoderKLOutput:
956
+ batch_size, num_channels, num_frames, height, width = x.shape
957
+ latent_height = height // self.spatial_compression_ratio
958
+ latent_width = width // self.spatial_compression_ratio
959
+
960
+ tile_latent_min_height = self.tile_sample_min_height // self.spatial_compression_ratio
961
+ tile_latent_min_width = self.tile_sample_min_width // self.spatial_compression_ratio
962
+ tile_latent_stride_height = self.tile_sample_stride_height // self.spatial_compression_ratio
963
+ tile_latent_stride_width = self.tile_sample_stride_width // self.spatial_compression_ratio
964
+
965
+ blend_height = tile_latent_min_height - tile_latent_stride_height
966
+ blend_width = tile_latent_min_width - tile_latent_stride_width
967
+
968
+ # Split the image into 512x512 tiles and encode them separately.
969
+ rows = []
970
+ for i in range(0, height, self.tile_sample_stride_height):
971
+ row = []
972
+ for j in range(0, width, self.tile_sample_stride_width):
973
+ tile = x[
974
+ :,
975
+ :,
976
+ :,
977
+ i : i + self.tile_sample_min_height,
978
+ j : j + self.tile_sample_min_width,
979
+ ]
980
+
981
+ first_frames = self.encoder(tile[:, :, 0:1, :, :])
982
+ tile_h = [first_frames]
983
+ for k in range(1, num_frames, self.num_sample_frames_batch_size):
984
+ next_frames = self.encoder(tile[:, :, k : k + self.num_sample_frames_batch_size, :, :])
985
+ tile_h.append(next_frames)
986
+ tile = torch.cat(tile_h, dim=2)
987
+ tile = self.quant_conv(tile)
988
+ self._clear_conv_cache()
989
+ row.append(tile)
990
+ rows.append(row)
991
+ result_rows = []
992
+ for i, row in enumerate(rows):
993
+ result_row = []
994
+ for j, tile in enumerate(row):
995
+ # blend the above tile and the left tile
996
+ # to the current tile and add the current tile to the result row
997
+ if i > 0:
998
+ tile = self.blend_v(rows[i - 1][j], tile, blend_height)
999
+ if j > 0:
1000
+ tile = self.blend_h(row[j - 1], tile, blend_width)
1001
+ result_row.append(tile[:, :, :, :tile_latent_stride_height, :tile_latent_stride_width])
1002
+ result_rows.append(torch.cat(result_row, dim=4))
1003
+
1004
+ moments = torch.cat(result_rows, dim=3)[:, :, :, :latent_height, :latent_width]
1005
+ return moments
1006
+
1007
+ def tiled_decode(self, z: torch.Tensor, return_dict: bool = True) -> Union[DecoderOutput, torch.Tensor]:
1008
+ batch_size, num_channels, num_frames, height, width = z.shape
1009
+ sample_height = height * self.spatial_compression_ratio
1010
+ sample_width = width * self.spatial_compression_ratio
1011
+
1012
+ tile_latent_min_height = self.tile_sample_min_height // self.spatial_compression_ratio
1013
+ tile_latent_min_width = self.tile_sample_min_width // self.spatial_compression_ratio
1014
+ tile_latent_stride_height = self.tile_sample_stride_height // self.spatial_compression_ratio
1015
+ tile_latent_stride_width = self.tile_sample_stride_width // self.spatial_compression_ratio
1016
+
1017
+ blend_height = self.tile_sample_min_height - self.tile_sample_stride_height
1018
+ blend_width = self.tile_sample_min_width - self.tile_sample_stride_width
1019
+
1020
+ # Split z into overlapping 64x64 tiles and decode them separately.
1021
+ # The tiles have an overlap to avoid seams between tiles.
1022
+ rows = []
1023
+ for i in range(0, height, tile_latent_stride_height):
1024
+ row = []
1025
+ for j in range(0, width, tile_latent_stride_width):
1026
+ tile = z[
1027
+ :,
1028
+ :,
1029
+ :,
1030
+ i : i + tile_latent_min_height,
1031
+ j : j + tile_latent_min_width,
1032
+ ]
1033
+ tile = self.post_quant_conv(tile)
1034
+
1035
+ # Process the first frame and save the result
1036
+ first_frames = self.decoder(tile[:, :, :1, :, :])
1037
+ # Initialize the list to store the processed frames, starting with the first frame
1038
+ tile_dec = [first_frames]
1039
+ # Process the remaining frames in batches of size `num_latent_frames_batch_size`
1040
+ for k in range(1, num_frames, self.num_latent_frames_batch_size):
1041
+ next_frames = self.decoder(tile[:, :, k : k + self.num_latent_frames_batch_size, :, :])
1042
+ tile_dec.append(next_frames)
1043
+ # Concatenate all processed frames along the temporal (frame) dimension
1044
+ decoded = torch.cat(tile_dec, dim=2)
1045
+ self._clear_conv_cache()
1046
+ row.append(decoded)
1047
+ rows.append(row)
1048
+ result_rows = []
1049
+ for i, row in enumerate(rows):
1050
+ result_row = []
1051
+ for j, tile in enumerate(row):
1052
+ # blend the above tile and the left tile
1053
+ # to the current tile and add the current tile to the result row
1054
+ if i > 0:
1055
+ tile = self.blend_v(rows[i - 1][j], tile, blend_height)
1056
+ if j > 0:
1057
+ tile = self.blend_h(row[j - 1], tile, blend_width)
1058
+ result_row.append(tile[:, :, :, : self.tile_sample_stride_height, : self.tile_sample_stride_width])
1059
+ result_rows.append(torch.cat(result_row, dim=4))
1060
+
1061
+ dec = torch.cat(result_rows, dim=3)[:, :, :, :sample_height, :sample_width]
1062
+
1063
+ if not return_dict:
1064
+ return (dec,)
1065
+
1066
+ return DecoderOutput(sample=dec)
1067
+
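+ # Worked example (illustrative, using the default tile settings): a 128x128 latent decodes to a
+ # 1024x1024 sample. Tiles start every tile_latent_stride = 448 // 8 = 56 latent rows/cols, each
+ # covering up to 64 latent (512 sample) pixels; after blending, each tile is cropped to 448
+ # sample pixels before concatenation, and the result is finally cropped to 1024x1024.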
1068
+ def forward(
1069
+ self,
1070
+ sample: torch.Tensor,
1071
+ sample_posterior: bool = False,
1072
+ return_dict: bool = True,
1073
+ generator: Optional[torch.Generator] = None,
1074
+ ) -> Union[DecoderOutput, torch.Tensor]:
1075
+ r"""
1076
+ Args:
1077
+ sample (`torch.Tensor`): Input sample.
1078
+ sample_posterior (`bool`, *optional*, defaults to `False`):
1079
+ Whether to sample from the posterior.
1080
+ return_dict (`bool`, *optional*, defaults to `True`):
1081
+ Whether or not to return a [`DecoderOutput`] instead of a plain tuple.
1082
+ """
1083
+ x = sample
1084
+ posterior = self.encode(x).latent_dist
1085
+ if sample_posterior:
1086
+ z = posterior.sample(generator=generator)
1087
+ else:
1088
+ z = posterior.mode()
1089
+ dec = self.decode(z).sample
1090
+
1091
+ if not return_dict:
1092
+ return (dec,)
1093
+
1094
+ return DecoderOutput(sample=dec)
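+ # End-to-end sketch (illustrative; the checkpoint path and shapes are hypothetical):
+ #   vae = AutoencoderKLMagvit.from_pretrained("path/to/easyanimate-vae", torch_dtype=torch.float16).to("cuda")
+ #   video = torch.randn(1, 3, 9, 512, 512, dtype=torch.float16, device="cuda")
+ #   posterior = vae.encode(video).latent_dist
+ #   latents = posterior.sample() * vae.config.scaling_factor
+ #   frames = vae.decode(latents / vae.config.scaling_factor).sample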
exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/models/autoencoders/autoencoder_kl_mochi.py ADDED
@@ -0,0 +1,1131 @@
1
+ # Copyright 2025 The Mochi team and The HuggingFace Team.
2
+ # All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ import functools
17
+ from typing import Dict, Optional, Tuple, Union
18
+
19
+ import torch
20
+ import torch.nn as nn
21
+
22
+ from ...configuration_utils import ConfigMixin, register_to_config
23
+ from ...utils import logging
24
+ from ...utils.accelerate_utils import apply_forward_hook
25
+ from ..activations import get_activation
26
+ from ..attention_processor import Attention, MochiVaeAttnProcessor2_0
27
+ from ..modeling_outputs import AutoencoderKLOutput
28
+ from ..modeling_utils import ModelMixin
29
+ from .autoencoder_kl_cogvideox import CogVideoXCausalConv3d
30
+ from .vae import DecoderOutput, DiagonalGaussianDistribution
31
+
32
+
33
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
34
+
35
+
36
+ class MochiChunkedGroupNorm3D(nn.Module):
37
+ r"""
38
+ Applies per-frame group normalization for 5D video inputs. It also supports memory-efficient chunked group
39
+ normalization.
40
+
41
+ Args:
42
+ num_channels (int): Number of channels expected in input
43
+ num_groups (int, optional): Number of groups to separate the channels into. Default: 32
44
+ affine (bool, optional): If True, this module has learnable affine parameters. Default: True
45
+ chunk_size (int, optional): Size of each chunk for processing. Default: 8
46
+
47
+ """
48
+
49
+ def __init__(
50
+ self,
51
+ num_channels: int,
52
+ num_groups: int = 32,
53
+ affine: bool = True,
54
+ chunk_size: int = 8,
55
+ ):
56
+ super().__init__()
57
+ self.norm_layer = nn.GroupNorm(num_channels=num_channels, num_groups=num_groups, affine=affine)
58
+ self.chunk_size = chunk_size
59
+
60
+ def forward(self, x: torch.Tensor = None) -> torch.Tensor:
61
+ batch_size = x.size(0)
62
+
63
+ x = x.permute(0, 2, 1, 3, 4).flatten(0, 1)
64
+ output = torch.cat([self.norm_layer(chunk) for chunk in x.split(self.chunk_size, dim=0)], dim=0)
65
+ output = output.unflatten(0, (batch_size, -1)).permute(0, 2, 1, 3, 4)
66
+
67
+ return output
68
+
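+ # Note (illustrative): for a [B, C, T, H, W] input this is numerically equivalent to applying
+ # nn.GroupNorm frame by frame; `chunk_size` only bounds how many frames are normalized at once,
+ # trading peak memory for a few extra kernel launches, e.g.
+ #   norm = MochiChunkedGroupNorm3D(num_channels=64, chunk_size=8)
+ #   y = norm(torch.randn(1, 64, 16, 32, 32))  # same values as with chunk_size=16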
69
+
70
+ class MochiResnetBlock3D(nn.Module):
71
+ r"""
72
+ A 3D ResNet block used in the Mochi model.
73
+
74
+ Args:
75
+ in_channels (`int`):
76
+ Number of input channels.
77
+ out_channels (`int`, *optional*):
78
+ Number of output channels. If None, defaults to `in_channels`.
79
+ non_linearity (`str`, defaults to `"swish"`):
80
+ Activation function to use.
81
+ """
82
+
83
+ def __init__(
84
+ self,
85
+ in_channels: int,
86
+ out_channels: Optional[int] = None,
87
+ act_fn: str = "swish",
88
+ ):
89
+ super().__init__()
90
+
91
+ out_channels = out_channels or in_channels
92
+
93
+ self.in_channels = in_channels
94
+ self.out_channels = out_channels
95
+ self.nonlinearity = get_activation(act_fn)
96
+
97
+ self.norm1 = MochiChunkedGroupNorm3D(num_channels=in_channels)
98
+ self.conv1 = CogVideoXCausalConv3d(
99
+ in_channels=in_channels, out_channels=out_channels, kernel_size=3, stride=1, pad_mode="replicate"
100
+ )
101
+ self.norm2 = MochiChunkedGroupNorm3D(num_channels=out_channels)
102
+ self.conv2 = CogVideoXCausalConv3d(
103
+ in_channels=out_channels, out_channels=out_channels, kernel_size=3, stride=1, pad_mode="replicate"
104
+ )
105
+
106
+ def forward(
107
+ self,
108
+ inputs: torch.Tensor,
109
+ conv_cache: Optional[Dict[str, torch.Tensor]] = None,
110
+ ) -> torch.Tensor:
111
+ new_conv_cache = {}
112
+ conv_cache = conv_cache or {}
113
+
114
+ hidden_states = inputs
115
+
116
+ hidden_states = self.norm1(hidden_states)
117
+ hidden_states = self.nonlinearity(hidden_states)
118
+ hidden_states, new_conv_cache["conv1"] = self.conv1(hidden_states, conv_cache=conv_cache.get("conv1"))
119
+
120
+ hidden_states = self.norm2(hidden_states)
121
+ hidden_states = self.nonlinearity(hidden_states)
122
+ hidden_states, new_conv_cache["conv2"] = self.conv2(hidden_states, conv_cache=conv_cache.get("conv2"))
123
+
124
+ hidden_states = hidden_states + inputs
125
+ return hidden_states, new_conv_cache
126
+
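+ # Cache-passing sketch (illustrative; `chunk1`/`chunk2` are hypothetical [B, 128, T, H, W]
+ # tensors): each call returns the trailing frames seen by its causal convolutions, and feeding
+ # that dict back in lets a long clip be processed chunk by chunk:
+ #   block = MochiResnetBlock3D(in_channels=128)
+ #   out1, cache = block(chunk1)
+ #   out2, cache = block(chunk2, conv_cache=cache)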
127
+
128
+ class MochiDownBlock3D(nn.Module):
129
+ r"""
130
+ A downsampling block used in the Mochi model.
131
+
132
+ Args:
133
+ in_channels (`int`):
134
+ Number of input channels.
135
+ out_channels (`int`, *optional*):
136
+ Number of output channels. If None, defaults to `in_channels`.
137
+ num_layers (`int`, defaults to `1`):
138
+ Number of resnet blocks in the block.
139
+ temporal_expansion (`int`, defaults to `2`):
140
+ Temporal expansion factor.
141
+ spatial_expansion (`int`, defaults to `2`):
142
+ Spatial expansion factor.
143
+ """
144
+
145
+ def __init__(
146
+ self,
147
+ in_channels: int,
148
+ out_channels: int,
149
+ num_layers: int = 1,
150
+ temporal_expansion: int = 2,
151
+ spatial_expansion: int = 2,
152
+ add_attention: bool = True,
153
+ ):
154
+ super().__init__()
155
+ self.temporal_expansion = temporal_expansion
156
+ self.spatial_expansion = spatial_expansion
157
+
158
+ self.conv_in = CogVideoXCausalConv3d(
159
+ in_channels=in_channels,
160
+ out_channels=out_channels,
161
+ kernel_size=(temporal_expansion, spatial_expansion, spatial_expansion),
162
+ stride=(temporal_expansion, spatial_expansion, spatial_expansion),
163
+ pad_mode="replicate",
164
+ )
165
+
166
+ resnets = []
167
+ norms = []
168
+ attentions = []
169
+ for _ in range(num_layers):
170
+ resnets.append(MochiResnetBlock3D(in_channels=out_channels))
171
+ if add_attention:
172
+ norms.append(MochiChunkedGroupNorm3D(num_channels=out_channels))
173
+ attentions.append(
174
+ Attention(
175
+ query_dim=out_channels,
176
+ heads=out_channels // 32,
177
+ dim_head=32,
178
+ qk_norm="l2",
179
+ is_causal=True,
180
+ processor=MochiVaeAttnProcessor2_0(),
181
+ )
182
+ )
183
+ else:
184
+ norms.append(None)
185
+ attentions.append(None)
186
+
187
+ self.resnets = nn.ModuleList(resnets)
188
+ self.norms = nn.ModuleList(norms)
189
+ self.attentions = nn.ModuleList(attentions)
190
+
191
+ self.gradient_checkpointing = False
192
+
193
+ def forward(
194
+ self,
195
+ hidden_states: torch.Tensor,
196
+ conv_cache: Optional[Dict[str, torch.Tensor]] = None,
197
+ chunk_size: int = 2**15,
198
+ ) -> torch.Tensor:
199
+ r"""Forward method of the `MochiUpBlock3D` class."""
200
+
201
+ new_conv_cache = {}
202
+ conv_cache = conv_cache or {}
203
+
204
+ hidden_states, new_conv_cache["conv_in"] = self.conv_in(hidden_states)
205
+
206
+ for i, (resnet, norm, attn) in enumerate(zip(self.resnets, self.norms, self.attentions)):
207
+ conv_cache_key = f"resnet_{i}"
208
+
209
+ if torch.is_grad_enabled() and self.gradient_checkpointing:
210
+ hidden_states, new_conv_cache[conv_cache_key] = self._gradient_checkpointing_func(
211
+ resnet,
212
+ hidden_states,
213
+ conv_cache.get(conv_cache_key),
214
+ )
215
+ else:
216
+ hidden_states, new_conv_cache[conv_cache_key] = resnet(
217
+ hidden_states, conv_cache=conv_cache.get(conv_cache_key)
218
+ )
219
+
220
+ if attn is not None:
221
+ residual = hidden_states
222
+ hidden_states = norm(hidden_states)
223
+
224
+ batch_size, num_channels, num_frames, height, width = hidden_states.shape
225
+ hidden_states = hidden_states.permute(0, 3, 4, 2, 1).flatten(0, 2).contiguous()
226
+
227
+ # Perform attention in chunks to avoid the following error:
228
+ # RuntimeError: CUDA error: invalid configuration argument
229
+ if hidden_states.size(0) <= chunk_size:
230
+ hidden_states = attn(hidden_states)
231
+ else:
232
+ hidden_states_chunks = []
233
+ for i in range(0, hidden_states.size(0), chunk_size):
234
+ hidden_states_chunk = hidden_states[i : i + chunk_size]
235
+ hidden_states_chunk = attn(hidden_states_chunk)
236
+ hidden_states_chunks.append(hidden_states_chunk)
237
+ hidden_states = torch.cat(hidden_states_chunks)
238
+
239
+ hidden_states = hidden_states.unflatten(0, (batch_size, height, width)).permute(0, 4, 3, 1, 2)
240
+
241
+ hidden_states = residual + hidden_states
242
+
243
+ return hidden_states, new_conv_cache
244
+
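+ # Note (illustrative): for attention, the [B, C, T, H, W] features are reshaped so that every
+ # spatial position becomes an independent length-T causal sequence (effective batch B * H * W);
+ # the `chunk_size` argument (default 2**15) only splits that very large batch to stay within
+ # CUDA kernel launch limits and does not change the result.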
245
+
246
+ class MochiMidBlock3D(nn.Module):
247
+ r"""
248
+ A middle block used in the Mochi model.
249
+
250
+ Args:
251
+ in_channels (`int`):
252
+ Number of input channels.
253
+ num_layers (`int`, defaults to `3`):
254
+ Number of resnet blocks in the block.
255
+ """
256
+
257
+ def __init__(
258
+ self,
259
+ in_channels: int, # 768
260
+ num_layers: int = 3,
261
+ add_attention: bool = True,
262
+ ):
263
+ super().__init__()
264
+
265
+ resnets = []
266
+ norms = []
267
+ attentions = []
268
+
269
+ for _ in range(num_layers):
270
+ resnets.append(MochiResnetBlock3D(in_channels=in_channels))
271
+
272
+ if add_attention:
273
+ norms.append(MochiChunkedGroupNorm3D(num_channels=in_channels))
274
+ attentions.append(
275
+ Attention(
276
+ query_dim=in_channels,
277
+ heads=in_channels // 32,
278
+ dim_head=32,
279
+ qk_norm="l2",
280
+ is_causal=True,
281
+ processor=MochiVaeAttnProcessor2_0(),
282
+ )
283
+ )
284
+ else:
285
+ norms.append(None)
286
+ attentions.append(None)
287
+
288
+ self.resnets = nn.ModuleList(resnets)
289
+ self.norms = nn.ModuleList(norms)
290
+ self.attentions = nn.ModuleList(attentions)
291
+
292
+ self.gradient_checkpointing = False
293
+
294
+ def forward(
295
+ self,
296
+ hidden_states: torch.Tensor,
297
+ conv_cache: Optional[Dict[str, torch.Tensor]] = None,
298
+ ) -> torch.Tensor:
299
+ r"""Forward method of the `MochiMidBlock3D` class."""
300
+
301
+ new_conv_cache = {}
302
+ conv_cache = conv_cache or {}
303
+
304
+ for i, (resnet, norm, attn) in enumerate(zip(self.resnets, self.norms, self.attentions)):
305
+ conv_cache_key = f"resnet_{i}"
306
+
307
+ if torch.is_grad_enabled() and self.gradient_checkpointing:
308
+ hidden_states, new_conv_cache[conv_cache_key] = self._gradient_checkpointing_func(
309
+ resnet, hidden_states, conv_cache.get(conv_cache_key)
310
+ )
311
+ else:
312
+ hidden_states, new_conv_cache[conv_cache_key] = resnet(
313
+ hidden_states, conv_cache=conv_cache.get(conv_cache_key)
314
+ )
315
+
316
+ if attn is not None:
317
+ residual = hidden_states
318
+ hidden_states = norm(hidden_states)
319
+
320
+ batch_size, num_channels, num_frames, height, width = hidden_states.shape
321
+ hidden_states = hidden_states.permute(0, 3, 4, 2, 1).flatten(0, 2).contiguous()
322
+ hidden_states = attn(hidden_states)
323
+ hidden_states = hidden_states.unflatten(0, (batch_size, height, width)).permute(0, 4, 3, 1, 2)
324
+
325
+ hidden_states = residual + hidden_states
326
+
327
+ return hidden_states, new_conv_cache
328
+
329
+
330
+ class MochiUpBlock3D(nn.Module):
331
+ r"""
332
+ An upsampling block used in the Mochi model.
333
+
334
+ Args:
335
+ in_channels (`int`):
336
+ Number of input channels.
337
+ out_channels (`int`, *optional*):
338
+ Number of output channels. If None, defaults to `in_channels`.
339
+ num_layers (`int`, defaults to `1`):
340
+ Number of resnet blocks in the block.
341
+ temporal_expansion (`int`, defaults to `2`):
342
+ Temporal expansion factor.
343
+ spatial_expansion (`int`, defaults to `2`):
344
+ Spatial expansion factor.
345
+ """
346
+
347
+ def __init__(
348
+ self,
349
+ in_channels: int,
350
+ out_channels: int,
351
+ num_layers: int = 1,
352
+ temporal_expansion: int = 2,
353
+ spatial_expansion: int = 2,
354
+ ):
355
+ super().__init__()
356
+ self.temporal_expansion = temporal_expansion
357
+ self.spatial_expansion = spatial_expansion
358
+
359
+ resnets = []
360
+ for _ in range(num_layers):
361
+ resnets.append(MochiResnetBlock3D(in_channels=in_channels))
362
+ self.resnets = nn.ModuleList(resnets)
363
+
364
+ self.proj = nn.Linear(in_channels, out_channels * temporal_expansion * spatial_expansion**2)
365
+
366
+ self.gradient_checkpointing = False
367
+
368
+ def forward(
369
+ self,
370
+ hidden_states: torch.Tensor,
371
+ conv_cache: Optional[Dict[str, torch.Tensor]] = None,
372
+ ) -> torch.Tensor:
373
+ r"""Forward method of the `MochiUpBlock3D` class."""
374
+
375
+ new_conv_cache = {}
376
+ conv_cache = conv_cache or {}
377
+
378
+ for i, resnet in enumerate(self.resnets):
379
+ conv_cache_key = f"resnet_{i}"
380
+
381
+ if torch.is_grad_enabled() and self.gradient_checkpointing:
382
+ hidden_states, new_conv_cache[conv_cache_key] = self._gradient_checkpointing_func(
383
+ resnet,
384
+ hidden_states,
385
+ conv_cache.get(conv_cache_key),
386
+ )
387
+ else:
388
+ hidden_states, new_conv_cache[conv_cache_key] = resnet(
389
+ hidden_states, conv_cache=conv_cache.get(conv_cache_key)
390
+ )
391
+
392
+ hidden_states = hidden_states.permute(0, 2, 3, 4, 1)
393
+ hidden_states = self.proj(hidden_states)
394
+ hidden_states = hidden_states.permute(0, 4, 1, 2, 3)
395
+
396
+ batch_size, num_channels, num_frames, height, width = hidden_states.shape
397
+ st = self.temporal_expansion
398
+ sh = self.spatial_expansion
399
+ sw = self.spatial_expansion
400
+
401
+ # Reshape and unpatchify
402
+ hidden_states = hidden_states.view(batch_size, -1, st, sh, sw, num_frames, height, width)
403
+ hidden_states = hidden_states.permute(0, 1, 5, 2, 6, 3, 7, 4).contiguous()
404
+ hidden_states = hidden_states.view(batch_size, -1, num_frames * st, height * sh, width * sw)
405
+
406
+ return hidden_states, new_conv_cache
407
+
408
+
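The `view → permute → view` sequence above is a 3D depth-to-space (pixel-shuffle style) unpatchify: the channel count shrinks by `st * sh * sw` while time, height and width grow by `st`, `sh`, `sw`. A small standalone shape check with illustrative sizes (not part of the upstream file):

import torch

# Toy shapes only; `st`, `sh`, `sw` mirror temporal_expansion / spatial_expansion above.
b, c, t, h, w = 1, 4, 3, 5, 5
st, sh, sw = 2, 2, 2
x = torch.randn(b, c * st * sh * sw, t, h, w)      # output of `self.proj`, channel-first
x = x.view(b, -1, st, sh, sw, t, h, w)
x = x.permute(0, 1, 5, 2, 6, 3, 7, 4).contiguous()
x = x.view(b, -1, t * st, h * sh, w * sw)
print(x.shape)                                     # torch.Size([1, 4, 6, 10, 10])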
409
+ class FourierFeatures(nn.Module):
410
+ def __init__(self, start: int = 6, stop: int = 8, step: int = 1):
411
+ super().__init__()
412
+
413
+ self.start = start
414
+ self.stop = stop
415
+ self.step = step
416
+
417
+ def forward(self, inputs: torch.Tensor) -> torch.Tensor:
418
+ r"""Forward method of the `FourierFeatures` class."""
419
+ original_dtype = inputs.dtype
420
+ inputs = inputs.to(torch.float32)
421
+ num_channels = inputs.shape[1]
422
+ num_freqs = (self.stop - self.start) // self.step
423
+
424
+ freqs = torch.arange(self.start, self.stop, self.step, dtype=inputs.dtype, device=inputs.device)
425
+ w = torch.pow(2.0, freqs) * (2 * torch.pi) # [num_freqs]
426
+ w = w.repeat(num_channels)[None, :, None, None, None] # [1, num_channels * num_freqs, 1, 1, 1]
427
+
428
+ # Interleaved repeat of input channels to match w
429
+ h = inputs.repeat_interleave(
430
+ num_freqs, dim=1, output_size=inputs.shape[1] * num_freqs
431
+ ) # [B, C * num_freqs, T, H, W]
432
+ # Scale channels by frequency.
433
+ h = w * h
434
+
435
+ return torch.cat([inputs, torch.sin(h), torch.cos(h)], dim=1).to(original_dtype)
436
+
437
+
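A quick sanity check of the channel bookkeeping in `FourierFeatures` (a standalone sketch): with the defaults `start=6, stop=8, step=1` there are two frequencies, so a 3-channel video is lifted to `3 * (1 + 2 * 2) = 15` channels, which is why `AutoencoderKLMochi` below defaults to `in_channels=15`.

import torch

start, stop, step = 6, 8, 1
num_freqs = (stop - start) // step                  # 2
x = torch.randn(1, 3, 4, 8, 8)                      # (B, C, T, H, W) toy video
out_channels = x.shape[1] * (1 + 2 * num_freqs)     # original + sin + cos per frequency
print(out_channels)                                 # 15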
438
+ class MochiEncoder3D(nn.Module):
439
+ r"""
440
+ The `MochiEncoder3D` layer of a variational autoencoder that encodes input video samples to its latent
441
+ representation.
442
+
443
+ Args:
444
+ in_channels (`int`, *optional*):
445
+ The number of input channels.
446
+ out_channels (`int`, *optional*):
447
+ The number of output channels.
448
+ block_out_channels (`Tuple[int, ...]`, *optional*, defaults to `(128, 256, 512, 768)`):
449
+ The number of output channels for each block.
450
+ layers_per_block (`Tuple[int, ...]`, *optional*, defaults to `(3, 3, 4, 6, 3)`):
451
+ The number of resnet blocks for each block.
452
+ temporal_expansions (`Tuple[int, ...]`, *optional*, defaults to `(1, 2, 3)`):
453
+ The temporal expansion factor for each of the up blocks.
454
+ spatial_expansions (`Tuple[int, ...]`, *optional*, defaults to `(2, 2, 2)`):
455
+ The spatial expansion factor for each of the up blocks.
456
+ non_linearity (`str`, *optional*, defaults to `"swish"`):
457
+ The non-linearity to use in the decoder.
458
+ """
459
+
460
+ def __init__(
461
+ self,
462
+ in_channels: int,
463
+ out_channels: int,
464
+ block_out_channels: Tuple[int, ...] = (128, 256, 512, 768),
465
+ layers_per_block: Tuple[int, ...] = (3, 3, 4, 6, 3),
466
+ temporal_expansions: Tuple[int, ...] = (1, 2, 3),
467
+ spatial_expansions: Tuple[int, ...] = (2, 2, 2),
468
+ add_attention_block: Tuple[bool, ...] = (False, True, True, True, True),
469
+ act_fn: str = "swish",
470
+ ):
471
+ super().__init__()
472
+
473
+ self.nonlinearity = get_activation(act_fn)
474
+
475
+ self.fourier_features = FourierFeatures()
476
+ self.proj_in = nn.Linear(in_channels, block_out_channels[0])
477
+ self.block_in = MochiMidBlock3D(
478
+ in_channels=block_out_channels[0], num_layers=layers_per_block[0], add_attention=add_attention_block[0]
479
+ )
480
+
481
+ down_blocks = []
482
+ for i in range(len(block_out_channels) - 1):
483
+ down_block = MochiDownBlock3D(
484
+ in_channels=block_out_channels[i],
485
+ out_channels=block_out_channels[i + 1],
486
+ num_layers=layers_per_block[i + 1],
487
+ temporal_expansion=temporal_expansions[i],
488
+ spatial_expansion=spatial_expansions[i],
489
+ add_attention=add_attention_block[i + 1],
490
+ )
491
+ down_blocks.append(down_block)
492
+ self.down_blocks = nn.ModuleList(down_blocks)
493
+
494
+ self.block_out = MochiMidBlock3D(
495
+ in_channels=block_out_channels[-1], num_layers=layers_per_block[-1], add_attention=add_attention_block[-1]
496
+ )
497
+ self.norm_out = MochiChunkedGroupNorm3D(block_out_channels[-1])
498
+ self.proj_out = nn.Linear(block_out_channels[-1], 2 * out_channels, bias=False)
499
+
500
+ self.gradient_checkpointing = False
501
+
502
+ def forward(
503
+ self, hidden_states: torch.Tensor, conv_cache: Optional[Dict[str, torch.Tensor]] = None
504
+ ) -> torch.Tensor:
505
+ r"""Forward method of the `MochiEncoder3D` class."""
506
+
507
+ new_conv_cache = {}
508
+ conv_cache = conv_cache or {}
509
+
510
+ hidden_states = self.fourier_features(hidden_states)
511
+
512
+ hidden_states = hidden_states.permute(0, 2, 3, 4, 1)
513
+ hidden_states = self.proj_in(hidden_states)
514
+ hidden_states = hidden_states.permute(0, 4, 1, 2, 3)
515
+
516
+ if torch.is_grad_enabled() and self.gradient_checkpointing:
517
+ hidden_states, new_conv_cache["block_in"] = self._gradient_checkpointing_func(
518
+ self.block_in, hidden_states, conv_cache.get("block_in")
519
+ )
520
+
521
+ for i, down_block in enumerate(self.down_blocks):
522
+ conv_cache_key = f"down_block_{i}"
523
+ hidden_states, new_conv_cache[conv_cache_key] = self._gradient_checkpointing_func(
524
+ down_block, hidden_states, conv_cache.get(conv_cache_key)
525
+ )
526
+ else:
527
+ hidden_states, new_conv_cache["block_in"] = self.block_in(
528
+ hidden_states, conv_cache=conv_cache.get("block_in")
529
+ )
530
+
531
+ for i, down_block in enumerate(self.down_blocks):
532
+ conv_cache_key = f"down_block_{i}"
533
+ hidden_states, new_conv_cache[conv_cache_key] = down_block(
534
+ hidden_states, conv_cache=conv_cache.get(conv_cache_key)
535
+ )
536
+
537
+ hidden_states, new_conv_cache["block_out"] = self.block_out(
538
+ hidden_states, conv_cache=conv_cache.get("block_out")
539
+ )
540
+
541
+ hidden_states = self.norm_out(hidden_states)
542
+ hidden_states = self.nonlinearity(hidden_states)
543
+
544
+ hidden_states = hidden_states.permute(0, 2, 3, 4, 1)
545
+ hidden_states = self.proj_out(hidden_states)
546
+ hidden_states = hidden_states.permute(0, 4, 1, 2, 3)
547
+
548
+ return hidden_states, new_conv_cache
549
+
550
+
551
+ class MochiDecoder3D(nn.Module):
552
+ r"""
553
+ The `MochiDecoder3D` layer of a variational autoencoder that decodes its latent representation into an output
554
+ sample.
555
+
556
+ Args:
557
+ in_channels (`int`, *optional*):
558
+ The number of input channels.
559
+ out_channels (`int`, *optional*):
560
+ The number of output channels.
561
+ block_out_channels (`Tuple[int, ...]`, *optional*, defaults to `(128, 256, 512, 768)`):
562
+ The number of output channels for each block.
563
+ layers_per_block (`Tuple[int, ...]`, *optional*, defaults to `(3, 3, 4, 6, 3)`):
564
+ The number of resnet blocks for each block.
565
+ temporal_expansions (`Tuple[int, ...]`, *optional*, defaults to `(1, 2, 3)`):
566
+ The temporal expansion factor for each of the up blocks.
567
+ spatial_expansions (`Tuple[int, ...]`, *optional*, defaults to `(2, 2, 2)`):
568
+ The spatial expansion factor for each of the up blocks.
569
+ act_fn (`str`, *optional*, defaults to `"swish"`):
+ The activation function to use in the decoder.
571
+ """
572
+
573
+ def __init__(
574
+ self,
575
+ in_channels: int, # 12
576
+ out_channels: int, # 3
577
+ block_out_channels: Tuple[int, ...] = (128, 256, 512, 768),
578
+ layers_per_block: Tuple[int, ...] = (3, 3, 4, 6, 3),
579
+ temporal_expansions: Tuple[int, ...] = (1, 2, 3),
580
+ spatial_expansions: Tuple[int, ...] = (2, 2, 2),
581
+ act_fn: str = "swish",
582
+ ):
583
+ super().__init__()
584
+
585
+ self.nonlinearity = get_activation(act_fn)
586
+
587
+ self.conv_in = nn.Conv3d(in_channels, block_out_channels[-1], kernel_size=(1, 1, 1))
588
+ self.block_in = MochiMidBlock3D(
589
+ in_channels=block_out_channels[-1],
590
+ num_layers=layers_per_block[-1],
591
+ add_attention=False,
592
+ )
593
+
594
+ up_blocks = []
595
+ for i in range(len(block_out_channels) - 1):
596
+ up_block = MochiUpBlock3D(
597
+ in_channels=block_out_channels[-i - 1],
598
+ out_channels=block_out_channels[-i - 2],
599
+ num_layers=layers_per_block[-i - 2],
600
+ temporal_expansion=temporal_expansions[-i - 1],
601
+ spatial_expansion=spatial_expansions[-i - 1],
602
+ )
603
+ up_blocks.append(up_block)
604
+ self.up_blocks = nn.ModuleList(up_blocks)
605
+
606
+ self.block_out = MochiMidBlock3D(
607
+ in_channels=block_out_channels[0],
608
+ num_layers=layers_per_block[0],
609
+ add_attention=False,
610
+ )
611
+ self.proj_out = nn.Linear(block_out_channels[0], out_channels)
612
+
613
+ self.gradient_checkpointing = False
614
+
615
+ def forward(
616
+ self, hidden_states: torch.Tensor, conv_cache: Optional[Dict[str, torch.Tensor]] = None
617
+ ) -> torch.Tensor:
618
+ r"""Forward method of the `MochiDecoder3D` class."""
619
+
620
+ new_conv_cache = {}
621
+ conv_cache = conv_cache or {}
622
+
623
+ hidden_states = self.conv_in(hidden_states)
624
+
625
+ # 1. Mid
626
+ if torch.is_grad_enabled() and self.gradient_checkpointing:
627
+ hidden_states, new_conv_cache["block_in"] = self._gradient_checkpointing_func(
628
+ self.block_in, hidden_states, conv_cache.get("block_in")
629
+ )
630
+
631
+ for i, up_block in enumerate(self.up_blocks):
632
+ conv_cache_key = f"up_block_{i}"
633
+ hidden_states, new_conv_cache[conv_cache_key] = self._gradient_checkpointing_func(
634
+ up_block, hidden_states, conv_cache.get(conv_cache_key)
635
+ )
636
+ else:
637
+ hidden_states, new_conv_cache["block_in"] = self.block_in(
638
+ hidden_states, conv_cache=conv_cache.get("block_in")
639
+ )
640
+
641
+ for i, up_block in enumerate(self.up_blocks):
642
+ conv_cache_key = f"up_block_{i}"
643
+ hidden_states, new_conv_cache[conv_cache_key] = up_block(
644
+ hidden_states, conv_cache=conv_cache.get(conv_cache_key)
645
+ )
646
+
647
+ hidden_states, new_conv_cache["block_out"] = self.block_out(
648
+ hidden_states, conv_cache=conv_cache.get("block_out")
649
+ )
650
+
651
+ hidden_states = self.nonlinearity(hidden_states)
652
+
653
+ hidden_states = hidden_states.permute(0, 2, 3, 4, 1)
654
+ hidden_states = self.proj_out(hidden_states)
655
+ hidden_states = hidden_states.permute(0, 4, 1, 2, 3)
656
+
657
+ return hidden_states, new_conv_cache
658
+
659
+
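For reference, the compression ratios that `AutoencoderKLMochi.__init__` below derives from the expansion factors reduce to 8x spatially and 6x temporally; a minimal sketch of that arithmetic (the frame size is illustrative):

import functools

spatial_expansions = (2, 2, 2)
temporal_expansions = (1, 2, 3)
spatial_ratio = functools.reduce(lambda x, y: x * y, spatial_expansions, 1)    # 8
temporal_ratio = functools.reduce(lambda x, y: x * y, temporal_expansions, 1)  # 6
print(spatial_ratio, temporal_ratio)   # e.g. a 480x848 frame maps to a 60x106 latent grid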
660
+ class AutoencoderKLMochi(ModelMixin, ConfigMixin):
661
+ r"""
662
+ A VAE model with KL loss for encoding images into latents and decoding latent representations into images. Used in
663
+ [Mochi 1 preview](https://github.com/genmoai/models).
664
+
665
+ This model inherits from [`ModelMixin`]. Check the superclass documentation for its generic methods implemented
666
+ for all models (such as downloading or saving).
667
+
668
+ Parameters:
669
+ in_channels (int, *optional*, defaults to 3): Number of channels in the input image.
670
+ out_channels (int, *optional*, defaults to 3): Number of channels in the output.
671
+ block_out_channels (`Tuple[int]`, *optional*, defaults to `(64,)`):
672
+ Tuple of block output channels.
673
+ act_fn (`str`, *optional*, defaults to `"silu"`): The activation function to use.
674
+ scaling_factor (`float`, *optional*, defaults to `1.15258426`):
675
+ The component-wise standard deviation of the trained latent space computed using the first batch of the
676
+ training set. This is used to scale the latent space to have unit variance when training the diffusion
677
+ model. The latents are scaled with the formula `z = z * scaling_factor` before being passed to the
678
+ diffusion model. When decoding, the latents are scaled back to the original scale with the formula: `z = 1
679
+ / scaling_factor * z`. For more details, refer to sections 4.3.2 and D.1 of the [High-Resolution Image
680
+ Synthesis with Latent Diffusion Models](https://huggingface.co/papers/2112.10752) paper.
681
+ """
682
+
683
+ _supports_gradient_checkpointing = True
684
+ _no_split_modules = ["MochiResnetBlock3D"]
685
+
686
+ @register_to_config
687
+ def __init__(
688
+ self,
689
+ in_channels: int = 15,
690
+ out_channels: int = 3,
691
+ encoder_block_out_channels: Tuple[int] = (64, 128, 256, 384),
692
+ decoder_block_out_channels: Tuple[int] = (128, 256, 512, 768),
693
+ latent_channels: int = 12,
694
+ layers_per_block: Tuple[int, ...] = (3, 3, 4, 6, 3),
695
+ act_fn: str = "silu",
696
+ temporal_expansions: Tuple[int, ...] = (1, 2, 3),
697
+ spatial_expansions: Tuple[int, ...] = (2, 2, 2),
698
+ add_attention_block: Tuple[bool, ...] = (False, True, True, True, True),
699
+ latents_mean: Tuple[float, ...] = (
700
+ -0.06730895953510081,
701
+ -0.038011381506090416,
702
+ -0.07477820912866141,
703
+ -0.05565264470995561,
704
+ 0.012767231469026969,
705
+ -0.04703542746246419,
706
+ 0.043896967884726704,
707
+ -0.09346305707025976,
708
+ -0.09918314763016893,
709
+ -0.008729793427399178,
710
+ -0.011931556316503654,
711
+ -0.0321993391887285,
712
+ ),
713
+ latents_std: Tuple[float, ...] = (
714
+ 0.9263795028493863,
715
+ 0.9248894543193766,
716
+ 0.9393059390890617,
717
+ 0.959253732819592,
718
+ 0.8244560132752793,
719
+ 0.917259975397747,
720
+ 0.9294154431013696,
721
+ 1.3720942357788521,
722
+ 0.881393668867029,
723
+ 0.9168315692124348,
724
+ 0.9185249279345552,
725
+ 0.9274757570805041,
726
+ ),
727
+ scaling_factor: float = 1.0,
728
+ ):
729
+ super().__init__()
730
+
731
+ self.encoder = MochiEncoder3D(
732
+ in_channels=in_channels,
733
+ out_channels=latent_channels,
734
+ block_out_channels=encoder_block_out_channels,
735
+ layers_per_block=layers_per_block,
736
+ temporal_expansions=temporal_expansions,
737
+ spatial_expansions=spatial_expansions,
738
+ add_attention_block=add_attention_block,
739
+ act_fn=act_fn,
740
+ )
741
+ self.decoder = MochiDecoder3D(
742
+ in_channels=latent_channels,
743
+ out_channels=out_channels,
744
+ block_out_channels=decoder_block_out_channels,
745
+ layers_per_block=layers_per_block,
746
+ temporal_expansions=temporal_expansions,
747
+ spatial_expansions=spatial_expansions,
748
+ act_fn=act_fn,
749
+ )
750
+
751
+ self.spatial_compression_ratio = functools.reduce(lambda x, y: x * y, spatial_expansions, 1)
752
+ self.temporal_compression_ratio = functools.reduce(lambda x, y: x * y, temporal_expansions, 1)
753
+
754
+ # When decoding a batch of video latents at a time, one can save memory by slicing across the batch dimension
755
+ # to perform decoding of a single video latent at a time.
756
+ self.use_slicing = False
757
+
758
+ # When decoding spatially large video latents, the memory requirement is very high. By breaking the video latent
759
+ # frames spatially into smaller tiles and performing multiple forward passes for decoding, and then blending the
760
+ # intermediate tiles together, the memory requirement can be lowered.
761
+ self.use_tiling = False
762
+
763
+ # When decoding temporally long video latents, the memory requirement is very high. By decoding latent frames
764
+ # at a fixed frame batch size (based on `self.num_latent_frames_batch_sizes`), the memory requirement can be lowered.
765
+ self.use_framewise_encoding = False
766
+ self.use_framewise_decoding = False
767
+
768
+ # This determines the number of output frames in the final decoded video. To maintain consistency with
+ # the original implementation, this defaults to `True`.
+ # - Original implementation (drop_last_temporal_frames=True):
+ #     Output frames = (latent_frames - 1) * temporal_compression_ratio + 1
+ # - Without dropping the additional temporally upscaled frames (drop_last_temporal_frames=False):
+ #     Output frames = latent_frames * temporal_compression_ratio
+ # The latter case is useful for frame packing and some training/finetuning scenarios where the additional
+ # frames should be kept.
775
+ self.drop_last_temporal_frames = True
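+ # Worked example (illustrative): with temporal_compression_ratio = 6 and 28 latent frames,
+ # drop_last_temporal_frames=True yields (28 - 1) * 6 + 1 = 163 output frames, while
+ # drop_last_temporal_frames=False would yield 28 * 6 = 168 output frames.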
776
+
777
+ # This can be configured based on the amount of GPU memory available.
778
+ # `12` for sample frames and `2` for latent frames are sensible defaults for consumer GPUs.
779
+ # Setting it to higher values results in higher memory usage.
780
+ self.num_sample_frames_batch_size = 12
781
+ self.num_latent_frames_batch_size = 2
782
+
783
+ # The minimal tile height and width for spatial tiling to be used
784
+ self.tile_sample_min_height = 256
785
+ self.tile_sample_min_width = 256
786
+
787
+ # The minimal distance between two spatial tiles
788
+ self.tile_sample_stride_height = 192
789
+ self.tile_sample_stride_width = 192
790
+
791
+ def enable_tiling(
792
+ self,
793
+ tile_sample_min_height: Optional[int] = None,
794
+ tile_sample_min_width: Optional[int] = None,
795
+ tile_sample_stride_height: Optional[float] = None,
796
+ tile_sample_stride_width: Optional[float] = None,
797
+ ) -> None:
798
+ r"""
799
+ Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to
800
+ compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow
801
+ processing larger images.
802
+
803
+ Args:
804
+ tile_sample_min_height (`int`, *optional*):
805
+ The minimum height required for a sample to be separated into tiles across the height dimension.
806
+ tile_sample_min_width (`int`, *optional*):
807
+ The minimum width required for a sample to be separated into tiles across the width dimension.
808
+ tile_sample_stride_height (`int`, *optional*):
809
+ The stride between two consecutive vertical tiles. This is to ensure that there are
810
+ no tiling artifacts produced across the height dimension.
811
+ tile_sample_stride_width (`int`, *optional*):
812
+ The stride between two consecutive horizontal tiles. This is to ensure that there are no tiling
813
+ artifacts produced across the width dimension.
814
+ """
815
+ self.use_tiling = True
816
+ self.tile_sample_min_height = tile_sample_min_height or self.tile_sample_min_height
817
+ self.tile_sample_min_width = tile_sample_min_width or self.tile_sample_min_width
818
+ self.tile_sample_stride_height = tile_sample_stride_height or self.tile_sample_stride_height
819
+ self.tile_sample_stride_width = tile_sample_stride_width or self.tile_sample_stride_width
820
+
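A hedged usage sketch of the tiling and slicing switches (the import path simply mirrors the file added above; the model is randomly initialised, which is enough for shape experiments):

from diffusers.models.autoencoders.autoencoder_kl_mochi import AutoencoderKLMochi

vae = AutoencoderKLMochi()   # default config, random weights
vae.enable_tiling(
    tile_sample_min_height=256,
    tile_sample_min_width=256,
    tile_sample_stride_height=192,
    tile_sample_stride_width=192,
)
vae.enable_slicing()         # additionally decode one latent video at a time across the batch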
821
+ def disable_tiling(self) -> None:
822
+ r"""
823
+ Disable tiled VAE decoding. If `enable_tiling` was previously enabled, this method will go back to computing
824
+ decoding in one step.
825
+ """
826
+ self.use_tiling = False
827
+
828
+ def enable_slicing(self) -> None:
829
+ r"""
830
+ Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to
831
+ compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.
832
+ """
833
+ self.use_slicing = True
834
+
835
+ def disable_slicing(self) -> None:
836
+ r"""
837
+ Disable sliced VAE decoding. If `enable_slicing` was previously enabled, this method will go back to computing
838
+ decoding in one step.
839
+ """
840
+ self.use_slicing = False
841
+
842
+ def _enable_framewise_encoding(self):
843
+ r"""
844
+ Enables the framewise VAE encoding implementation with past latent padding. By default, Diffusers uses the
845
+ oneshot encoding implementation without current latent replicate padding.
846
+
847
+ Warning: Framewise encoding may not work as expected due to the causal attention layers. If you enable
848
+ framewise encoding, encode a video, and try to decode it, there will be a noticeable jittering effect.
849
+ """
850
+ self.use_framewise_encoding = True
851
+ for name, module in self.named_modules():
852
+ if isinstance(module, CogVideoXCausalConv3d):
853
+ module.pad_mode = "constant"
854
+
855
+ def _enable_framewise_decoding(self):
856
+ r"""
857
+ Enables the framewise VAE decoding implementation with past latent padding. By default, Diffusers uses the
858
+ oneshot decoding implementation without current latent replicate padding.
859
+ """
860
+ self.use_framewise_decoding = True
861
+ for name, module in self.named_modules():
862
+ if isinstance(module, CogVideoXCausalConv3d):
863
+ module.pad_mode = "constant"
864
+
865
+ def _encode(self, x: torch.Tensor) -> torch.Tensor:
866
+ batch_size, num_channels, num_frames, height, width = x.shape
867
+
868
+ if self.use_tiling and (width > self.tile_sample_min_width or height > self.tile_sample_min_height):
869
+ return self.tiled_encode(x)
870
+
871
+ if self.use_framewise_encoding:
872
+ raise NotImplementedError(
873
+ "Frame-wise encoding does not work with the Mochi VAE Encoder due to the presence of attention layers. "
874
+ "As intermediate frames are not independent from each other, they cannot be encoded frame-wise."
875
+ )
876
+ else:
877
+ enc, _ = self.encoder(x)
878
+
879
+ return enc
880
+
881
+ @apply_forward_hook
882
+ def encode(
883
+ self, x: torch.Tensor, return_dict: bool = True
884
+ ) -> Union[AutoencoderKLOutput, Tuple[DiagonalGaussianDistribution]]:
885
+ """
886
+ Encode a batch of images into latents.
887
+
888
+ Args:
889
+ x (`torch.Tensor`): Input batch of images.
890
+ return_dict (`bool`, *optional*, defaults to `True`):
891
+ Whether to return a [`~models.autoencoder_kl.AutoencoderKLOutput`] instead of a plain tuple.
892
+
893
+ Returns:
894
+ The latent representations of the encoded videos. If `return_dict` is True, a
895
+ [`~models.autoencoder_kl.AutoencoderKLOutput`] is returned, otherwise a plain `tuple` is returned.
896
+ """
897
+ if self.use_slicing and x.shape[0] > 1:
898
+ encoded_slices = [self._encode(x_slice) for x_slice in x.split(1)]
899
+ h = torch.cat(encoded_slices)
900
+ else:
901
+ h = self._encode(x)
902
+
903
+ posterior = DiagonalGaussianDistribution(h)
904
+
905
+ if not return_dict:
906
+ return (posterior,)
907
+ return AutoencoderKLOutput(latent_dist=posterior)
908
+
909
+ def _decode(self, z: torch.Tensor, return_dict: bool = True) -> Union[DecoderOutput, torch.Tensor]:
910
+ batch_size, num_channels, num_frames, height, width = z.shape
911
+ tile_latent_min_height = self.tile_sample_min_height // self.spatial_compression_ratio
912
+ tile_latent_min_width = self.tile_sample_min_width // self.spatial_compression_ratio
913
+
914
+ if self.use_tiling and (width > tile_latent_min_width or height > tile_latent_min_height):
915
+ return self.tiled_decode(z, return_dict=return_dict)
916
+
917
+ if self.use_framewise_decoding:
918
+ conv_cache = None
919
+ dec = []
920
+
921
+ for i in range(0, num_frames, self.num_latent_frames_batch_size):
922
+ z_intermediate = z[:, :, i : i + self.num_latent_frames_batch_size]
923
+ z_intermediate, conv_cache = self.decoder(z_intermediate, conv_cache=conv_cache)
924
+ dec.append(z_intermediate)
925
+
926
+ dec = torch.cat(dec, dim=2)
927
+ else:
928
+ dec, _ = self.decoder(z)
929
+
930
+ if self.drop_last_temporal_frames and dec.size(2) >= self.temporal_compression_ratio:
931
+ dec = dec[:, :, self.temporal_compression_ratio - 1 :]
932
+
933
+ if not return_dict:
934
+ return (dec,)
935
+
936
+ return DecoderOutput(sample=dec)
937
+
938
+ @apply_forward_hook
939
+ def decode(self, z: torch.Tensor, return_dict: bool = True) -> Union[DecoderOutput, torch.Tensor]:
940
+ """
941
+ Decode a batch of images.
942
+
943
+ Args:
944
+ z (`torch.Tensor`): Input batch of latent vectors.
945
+ return_dict (`bool`, *optional*, defaults to `True`):
946
+ Whether to return a [`~models.vae.DecoderOutput`] instead of a plain tuple.
947
+
948
+ Returns:
949
+ [`~models.vae.DecoderOutput`] or `tuple`:
950
+ If return_dict is True, a [`~models.vae.DecoderOutput`] is returned, otherwise a plain `tuple` is
951
+ returned.
952
+ """
953
+ if self.use_slicing and z.shape[0] > 1:
954
+ decoded_slices = [self._decode(z_slice).sample for z_slice in z.split(1)]
955
+ decoded = torch.cat(decoded_slices)
956
+ else:
957
+ decoded = self._decode(z).sample
958
+
959
+ if not return_dict:
960
+ return (decoded,)
961
+
962
+ return DecoderOutput(sample=decoded)
963
+
964
+ def blend_v(self, a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor:
965
+ blend_extent = min(a.shape[3], b.shape[3], blend_extent)
966
+ for y in range(blend_extent):
967
+ b[:, :, :, y, :] = a[:, :, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, :, y, :] * (
968
+ y / blend_extent
969
+ )
970
+ return b
971
+
972
+ def blend_h(self, a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor:
973
+ blend_extent = min(a.shape[4], b.shape[4], blend_extent)
974
+ for x in range(blend_extent):
975
+ b[:, :, :, :, x] = a[:, :, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, :, x] * (
976
+ x / blend_extent
977
+ )
978
+ return b
979
+
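The two blend helpers above implement a linear cross-fade over the overlapping region; a tiny standalone illustration of the weights:

import torch

blend_extent = 4
y = torch.arange(blend_extent)
w_new = y / blend_extent        # weight of the current tile:  0.00, 0.25, 0.50, 0.75
w_prev = 1 - w_new              # weight of the previous tile: 1.00, 0.75, 0.50, 0.25
print(w_prev, w_new)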
980
+ def tiled_encode(self, x: torch.Tensor) -> torch.Tensor:
981
+ r"""Encode a batch of images using a tiled encoder.
982
+
983
+ Args:
984
+ x (`torch.Tensor`): Input batch of videos.
985
+
986
+ Returns:
987
+ `torch.Tensor`:
988
+ The latent representation of the encoded videos.
989
+ """
990
+ batch_size, num_channels, num_frames, height, width = x.shape
991
+ latent_height = height // self.spatial_compression_ratio
992
+ latent_width = width // self.spatial_compression_ratio
993
+
994
+ tile_latent_min_height = self.tile_sample_min_height // self.spatial_compression_ratio
995
+ tile_latent_min_width = self.tile_sample_min_width // self.spatial_compression_ratio
996
+ tile_latent_stride_height = self.tile_sample_stride_height // self.spatial_compression_ratio
997
+ tile_latent_stride_width = self.tile_sample_stride_width // self.spatial_compression_ratio
998
+
999
+ blend_height = tile_latent_min_height - tile_latent_stride_height
1000
+ blend_width = tile_latent_min_width - tile_latent_stride_width
1001
+
1002
+ # Split x into overlapping tiles and encode them separately.
1003
+ # The tiles have an overlap to avoid seams between tiles.
1004
+ rows = []
1005
+ for i in range(0, height, self.tile_sample_stride_height):
1006
+ row = []
1007
+ for j in range(0, width, self.tile_sample_stride_width):
1008
+ if self.use_framewise_encoding:
1009
+ raise NotImplementedError(
1010
+ "Frame-wise encoding does not work with the Mochi VAE Encoder due to the presence of attention layers. "
1011
+ "As intermediate frames are not independent from each other, they cannot be encoded frame-wise."
1012
+ )
1013
+ else:
1014
+ time, _ = self.encoder(
1015
+ x[:, :, :, i : i + self.tile_sample_min_height, j : j + self.tile_sample_min_width]
1016
+ )
1017
+
1018
+ row.append(time)
1019
+ rows.append(row)
1020
+
1021
+ result_rows = []
1022
+ for i, row in enumerate(rows):
1023
+ result_row = []
1024
+ for j, tile in enumerate(row):
1025
+ # blend the above tile and the left tile
1026
+ # to the current tile and add the current tile to the result row
1027
+ if i > 0:
1028
+ tile = self.blend_v(rows[i - 1][j], tile, blend_height)
1029
+ if j > 0:
1030
+ tile = self.blend_h(row[j - 1], tile, blend_width)
1031
+ result_row.append(tile[:, :, :, :tile_latent_stride_height, :tile_latent_stride_width])
1032
+ result_rows.append(torch.cat(result_row, dim=4))
1033
+
1034
+ enc = torch.cat(result_rows, dim=3)[:, :, :, :latent_height, :latent_width]
1035
+ return enc
1036
+
1037
+ def tiled_decode(self, z: torch.Tensor, return_dict: bool = True) -> Union[DecoderOutput, torch.Tensor]:
1038
+ r"""
1039
+ Decode a batch of images using a tiled decoder.
1040
+
1041
+ Args:
1042
+ z (`torch.Tensor`): Input batch of latent vectors.
1043
+ return_dict (`bool`, *optional*, defaults to `True`):
1044
+ Whether or not to return a [`~models.vae.DecoderOutput`] instead of a plain tuple.
1045
+
1046
+ Returns:
1047
+ [`~models.vae.DecoderOutput`] or `tuple`:
1048
+ If return_dict is True, a [`~models.vae.DecoderOutput`] is returned, otherwise a plain `tuple` is
1049
+ returned.
1050
+ """
1051
+
1052
+ batch_size, num_channels, num_frames, height, width = z.shape
1053
+ sample_height = height * self.spatial_compression_ratio
1054
+ sample_width = width * self.spatial_compression_ratio
1055
+
1056
+ tile_latent_min_height = self.tile_sample_min_height // self.spatial_compression_ratio
1057
+ tile_latent_min_width = self.tile_sample_min_width // self.spatial_compression_ratio
1058
+ tile_latent_stride_height = self.tile_sample_stride_height // self.spatial_compression_ratio
1059
+ tile_latent_stride_width = self.tile_sample_stride_width // self.spatial_compression_ratio
1060
+
1061
+ blend_height = self.tile_sample_min_height - self.tile_sample_stride_height
1062
+ blend_width = self.tile_sample_min_width - self.tile_sample_stride_width
1063
+
1064
+ # Split z into overlapping tiles and decode them separately.
1065
+ # The tiles have an overlap to avoid seams between tiles.
1066
+ rows = []
1067
+ for i in range(0, height, tile_latent_stride_height):
1068
+ row = []
1069
+ for j in range(0, width, tile_latent_stride_width):
1070
+ if self.use_framewise_decoding:
1071
+ time = []
1072
+ conv_cache = None
1073
+
1074
+ for k in range(0, num_frames, self.num_latent_frames_batch_size):
1075
+ tile = z[
1076
+ :,
1077
+ :,
1078
+ k : k + self.num_latent_frames_batch_size,
1079
+ i : i + tile_latent_min_height,
1080
+ j : j + tile_latent_min_width,
1081
+ ]
1082
+ tile, conv_cache = self.decoder(tile, conv_cache=conv_cache)
1083
+ time.append(tile)
1084
+
1085
+ time = torch.cat(time, dim=2)
1086
+ else:
1087
+ time, _ = self.decoder(z[:, :, :, i : i + tile_latent_min_height, j : j + tile_latent_min_width])
1088
+
1089
+ if self.drop_last_temporal_frames and time.size(2) >= self.temporal_compression_ratio:
1090
+ time = time[:, :, self.temporal_compression_ratio - 1 :]
1091
+
1092
+ row.append(time)
1093
+ rows.append(row)
1094
+
1095
+ result_rows = []
1096
+ for i, row in enumerate(rows):
1097
+ result_row = []
1098
+ for j, tile in enumerate(row):
1099
+ # blend the above tile and the left tile
1100
+ # to the current tile and add the current tile to the result row
1101
+ if i > 0:
1102
+ tile = self.blend_v(rows[i - 1][j], tile, blend_height)
1103
+ if j > 0:
1104
+ tile = self.blend_h(row[j - 1], tile, blend_width)
1105
+ result_row.append(tile[:, :, :, : self.tile_sample_stride_height, : self.tile_sample_stride_width])
1106
+ result_rows.append(torch.cat(result_row, dim=4))
1107
+
1108
+ dec = torch.cat(result_rows, dim=3)[:, :, :, :sample_height, :sample_width]
1109
+
1110
+ if not return_dict:
1111
+ return (dec,)
1112
+
1113
+ return DecoderOutput(sample=dec)
1114
+
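Rough cost model for `tiled_decode` (illustrative numbers, assuming the default 256/192 tile settings and the 8x spatial ratio): the double loop issues one decoder pass per latent tile.

import math

latent_h, latent_w = 60, 106                      # e.g. a 480x848 video at 8x spatial compression
tile_latent_stride = 192 // 8                     # 24
rows = math.ceil(latent_h / tile_latent_stride)   # 3
cols = math.ceil(latent_w / tile_latent_stride)   # 5
print(rows * cols)                                # 15 decoder passes instead of one full-frame pass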
1115
+ def forward(
1116
+ self,
1117
+ sample: torch.Tensor,
1118
+ sample_posterior: bool = False,
1119
+ return_dict: bool = True,
1120
+ generator: Optional[torch.Generator] = None,
1121
+ ) -> Union[DecoderOutput, torch.Tensor]:
1122
+ x = sample
1123
+ posterior = self.encode(x).latent_dist
1124
+ if sample_posterior:
1125
+ z = posterior.sample(generator=generator)
1126
+ else:
1127
+ z = posterior.mode()
1128
+ dec = self.decode(z)
1129
+ if not return_dict:
1130
+ return (dec,)
1131
+ return dec
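A minimal end-to-end sketch of this class (hedged: the repository id and `subfolder` are assumptions about where diffusers-format Mochi VAE weights live, and the commented shapes are approximate):

import torch
from diffusers import AutoencoderKLMochi

vae = AutoencoderKLMochi.from_pretrained(
    "genmo/mochi-1-preview", subfolder="vae", torch_dtype=torch.float32
)
vae.enable_tiling()

video = torch.randn(1, 3, 7, 256, 256)                  # (B, C, T, H, W), T = 6k + 1
with torch.no_grad():
    latents = vae.encode(video).latent_dist.sample()    # roughly (1, 12, 2, 32, 32)
    recon = vae.decode(latents).sample                  # back to roughly (1, 3, 7, 256, 256)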
exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/models/autoencoders/autoencoder_kl_qwenimage.py ADDED
@@ -0,0 +1,1070 @@
1
+ # Copyright 2025 The Qwen-Image Team, Wan Team and The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ #
15
+ # We gratefully acknowledge the Wan Team for their outstanding contributions.
16
+ # QwenImageVAE is further fine-tuned from the Wan Video VAE to achieve improved performance.
17
+ # For more information about the Wan VAE, please refer to:
18
+ # - GitHub: https://github.com/Wan-Video/Wan2.1
19
+ # - arXiv: https://arxiv.org/abs/2503.20314
20
+
21
+ from typing import List, Optional, Tuple, Union
22
+
23
+ import torch
24
+ import torch.nn as nn
25
+ import torch.nn.functional as F
26
+ import torch.utils.checkpoint
27
+
28
+ from ...configuration_utils import ConfigMixin, register_to_config
29
+ from ...loaders import FromOriginalModelMixin
30
+ from ...utils import logging
31
+ from ...utils.accelerate_utils import apply_forward_hook
32
+ from ..activations import get_activation
33
+ from ..modeling_outputs import AutoencoderKLOutput
34
+ from ..modeling_utils import ModelMixin
35
+ from .vae import DecoderOutput, DiagonalGaussianDistribution
36
+
37
+
38
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
39
+
40
+ CACHE_T = 2
41
+
42
+
43
+ class QwenImageCausalConv3d(nn.Conv3d):
44
+ r"""
45
+ A custom 3D causal convolution layer with feature caching support.
46
+
47
+ This layer extends the standard Conv3D layer by ensuring causality in the time dimension and handling feature
48
+ caching for efficient inference.
49
+
50
+ Args:
51
+ in_channels (int): Number of channels in the input image
52
+ out_channels (int): Number of channels produced by the convolution
53
+ kernel_size (int or tuple): Size of the convolving kernel
54
+ stride (int or tuple, optional): Stride of the convolution. Default: 1
55
+ padding (int or tuple, optional): Zero-padding added to all three sides of the input. Default: 0
56
+ """
57
+
58
+ def __init__(
59
+ self,
60
+ in_channels: int,
61
+ out_channels: int,
62
+ kernel_size: Union[int, Tuple[int, int, int]],
63
+ stride: Union[int, Tuple[int, int, int]] = 1,
64
+ padding: Union[int, Tuple[int, int, int]] = 0,
65
+ ) -> None:
66
+ super().__init__(
67
+ in_channels=in_channels,
68
+ out_channels=out_channels,
69
+ kernel_size=kernel_size,
70
+ stride=stride,
71
+ padding=padding,
72
+ )
73
+
74
+ # Set up causal padding
75
+ self._padding = (self.padding[2], self.padding[2], self.padding[1], self.padding[1], 2 * self.padding[0], 0)
76
+ self.padding = (0, 0, 0)
77
+
78
+ def forward(self, x, cache_x=None):
79
+ padding = list(self._padding)
80
+ if cache_x is not None and self._padding[4] > 0:
81
+ cache_x = cache_x.to(x.device)
82
+ x = torch.cat([cache_x, x], dim=2)
83
+ padding[4] -= cache_x.shape[2]
84
+ x = F.pad(x, padding)
85
+ return super().forward(x)
86
+
87
+
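The padding rule above can be re-derived with plain `F.pad` (an illustrative standalone check): for a temporal kernel of 3 with nominal padding 1, both padded frames go to the past, so the output keeps the input length and frame `t` never sees future frames.

import torch
import torch.nn as nn
import torch.nn.functional as F

conv = nn.Conv3d(4, 4, kernel_size=3, padding=0)
x = torch.randn(1, 4, 5, 8, 8)                 # (B, C, T, H, W)
x = F.pad(x, (1, 1, 1, 1, 2, 0))               # (W_l, W_r, H_t, H_b, T_past, T_future)
print(conv(x).shape)                           # torch.Size([1, 4, 5, 8, 8]) -> same T, H, W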
88
+ class QwenImageRMS_norm(nn.Module):
89
+ r"""
90
+ A custom RMS normalization layer.
91
+
92
+ Args:
93
+ dim (int): The number of dimensions to normalize over.
94
+ channel_first (bool, optional): Whether the input tensor has channels as the first dimension.
95
+ Default is True.
96
+ images (bool, optional): Whether the input represents image data. Default is True.
97
+ bias (bool, optional): Whether to include a learnable bias term. Default is False.
98
+ """
99
+
100
+ def __init__(self, dim: int, channel_first: bool = True, images: bool = True, bias: bool = False) -> None:
101
+ super().__init__()
102
+ broadcastable_dims = (1, 1, 1) if not images else (1, 1)
103
+ shape = (dim, *broadcastable_dims) if channel_first else (dim,)
104
+
105
+ self.channel_first = channel_first
106
+ self.scale = dim**0.5
107
+ self.gamma = nn.Parameter(torch.ones(shape))
108
+ self.bias = nn.Parameter(torch.zeros(shape)) if bias else 0.0
109
+
110
+ def forward(self, x):
111
+ return F.normalize(x, dim=(1 if self.channel_first else -1)) * self.scale * self.gamma + self.bias
112
+
113
+
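A numeric check (standalone sketch) that the `F.normalize(x, dim=1) * dim**0.5` pattern used above is the usual RMS normalization over channels:

import torch
import torch.nn.functional as F

x = torch.randn(2, 8, 4, 4)
c = x.shape[1]
a = F.normalize(x, dim=1) * (c ** 0.5)
b = x / x.pow(2).mean(dim=1, keepdim=True).sqrt()
print(torch.allclose(a, b, atol=1e-6))          # True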
114
+ class QwenImageUpsample(nn.Upsample):
115
+ r"""
116
+ Perform upsampling while ensuring the output tensor has the same data type as the input.
117
+
118
+ Args:
119
+ x (torch.Tensor): Input tensor to be upsampled.
120
+
121
+ Returns:
122
+ torch.Tensor: Upsampled tensor with the same data type as the input.
123
+ """
124
+
125
+ def forward(self, x):
126
+ return super().forward(x.float()).type_as(x)
127
+
128
+
129
+ class QwenImageResample(nn.Module):
130
+ r"""
131
+ A custom resampling module for 2D and 3D data.
132
+
133
+ Args:
134
+ dim (int): The number of input/output channels.
135
+ mode (str): The resampling mode. Must be one of:
136
+ - 'none': No resampling (identity operation).
137
+ - 'upsample2d': 2D upsampling with nearest-exact interpolation and convolution.
138
+ - 'upsample3d': 3D upsampling with nearest-exact interpolation, convolution, and causal 3D convolution.
139
+ - 'downsample2d': 2D downsampling with zero-padding and convolution.
140
+ - 'downsample3d': 3D downsampling with zero-padding, convolution, and causal 3D convolution.
141
+ """
142
+
143
+ def __init__(self, dim: int, mode: str) -> None:
144
+ super().__init__()
145
+ self.dim = dim
146
+ self.mode = mode
147
+
148
+ # layers
149
+ if mode == "upsample2d":
150
+ self.resample = nn.Sequential(
151
+ QwenImageUpsample(scale_factor=(2.0, 2.0), mode="nearest-exact"),
152
+ nn.Conv2d(dim, dim // 2, 3, padding=1),
153
+ )
154
+ elif mode == "upsample3d":
155
+ self.resample = nn.Sequential(
156
+ QwenImageUpsample(scale_factor=(2.0, 2.0), mode="nearest-exact"),
157
+ nn.Conv2d(dim, dim // 2, 3, padding=1),
158
+ )
159
+ self.time_conv = QwenImageCausalConv3d(dim, dim * 2, (3, 1, 1), padding=(1, 0, 0))
160
+
161
+ elif mode == "downsample2d":
162
+ self.resample = nn.Sequential(nn.ZeroPad2d((0, 1, 0, 1)), nn.Conv2d(dim, dim, 3, stride=(2, 2)))
163
+ elif mode == "downsample3d":
164
+ self.resample = nn.Sequential(nn.ZeroPad2d((0, 1, 0, 1)), nn.Conv2d(dim, dim, 3, stride=(2, 2)))
165
+ self.time_conv = QwenImageCausalConv3d(dim, dim, (3, 1, 1), stride=(2, 1, 1), padding=(0, 0, 0))
166
+
167
+ else:
168
+ self.resample = nn.Identity()
169
+
170
+ def forward(self, x, feat_cache=None, feat_idx=[0]):
171
+ b, c, t, h, w = x.size()
172
+ if self.mode == "upsample3d":
173
+ if feat_cache is not None:
174
+ idx = feat_idx[0]
175
+ if feat_cache[idx] is None:
176
+ feat_cache[idx] = "Rep"
177
+ feat_idx[0] += 1
178
+ else:
179
+ cache_x = x[:, :, -CACHE_T:, :, :].clone()
180
+ if cache_x.shape[2] < 2 and feat_cache[idx] is not None and feat_cache[idx] != "Rep":
181
+ # cache last frame of last two chunk
182
+ cache_x = torch.cat(
183
+ [feat_cache[idx][:, :, -1, :, :].unsqueeze(2).to(cache_x.device), cache_x], dim=2
184
+ )
185
+ if cache_x.shape[2] < 2 and feat_cache[idx] is not None and feat_cache[idx] == "Rep":
186
+ cache_x = torch.cat([torch.zeros_like(cache_x).to(cache_x.device), cache_x], dim=2)
187
+ if feat_cache[idx] == "Rep":
188
+ x = self.time_conv(x)
189
+ else:
190
+ x = self.time_conv(x, feat_cache[idx])
191
+ feat_cache[idx] = cache_x
192
+ feat_idx[0] += 1
193
+
194
+ x = x.reshape(b, 2, c, t, h, w)
195
+ x = torch.stack((x[:, 0, :, :, :, :], x[:, 1, :, :, :, :]), 3)
196
+ x = x.reshape(b, c, t * 2, h, w)
197
+ t = x.shape[2]
198
+ x = x.permute(0, 2, 1, 3, 4).reshape(b * t, c, h, w)
199
+ x = self.resample(x)
200
+ x = x.view(b, t, x.size(1), x.size(2), x.size(3)).permute(0, 2, 1, 3, 4)
201
+
202
+ if self.mode == "downsample3d":
203
+ if feat_cache is not None:
204
+ idx = feat_idx[0]
205
+ if feat_cache[idx] is None:
206
+ feat_cache[idx] = x.clone()
207
+ feat_idx[0] += 1
208
+ else:
209
+ cache_x = x[:, :, -1:, :, :].clone()
210
+ x = self.time_conv(torch.cat([feat_cache[idx][:, :, -1:, :, :], x], 2))
211
+ feat_cache[idx] = cache_x
212
+ feat_idx[0] += 1
213
+ return x
214
+
215
+
216
+ class QwenImageResidualBlock(nn.Module):
217
+ r"""
218
+ A custom residual block module.
219
+
220
+ Args:
221
+ in_dim (int): Number of input channels.
222
+ out_dim (int): Number of output channels.
223
+ dropout (float, optional): Dropout rate for the dropout layer. Default is 0.0.
224
+ non_linearity (str, optional): Type of non-linearity to use. Default is "silu".
225
+ """
226
+
227
+ def __init__(
228
+ self,
229
+ in_dim: int,
230
+ out_dim: int,
231
+ dropout: float = 0.0,
232
+ non_linearity: str = "silu",
233
+ ) -> None:
234
+ super().__init__()
235
+ self.in_dim = in_dim
236
+ self.out_dim = out_dim
237
+ self.nonlinearity = get_activation(non_linearity)
238
+
239
+ # layers
240
+ self.norm1 = QwenImageRMS_norm(in_dim, images=False)
241
+ self.conv1 = QwenImageCausalConv3d(in_dim, out_dim, 3, padding=1)
242
+ self.norm2 = QwenImageRMS_norm(out_dim, images=False)
243
+ self.dropout = nn.Dropout(dropout)
244
+ self.conv2 = QwenImageCausalConv3d(out_dim, out_dim, 3, padding=1)
245
+ self.conv_shortcut = QwenImageCausalConv3d(in_dim, out_dim, 1) if in_dim != out_dim else nn.Identity()
246
+
247
+ def forward(self, x, feat_cache=None, feat_idx=[0]):
248
+ # Apply shortcut connection
249
+ h = self.conv_shortcut(x)
250
+
251
+ # First normalization and activation
252
+ x = self.norm1(x)
253
+ x = self.nonlinearity(x)
254
+
255
+ if feat_cache is not None:
256
+ idx = feat_idx[0]
257
+ cache_x = x[:, :, -CACHE_T:, :, :].clone()
258
+ if cache_x.shape[2] < 2 and feat_cache[idx] is not None:
259
+ cache_x = torch.cat([feat_cache[idx][:, :, -1, :, :].unsqueeze(2).to(cache_x.device), cache_x], dim=2)
260
+
261
+ x = self.conv1(x, feat_cache[idx])
262
+ feat_cache[idx] = cache_x
263
+ feat_idx[0] += 1
264
+ else:
265
+ x = self.conv1(x)
266
+
267
+ # Second normalization and activation
268
+ x = self.norm2(x)
269
+ x = self.nonlinearity(x)
270
+
271
+ # Dropout
272
+ x = self.dropout(x)
273
+
274
+ if feat_cache is not None:
275
+ idx = feat_idx[0]
276
+ cache_x = x[:, :, -CACHE_T:, :, :].clone()
277
+ if cache_x.shape[2] < 2 and feat_cache[idx] is not None:
278
+ cache_x = torch.cat([feat_cache[idx][:, :, -1, :, :].unsqueeze(2).to(cache_x.device), cache_x], dim=2)
279
+
280
+ x = self.conv2(x, feat_cache[idx])
281
+ feat_cache[idx] = cache_x
282
+ feat_idx[0] += 1
283
+ else:
284
+ x = self.conv2(x)
285
+
286
+ # Add residual connection
287
+ return x + h
288
+
289
+
290
+ class QwenImageAttentionBlock(nn.Module):
291
+ r"""
292
+ Causal self-attention with a single head.
293
+
294
+ Args:
295
+ dim (int): The number of channels in the input tensor.
296
+ """
297
+
298
+ def __init__(self, dim):
299
+ super().__init__()
300
+ self.dim = dim
301
+
302
+ # layers
303
+ self.norm = QwenImageRMS_norm(dim)
304
+ self.to_qkv = nn.Conv2d(dim, dim * 3, 1)
305
+ self.proj = nn.Conv2d(dim, dim, 1)
306
+
307
+ def forward(self, x):
308
+ identity = x
309
+ batch_size, channels, time, height, width = x.size()
310
+
311
+ x = x.permute(0, 2, 1, 3, 4).reshape(batch_size * time, channels, height, width)
312
+ x = self.norm(x)
313
+
314
+ # compute query, key, value
315
+ qkv = self.to_qkv(x)
316
+ qkv = qkv.reshape(batch_size * time, 1, channels * 3, -1)
317
+ qkv = qkv.permute(0, 1, 3, 2).contiguous()
318
+ q, k, v = qkv.chunk(3, dim=-1)
319
+
320
+ # apply attention
321
+ x = F.scaled_dot_product_attention(q, k, v)
322
+
323
+ x = x.squeeze(1).permute(0, 2, 1).reshape(batch_size * time, channels, height, width)
324
+
325
+ # output projection
326
+ x = self.proj(x)
327
+
328
+ # Reshape back: [(b*t), c, h, w] -> [b, c, t, h, w]
329
+ x = x.view(batch_size, time, channels, height, width)
330
+ x = x.permute(0, 2, 1, 3, 4)
331
+
332
+ return x + identity
333
+
334
+
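Shape sketch for the attention above (illustrative sizes): each frame is treated independently and its `H * W` positions attend to one another with a single head.

import torch
import torch.nn.functional as F

b, c, t, h, w = 1, 16, 3, 8, 8
tokens = torch.randn(b * t, 1, h * w, c)        # (batch*time, heads=1, seq=H*W, dim=C)
out = F.scaled_dot_product_attention(tokens, tokens, tokens)
print(out.shape)                                # torch.Size([3, 1, 64, 16])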
335
+ class QwenImageMidBlock(nn.Module):
336
+ """
337
+ Middle block for QwenImageVAE encoder and decoder.
338
+
339
+ Args:
340
+ dim (int): Number of input/output channels.
341
+ dropout (float): Dropout rate.
342
+ non_linearity (str): Type of non-linearity to use.
343
+ """
344
+
345
+ def __init__(self, dim: int, dropout: float = 0.0, non_linearity: str = "silu", num_layers: int = 1):
346
+ super().__init__()
347
+ self.dim = dim
348
+
349
+ # Create the components
350
+ resnets = [QwenImageResidualBlock(dim, dim, dropout, non_linearity)]
351
+ attentions = []
352
+ for _ in range(num_layers):
353
+ attentions.append(QwenImageAttentionBlock(dim))
354
+ resnets.append(QwenImageResidualBlock(dim, dim, dropout, non_linearity))
355
+ self.attentions = nn.ModuleList(attentions)
356
+ self.resnets = nn.ModuleList(resnets)
357
+
358
+ self.gradient_checkpointing = False
359
+
360
+ def forward(self, x, feat_cache=None, feat_idx=[0]):
361
+ # First residual block
362
+ x = self.resnets[0](x, feat_cache, feat_idx)
363
+
364
+ # Process through attention and residual blocks
365
+ for attn, resnet in zip(self.attentions, self.resnets[1:]):
366
+ if attn is not None:
367
+ x = attn(x)
368
+
369
+ x = resnet(x, feat_cache, feat_idx)
370
+
371
+ return x
372
+
373
+
374
+ class QwenImageEncoder3d(nn.Module):
375
+ r"""
376
+ A 3D encoder module.
377
+
378
+ Args:
379
+ dim (int): The base number of channels in the first layer.
380
+ z_dim (int): The dimensionality of the latent space.
381
+ dim_mult (list of int): Multipliers for the number of channels in each block.
382
+ num_res_blocks (int): Number of residual blocks in each block.
383
+ attn_scales (list of float): Scales at which to apply attention mechanisms.
384
+ temperal_downsample (list of bool): Whether to downsample temporally in each block.
385
+ dropout (float): Dropout rate for the dropout layers.
386
+ non_linearity (str): Type of non-linearity to use.
387
+ """
388
+
389
+ def __init__(
390
+ self,
391
+ dim=128,
392
+ z_dim=4,
393
+ dim_mult=[1, 2, 4, 4],
394
+ num_res_blocks=2,
395
+ attn_scales=[],
396
+ temperal_downsample=[True, True, False],
397
+ dropout=0.0,
398
+ non_linearity: str = "silu",
399
+ ):
400
+ super().__init__()
401
+ self.dim = dim
402
+ self.z_dim = z_dim
403
+ self.dim_mult = dim_mult
404
+ self.num_res_blocks = num_res_blocks
405
+ self.attn_scales = attn_scales
406
+ self.temperal_downsample = temperal_downsample
407
+ self.nonlinearity = get_activation(non_linearity)
408
+
409
+ # dimensions
410
+ dims = [dim * u for u in [1] + dim_mult]
411
+ scale = 1.0
412
+
413
+ # init block
414
+ self.conv_in = QwenImageCausalConv3d(3, dims[0], 3, padding=1)
415
+
416
+ # downsample blocks
417
+ self.down_blocks = nn.ModuleList([])
418
+ for i, (in_dim, out_dim) in enumerate(zip(dims[:-1], dims[1:])):
419
+ # residual (+attention) blocks
420
+ for _ in range(num_res_blocks):
421
+ self.down_blocks.append(QwenImageResidualBlock(in_dim, out_dim, dropout))
422
+ if scale in attn_scales:
423
+ self.down_blocks.append(QwenImageAttentionBlock(out_dim))
424
+ in_dim = out_dim
425
+
426
+ # downsample block
427
+ if i != len(dim_mult) - 1:
428
+ mode = "downsample3d" if temperal_downsample[i] else "downsample2d"
429
+ self.down_blocks.append(QwenImageResample(out_dim, mode=mode))
430
+ scale /= 2.0
431
+
432
+ # middle blocks
433
+ self.mid_block = QwenImageMidBlock(out_dim, dropout, non_linearity, num_layers=1)
434
+
435
+ # output blocks
436
+ self.norm_out = QwenImageRMS_norm(out_dim, images=False)
437
+ self.conv_out = QwenImageCausalConv3d(out_dim, z_dim, 3, padding=1)
438
+
439
+ self.gradient_checkpointing = False
440
+
441
+ def forward(self, x, feat_cache=None, feat_idx=[0]):
442
+ if feat_cache is not None:
443
+ idx = feat_idx[0]
444
+ cache_x = x[:, :, -CACHE_T:, :, :].clone()
445
+ if cache_x.shape[2] < 2 and feat_cache[idx] is not None:
446
+ # cache last frame of last two chunk
447
+ cache_x = torch.cat([feat_cache[idx][:, :, -1, :, :].unsqueeze(2).to(cache_x.device), cache_x], dim=2)
448
+ x = self.conv_in(x, feat_cache[idx])
449
+ feat_cache[idx] = cache_x
450
+ feat_idx[0] += 1
451
+ else:
452
+ x = self.conv_in(x)
453
+
454
+ ## downsamples
455
+ for layer in self.down_blocks:
456
+ if feat_cache is not None:
457
+ x = layer(x, feat_cache, feat_idx)
458
+ else:
459
+ x = layer(x)
460
+
461
+ ## middle
462
+ x = self.mid_block(x, feat_cache, feat_idx)
463
+
464
+ ## head
465
+ x = self.norm_out(x)
466
+ x = self.nonlinearity(x)
467
+ if feat_cache is not None:
468
+ idx = feat_idx[0]
469
+ cache_x = x[:, :, -CACHE_T:, :, :].clone()
470
+ if cache_x.shape[2] < 2 and feat_cache[idx] is not None:
471
+ # cache last frame of last two chunk
472
+ cache_x = torch.cat([feat_cache[idx][:, :, -1, :, :].unsqueeze(2).to(cache_x.device), cache_x], dim=2)
473
+ x = self.conv_out(x, feat_cache[idx])
474
+ feat_cache[idx] = cache_x
475
+ feat_idx[0] += 1
476
+ else:
477
+ x = self.conv_out(x)
478
+ return x
479
+
480
+
481
+ class QwenImageUpBlock(nn.Module):
482
+ """
483
+ A block that handles upsampling for the QwenImageVAE decoder.
484
+
485
+ Args:
486
+ in_dim (int): Input dimension
487
+ out_dim (int): Output dimension
488
+ num_res_blocks (int): Number of residual blocks
489
+ dropout (float): Dropout rate
490
+ upsample_mode (str, optional): Mode for upsampling ('upsample2d' or 'upsample3d')
491
+ non_linearity (str): Type of non-linearity to use
492
+ """
493
+
494
+ def __init__(
495
+ self,
496
+ in_dim: int,
497
+ out_dim: int,
498
+ num_res_blocks: int,
499
+ dropout: float = 0.0,
500
+ upsample_mode: Optional[str] = None,
501
+ non_linearity: str = "silu",
502
+ ):
503
+ super().__init__()
504
+ self.in_dim = in_dim
505
+ self.out_dim = out_dim
506
+
507
+ # Create layers list
508
+ resnets = []
509
+ # Add residual blocks and attention if needed
510
+ current_dim = in_dim
511
+ for _ in range(num_res_blocks + 1):
512
+ resnets.append(QwenImageResidualBlock(current_dim, out_dim, dropout, non_linearity))
513
+ current_dim = out_dim
514
+
515
+ self.resnets = nn.ModuleList(resnets)
516
+
517
+ # Add upsampling layer if needed
518
+ self.upsamplers = None
519
+ if upsample_mode is not None:
520
+ self.upsamplers = nn.ModuleList([QwenImageResample(out_dim, mode=upsample_mode)])
521
+
522
+ self.gradient_checkpointing = False
523
+
524
+ def forward(self, x, feat_cache=None, feat_idx=[0]):
525
+ """
526
+ Forward pass through the upsampling block.
527
+
528
+ Args:
529
+ x (torch.Tensor): Input tensor
530
+ feat_cache (list, optional): Feature cache for causal convolutions
531
+ feat_idx (list, optional): Feature index for cache management
532
+
533
+ Returns:
534
+ torch.Tensor: Output tensor
535
+ """
536
+ for resnet in self.resnets:
537
+ if feat_cache is not None:
538
+ x = resnet(x, feat_cache, feat_idx)
539
+ else:
540
+ x = resnet(x)
541
+
542
+ if self.upsamplers is not None:
543
+ if feat_cache is not None:
544
+ x = self.upsamplers[0](x, feat_cache, feat_idx)
545
+ else:
546
+ x = self.upsamplers[0](x)
547
+ return x
548
+
549
+
550
+ class QwenImageDecoder3d(nn.Module):
551
+ r"""
552
+ A 3D decoder module.
553
+
554
+ Args:
555
+ dim (int): The base number of channels in the first layer.
556
+ z_dim (int): The dimensionality of the latent space.
557
+ dim_mult (list of int): Multipliers for the number of channels in each block.
558
+ num_res_blocks (int): Number of residual blocks in each block.
559
+ attn_scales (list of float): Scales at which to apply attention mechanisms.
560
+ temperal_upsample (list of bool): Whether to upsample temporally in each block.
561
+ dropout (float): Dropout rate for the dropout layers.
562
+ non_linearity (str): Type of non-linearity to use.
563
+ """
564
+
565
+ def __init__(
566
+ self,
567
+ dim=128,
568
+ z_dim=4,
569
+ dim_mult=[1, 2, 4, 4],
570
+ num_res_blocks=2,
571
+ attn_scales=[],
572
+ temperal_upsample=[False, True, True],
573
+ dropout=0.0,
574
+ non_linearity: str = "silu",
575
+ ):
576
+ super().__init__()
577
+ self.dim = dim
578
+ self.z_dim = z_dim
579
+ self.dim_mult = dim_mult
580
+ self.num_res_blocks = num_res_blocks
581
+ self.attn_scales = attn_scales
582
+ self.temperal_upsample = temperal_upsample
583
+
584
+ self.nonlinearity = get_activation(non_linearity)
585
+
586
+ # dimensions
587
+ dims = [dim * u for u in [dim_mult[-1]] + dim_mult[::-1]]
588
+ scale = 1.0 / 2 ** (len(dim_mult) - 2)
589
+
590
+ # init block
591
+ self.conv_in = QwenImageCausalConv3d(z_dim, dims[0], 3, padding=1)
592
+
593
+ # middle blocks
594
+ self.mid_block = QwenImageMidBlock(dims[0], dropout, non_linearity, num_layers=1)
595
+
596
+ # upsample blocks
597
+ self.up_blocks = nn.ModuleList([])
598
+ for i, (in_dim, out_dim) in enumerate(zip(dims[:-1], dims[1:])):
599
+ # residual (+attention) blocks
600
+ if i > 0:
601
+ in_dim = in_dim // 2
602
+
603
+ # Determine if we need upsampling
604
+ upsample_mode = None
605
+ if i != len(dim_mult) - 1:
606
+ upsample_mode = "upsample3d" if temperal_upsample[i] else "upsample2d"
607
+
608
+ # Create and add the upsampling block
609
+ up_block = QwenImageUpBlock(
610
+ in_dim=in_dim,
611
+ out_dim=out_dim,
612
+ num_res_blocks=num_res_blocks,
613
+ dropout=dropout,
614
+ upsample_mode=upsample_mode,
615
+ non_linearity=non_linearity,
616
+ )
617
+ self.up_blocks.append(up_block)
618
+
619
+ # Update scale for next iteration
620
+ if upsample_mode is not None:
621
+ scale *= 2.0
622
+
623
+ # output blocks
624
+ self.norm_out = QwenImageRMS_norm(out_dim, images=False)
625
+ self.conv_out = QwenImageCausalConv3d(out_dim, 3, 3, padding=1)
626
+
627
+ self.gradient_checkpointing = False
628
+
629
+ def forward(self, x, feat_cache=None, feat_idx=[0]):
630
+ ## conv1
631
+ if feat_cache is not None:
632
+ idx = feat_idx[0]
633
+ cache_x = x[:, :, -CACHE_T:, :, :].clone()
634
+ if cache_x.shape[2] < 2 and feat_cache[idx] is not None:
635
+ # cache last frame of last two chunk
636
+ cache_x = torch.cat([feat_cache[idx][:, :, -1, :, :].unsqueeze(2).to(cache_x.device), cache_x], dim=2)
637
+ x = self.conv_in(x, feat_cache[idx])
638
+ feat_cache[idx] = cache_x
639
+ feat_idx[0] += 1
640
+ else:
641
+ x = self.conv_in(x)
642
+
643
+ ## middle
644
+ x = self.mid_block(x, feat_cache, feat_idx)
645
+
646
+ ## upsamples
647
+ for up_block in self.up_blocks:
648
+ x = up_block(x, feat_cache, feat_idx)
649
+
650
+ ## head
651
+ x = self.norm_out(x)
652
+ x = self.nonlinearity(x)
653
+ if feat_cache is not None:
654
+ idx = feat_idx[0]
655
+ cache_x = x[:, :, -CACHE_T:, :, :].clone()
656
+ if cache_x.shape[2] < 2 and feat_cache[idx] is not None:
657
+ # cache last frame of last two chunk
658
+ cache_x = torch.cat([feat_cache[idx][:, :, -1, :, :].unsqueeze(2).to(cache_x.device), cache_x], dim=2)
659
+ x = self.conv_out(x, feat_cache[idx])
660
+ feat_cache[idx] = cache_x
661
+ feat_idx[0] += 1
662
+ else:
663
+ x = self.conv_out(x)
664
+ return x
665
+
666
+
667
+ class AutoencoderKLQwenImage(ModelMixin, ConfigMixin, FromOriginalModelMixin):
668
+ r"""
669
+ A VAE model with KL loss for encoding videos into latents and decoding latent representations into videos.
670
+
671
+ This model inherits from [`ModelMixin`]. Check the superclass documentation for it's generic methods implemented
672
+ for all models (such as downloading or saving).
673
+ """
674
+
675
+ _supports_gradient_checkpointing = False
676
+
677
+ # fmt: off
678
+ @register_to_config
679
+ def __init__(
680
+ self,
681
+ base_dim: int = 96,
682
+ z_dim: int = 16,
683
+ dim_mult: Tuple[int] = [1, 2, 4, 4],
684
+ num_res_blocks: int = 2,
685
+ attn_scales: List[float] = [],
686
+ temperal_downsample: List[bool] = [False, True, True],
687
+ dropout: float = 0.0,
688
+ latents_mean: List[float] = [-0.7571, -0.7089, -0.9113, 0.1075, -0.1745, 0.9653, -0.1517, 1.5508, 0.4134, -0.0715, 0.5517, -0.3632, -0.1922, -0.9497, 0.2503, -0.2921],
689
+ latents_std: List[float] = [2.8184, 1.4541, 2.3275, 2.6558, 1.2196, 1.7708, 2.6052, 2.0743, 3.2687, 2.1526, 2.8652, 1.5579, 1.6382, 1.1253, 2.8251, 1.9160],
690
+ ) -> None:
691
+ # fmt: on
692
+ super().__init__()
693
+
694
+ self.z_dim = z_dim
695
+ self.temperal_downsample = temperal_downsample
696
+ self.temperal_upsample = temperal_downsample[::-1]
697
+
698
+ self.encoder = QwenImageEncoder3d(
699
+ base_dim, z_dim * 2, dim_mult, num_res_blocks, attn_scales, self.temperal_downsample, dropout
700
+ )
701
+ self.quant_conv = QwenImageCausalConv3d(z_dim * 2, z_dim * 2, 1)
702
+ self.post_quant_conv = QwenImageCausalConv3d(z_dim, z_dim, 1)
703
+
704
+ self.decoder = QwenImageDecoder3d(
705
+ base_dim, z_dim, dim_mult, num_res_blocks, attn_scales, self.temperal_upsample, dropout
706
+ )
707
+
708
+ self.spatial_compression_ratio = 2 ** len(self.temperal_downsample)
709
+
710
+ # When decoding a batch of video latents at a time, one can save memory by slicing across the batch dimension
711
+ # to perform decoding of a single video latent at a time.
712
+ self.use_slicing = False
713
+
714
+ # When decoding spatially large video latents, the memory requirement is very high. By breaking the video latent
715
+ # frames spatially into smaller tiles and performing multiple forward passes for decoding, and then blending the
716
+ # intermediate tiles together, the memory requirement can be lowered.
717
+ self.use_tiling = False
718
+
719
+ # The minimal tile height and width for spatial tiling to be used
720
+ self.tile_sample_min_height = 256
721
+ self.tile_sample_min_width = 256
722
+
723
+ # The minimal distance between two spatial tiles
724
+ self.tile_sample_stride_height = 192
725
+ self.tile_sample_stride_width = 192
726
+
727
+ # Precompute and cache conv counts for encoder and decoder for clear_cache speedup
728
+ self._cached_conv_counts = {
729
+ "decoder": sum(isinstance(m, QwenImageCausalConv3d) for m in self.decoder.modules())
730
+ if self.decoder is not None
731
+ else 0,
732
+ "encoder": sum(isinstance(m, QwenImageCausalConv3d) for m in self.encoder.modules())
733
+ if self.encoder is not None
734
+ else 0,
735
+ }
736
+
737
+ def enable_tiling(
738
+ self,
739
+ tile_sample_min_height: Optional[int] = None,
740
+ tile_sample_min_width: Optional[int] = None,
741
+ tile_sample_stride_height: Optional[float] = None,
742
+ tile_sample_stride_width: Optional[float] = None,
743
+ ) -> None:
744
+ r"""
745
+ Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to
746
+ compute decoding and encoding in several steps. This is useful to save a large amount of memory and to allow
747
+ processing larger images.
748
+
749
+ Args:
750
+ tile_sample_min_height (`int`, *optional*):
751
+ The minimum height required for a sample to be separated into tiles across the height dimension.
752
+ tile_sample_min_width (`int`, *optional*):
753
+ The minimum width required for a sample to be separated into tiles across the width dimension.
754
+ tile_sample_stride_height (`int`, *optional*):
755
+ The stride between two consecutive vertical tiles. This is to ensure that there are
756
+ no tiling artifacts produced across the height dimension.
757
+ tile_sample_stride_width (`int`, *optional*):
758
+ The stride between two consecutive horizontal tiles. This is to ensure that there are no tiling
759
+ artifacts produced across the width dimension.
760
+ """
761
+ self.use_tiling = True
762
+ self.tile_sample_min_height = tile_sample_min_height or self.tile_sample_min_height
763
+ self.tile_sample_min_width = tile_sample_min_width or self.tile_sample_min_width
764
+ self.tile_sample_stride_height = tile_sample_stride_height or self.tile_sample_stride_height
765
+ self.tile_sample_stride_width = tile_sample_stride_width or self.tile_sample_stride_width
766
+
767
+ def disable_tiling(self) -> None:
768
+ r"""
769
+ Disable tiled VAE decoding. If `enable_tiling` was previously enabled, this method will go back to computing
770
+ decoding in one step.
771
+ """
772
+ self.use_tiling = False
773
+
774
+ def enable_slicing(self) -> None:
775
+ r"""
776
+ Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to
777
+ compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.
778
+ """
779
+ self.use_slicing = True
780
+
781
+ def disable_slicing(self) -> None:
782
+ r"""
783
+ Disable sliced VAE decoding. If `enable_slicing` was previously enabled, this method will go back to computing
784
+ decoding in one step.
785
+ """
786
+ self.use_slicing = False
787
+
788
+ def clear_cache(self):
789
+ def _count_conv3d(model):
790
+ count = 0
791
+ for m in model.modules():
792
+ if isinstance(m, QwenImageCausalConv3d):
793
+ count += 1
794
+ return count
795
+
796
+ self._conv_num = _count_conv3d(self.decoder)
797
+ self._conv_idx = [0]
798
+ self._feat_map = [None] * self._conv_num
799
+ # cache encode
800
+ self._enc_conv_num = _count_conv3d(self.encoder)
801
+ self._enc_conv_idx = [0]
802
+ self._enc_feat_map = [None] * self._enc_conv_num
803
+
804
+ def _encode(self, x: torch.Tensor):
805
+ _, _, num_frame, height, width = x.shape
806
+
807
+ if self.use_tiling and (width > self.tile_sample_min_width or height > self.tile_sample_min_height):
808
+ return self.tiled_encode(x)
809
+
810
+ self.clear_cache()
811
+ iter_ = 1 + (num_frame - 1) // 4
812
+ for i in range(iter_):
813
+ self._enc_conv_idx = [0]
814
+ if i == 0:
815
+ out = self.encoder(x[:, :, :1, :, :], feat_cache=self._enc_feat_map, feat_idx=self._enc_conv_idx)
816
+ else:
817
+ out_ = self.encoder(
818
+ x[:, :, 1 + 4 * (i - 1) : 1 + 4 * i, :, :],
819
+ feat_cache=self._enc_feat_map,
820
+ feat_idx=self._enc_conv_idx,
821
+ )
822
+ out = torch.cat([out, out_], 2)
823
+
824
+ enc = self.quant_conv(out)
825
+ self.clear_cache()
826
+ return enc
827
+
828
+ @apply_forward_hook
829
+ def encode(
830
+ self, x: torch.Tensor, return_dict: bool = True
831
+ ) -> Union[AutoencoderKLOutput, Tuple[DiagonalGaussianDistribution]]:
832
+ r"""
833
+ Encode a batch of images into latents.
834
+
835
+ Args:
836
+ x (`torch.Tensor`): Input batch of images.
837
+ return_dict (`bool`, *optional*, defaults to `True`):
838
+ Whether to return a [`~models.autoencoder_kl.AutoencoderKLOutput`] instead of a plain tuple.
839
+
840
+ Returns:
841
+ The latent representations of the encoded videos. If `return_dict` is True, a
842
+ [`~models.autoencoder_kl.AutoencoderKLOutput`] is returned, otherwise a plain `tuple` is returned.
843
+ """
844
+ if self.use_slicing and x.shape[0] > 1:
845
+ encoded_slices = [self._encode(x_slice) for x_slice in x.split(1)]
846
+ h = torch.cat(encoded_slices)
847
+ else:
848
+ h = self._encode(x)
849
+ posterior = DiagonalGaussianDistribution(h)
850
+
851
+ if not return_dict:
852
+ return (posterior,)
853
+ return AutoencoderKLOutput(latent_dist=posterior)
854
+
855
+ def _decode(self, z: torch.Tensor, return_dict: bool = True):
856
+ _, _, num_frame, height, width = z.shape
857
+ tile_latent_min_height = self.tile_sample_min_height // self.spatial_compression_ratio
858
+ tile_latent_min_width = self.tile_sample_min_width // self.spatial_compression_ratio
859
+
860
+ if self.use_tiling and (width > tile_latent_min_width or height > tile_latent_min_height):
861
+ return self.tiled_decode(z, return_dict=return_dict)
862
+
863
+ self.clear_cache()
864
+ x = self.post_quant_conv(z)
865
+ for i in range(num_frame):
866
+ self._conv_idx = [0]
867
+ if i == 0:
868
+ out = self.decoder(x[:, :, i : i + 1, :, :], feat_cache=self._feat_map, feat_idx=self._conv_idx)
869
+ else:
870
+ out_ = self.decoder(x[:, :, i : i + 1, :, :], feat_cache=self._feat_map, feat_idx=self._conv_idx)
871
+ out = torch.cat([out, out_], 2)
872
+
873
+ out = torch.clamp(out, min=-1.0, max=1.0)
874
+ self.clear_cache()
875
+ if not return_dict:
876
+ return (out,)
877
+
878
+ return DecoderOutput(sample=out)
879
+
880
+ @apply_forward_hook
881
+ def decode(self, z: torch.Tensor, return_dict: bool = True) -> Union[DecoderOutput, torch.Tensor]:
882
+ r"""
883
+ Decode a batch of images.
884
+
885
+ Args:
886
+ z (`torch.Tensor`): Input batch of latent vectors.
887
+ return_dict (`bool`, *optional*, defaults to `True`):
888
+ Whether to return a [`~models.vae.DecoderOutput`] instead of a plain tuple.
889
+
890
+ Returns:
891
+ [`~models.vae.DecoderOutput`] or `tuple`:
892
+ If return_dict is True, a [`~models.vae.DecoderOutput`] is returned, otherwise a plain `tuple` is
893
+ returned.
894
+ """
895
+ if self.use_slicing and z.shape[0] > 1:
896
+ decoded_slices = [self._decode(z_slice).sample for z_slice in z.split(1)]
897
+ decoded = torch.cat(decoded_slices)
898
+ else:
899
+ decoded = self._decode(z).sample
900
+
901
+ if not return_dict:
902
+ return (decoded,)
903
+ return DecoderOutput(sample=decoded)
904
+
905
+ def blend_v(self, a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor:
906
+ blend_extent = min(a.shape[-2], b.shape[-2], blend_extent)
907
+ for y in range(blend_extent):
908
+ b[:, :, :, y, :] = a[:, :, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, :, y, :] * (
909
+ y / blend_extent
910
+ )
911
+ return b
912
+
913
+ def blend_h(self, a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor:
914
+ blend_extent = min(a.shape[-1], b.shape[-1], blend_extent)
915
+ for x in range(blend_extent):
916
+ b[:, :, :, :, x] = a[:, :, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, :, x] * (
917
+ x / blend_extent
918
+ )
919
+ return b
920
+
921
+ def tiled_encode(self, x: torch.Tensor) -> AutoencoderKLOutput:
922
+ r"""Encode a batch of images using a tiled encoder.
923
+
924
+ Args:
925
+ x (`torch.Tensor`): Input batch of videos.
926
+
927
+ Returns:
928
+ `torch.Tensor`:
929
+ The latent representation of the encoded videos.
930
+ """
931
+ _, _, num_frames, height, width = x.shape
932
+ latent_height = height // self.spatial_compression_ratio
933
+ latent_width = width // self.spatial_compression_ratio
934
+
935
+ tile_latent_min_height = self.tile_sample_min_height // self.spatial_compression_ratio
936
+ tile_latent_min_width = self.tile_sample_min_width // self.spatial_compression_ratio
937
+ tile_latent_stride_height = self.tile_sample_stride_height // self.spatial_compression_ratio
938
+ tile_latent_stride_width = self.tile_sample_stride_width // self.spatial_compression_ratio
939
+
940
+ blend_height = tile_latent_min_height - tile_latent_stride_height
941
+ blend_width = tile_latent_min_width - tile_latent_stride_width
942
+
943
+ # Split x into overlapping tiles and encode them separately.
944
+ # The tiles have an overlap to avoid seams between tiles.
945
+ rows = []
946
+ for i in range(0, height, self.tile_sample_stride_height):
947
+ row = []
948
+ for j in range(0, width, self.tile_sample_stride_width):
949
+ self.clear_cache()
950
+ time = []
951
+ frame_range = 1 + (num_frames - 1) // 4
952
+ for k in range(frame_range):
953
+ self._enc_conv_idx = [0]
954
+ if k == 0:
955
+ tile = x[:, :, :1, i : i + self.tile_sample_min_height, j : j + self.tile_sample_min_width]
956
+ else:
957
+ tile = x[
958
+ :,
959
+ :,
960
+ 1 + 4 * (k - 1) : 1 + 4 * k,
961
+ i : i + self.tile_sample_min_height,
962
+ j : j + self.tile_sample_min_width,
963
+ ]
964
+ tile = self.encoder(tile, feat_cache=self._enc_feat_map, feat_idx=self._enc_conv_idx)
965
+ tile = self.quant_conv(tile)
966
+ time.append(tile)
967
+ row.append(torch.cat(time, dim=2))
968
+ rows.append(row)
969
+ self.clear_cache()
970
+
971
+ result_rows = []
972
+ for i, row in enumerate(rows):
973
+ result_row = []
974
+ for j, tile in enumerate(row):
975
+ # blend the above tile and the left tile
976
+ # to the current tile and add the current tile to the result row
977
+ if i > 0:
978
+ tile = self.blend_v(rows[i - 1][j], tile, blend_height)
979
+ if j > 0:
980
+ tile = self.blend_h(row[j - 1], tile, blend_width)
981
+ result_row.append(tile[:, :, :, :tile_latent_stride_height, :tile_latent_stride_width])
982
+ result_rows.append(torch.cat(result_row, dim=-1))
983
+
984
+ enc = torch.cat(result_rows, dim=3)[:, :, :, :latent_height, :latent_width]
985
+ return enc
986
+
987
+ def tiled_decode(self, z: torch.Tensor, return_dict: bool = True) -> Union[DecoderOutput, torch.Tensor]:
988
+ r"""
989
+ Decode a batch of images using a tiled decoder.
990
+
991
+ Args:
992
+ z (`torch.Tensor`): Input batch of latent vectors.
993
+ return_dict (`bool`, *optional*, defaults to `True`):
994
+ Whether or not to return a [`~models.vae.DecoderOutput`] instead of a plain tuple.
995
+
996
+ Returns:
997
+ [`~models.vae.DecoderOutput`] or `tuple`:
998
+ If return_dict is True, a [`~models.vae.DecoderOutput`] is returned, otherwise a plain `tuple` is
999
+ returned.
1000
+ """
1001
+ _, _, num_frames, height, width = z.shape
1002
+ sample_height = height * self.spatial_compression_ratio
1003
+ sample_width = width * self.spatial_compression_ratio
1004
+
1005
+ tile_latent_min_height = self.tile_sample_min_height // self.spatial_compression_ratio
1006
+ tile_latent_min_width = self.tile_sample_min_width // self.spatial_compression_ratio
1007
+ tile_latent_stride_height = self.tile_sample_stride_height // self.spatial_compression_ratio
1008
+ tile_latent_stride_width = self.tile_sample_stride_width // self.spatial_compression_ratio
1009
+
1010
+ blend_height = self.tile_sample_min_height - self.tile_sample_stride_height
1011
+ blend_width = self.tile_sample_min_width - self.tile_sample_stride_width
1012
+
1013
+ # Split z into overlapping tiles and decode them separately.
1014
+ # The tiles have an overlap to avoid seams between tiles.
1015
+ rows = []
1016
+ for i in range(0, height, tile_latent_stride_height):
1017
+ row = []
1018
+ for j in range(0, width, tile_latent_stride_width):
1019
+ self.clear_cache()
1020
+ time = []
1021
+ for k in range(num_frames):
1022
+ self._conv_idx = [0]
1023
+ tile = z[:, :, k : k + 1, i : i + tile_latent_min_height, j : j + tile_latent_min_width]
1024
+ tile = self.post_quant_conv(tile)
1025
+ decoded = self.decoder(tile, feat_cache=self._feat_map, feat_idx=self._conv_idx)
1026
+ time.append(decoded)
1027
+ row.append(torch.cat(time, dim=2))
1028
+ rows.append(row)
1029
+ self.clear_cache()
1030
+
1031
+ result_rows = []
1032
+ for i, row in enumerate(rows):
1033
+ result_row = []
1034
+ for j, tile in enumerate(row):
1035
+ # blend the above tile and the left tile
1036
+ # to the current tile and add the current tile to the result row
1037
+ if i > 0:
1038
+ tile = self.blend_v(rows[i - 1][j], tile, blend_height)
1039
+ if j > 0:
1040
+ tile = self.blend_h(row[j - 1], tile, blend_width)
1041
+ result_row.append(tile[:, :, :, : self.tile_sample_stride_height, : self.tile_sample_stride_width])
1042
+ result_rows.append(torch.cat(result_row, dim=-1))
1043
+
1044
+ dec = torch.cat(result_rows, dim=3)[:, :, :, :sample_height, :sample_width]
1045
+
1046
+ if not return_dict:
1047
+ return (dec,)
1048
+ return DecoderOutput(sample=dec)
1049
+
1050
+ def forward(
1051
+ self,
1052
+ sample: torch.Tensor,
1053
+ sample_posterior: bool = False,
1054
+ return_dict: bool = True,
1055
+ generator: Optional[torch.Generator] = None,
1056
+ ) -> Union[DecoderOutput, torch.Tensor]:
1057
+ """
1058
+ Args:
1059
+ sample (`torch.Tensor`): Input sample.
1060
+ return_dict (`bool`, *optional*, defaults to `True`):
1061
+ Whether or not to return a [`DecoderOutput`] instead of a plain tuple.
1062
+ """
1063
+ x = sample
1064
+ posterior = self.encode(x).latent_dist
1065
+ if sample_posterior:
1066
+ z = posterior.sample(generator=generator)
1067
+ else:
1068
+ z = posterior.mode()
1069
+ dec = self.decode(z, return_dict=return_dict)
1070
+ return dec
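
A minimal usage sketch (not part of the committed file) for the `AutoencoderKLQwenImage` module added above. It assumes the class is exported from the `diffusers` package root (otherwise import it from `diffusers.models.autoencoders.autoencoder_kl_qwenimage`); the tensor shapes are arbitrary placeholders.

import torch
from diffusers import AutoencoderKLQwenImage  # assumed top-level export

vae = AutoencoderKLQwenImage()  # default config: base_dim=96, z_dim=16
vae.eval()
vae.enable_tiling()   # spatial tiling keeps memory bounded on large inputs
vae.enable_slicing()  # decode one latent at a time when the batch is > 1

# Input layout is [batch, channels, frames, height, width]; frame counts of
# the form 1 + 4k map cleanly onto the 4x temporal compression used above.
video = torch.randn(1, 3, 5, 256, 256)

with torch.no_grad():
    posterior = vae.encode(video).latent_dist
    latents = posterior.mode()           # [1, 16, 2, 32, 32] with the defaults
    recon = vae.decode(latents).sample   # back to [1, 3, 5, 256, 256]

print(recon.shape)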
exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/models/autoencoders/autoencoder_kl_temporal_decoder.py ADDED
@@ -0,0 +1,363 @@
1
+ # Copyright 2025 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ import itertools
15
+ from typing import Dict, Optional, Tuple, Union
16
+
17
+ import torch
18
+ import torch.nn as nn
19
+
20
+ from ...configuration_utils import ConfigMixin, register_to_config
21
+ from ...utils.accelerate_utils import apply_forward_hook
22
+ from ..attention_processor import CROSS_ATTENTION_PROCESSORS, AttentionProcessor, AttnProcessor
23
+ from ..modeling_outputs import AutoencoderKLOutput
24
+ from ..modeling_utils import ModelMixin
25
+ from ..unets.unet_3d_blocks import MidBlockTemporalDecoder, UpBlockTemporalDecoder
26
+ from .vae import DecoderOutput, DiagonalGaussianDistribution, Encoder
27
+
28
+
29
+ class TemporalDecoder(nn.Module):
30
+ def __init__(
31
+ self,
32
+ in_channels: int = 4,
33
+ out_channels: int = 3,
34
+ block_out_channels: Tuple[int] = (128, 256, 512, 512),
35
+ layers_per_block: int = 2,
36
+ ):
37
+ super().__init__()
38
+ self.layers_per_block = layers_per_block
39
+
40
+ self.conv_in = nn.Conv2d(in_channels, block_out_channels[-1], kernel_size=3, stride=1, padding=1)
41
+ self.mid_block = MidBlockTemporalDecoder(
42
+ num_layers=self.layers_per_block,
43
+ in_channels=block_out_channels[-1],
44
+ out_channels=block_out_channels[-1],
45
+ attention_head_dim=block_out_channels[-1],
46
+ )
47
+
48
+ # up
49
+ self.up_blocks = nn.ModuleList([])
50
+ reversed_block_out_channels = list(reversed(block_out_channels))
51
+ output_channel = reversed_block_out_channels[0]
52
+ for i in range(len(block_out_channels)):
53
+ prev_output_channel = output_channel
54
+ output_channel = reversed_block_out_channels[i]
55
+
56
+ is_final_block = i == len(block_out_channels) - 1
57
+ up_block = UpBlockTemporalDecoder(
58
+ num_layers=self.layers_per_block + 1,
59
+ in_channels=prev_output_channel,
60
+ out_channels=output_channel,
61
+ add_upsample=not is_final_block,
62
+ )
63
+ self.up_blocks.append(up_block)
64
+ prev_output_channel = output_channel
65
+
66
+ self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=32, eps=1e-6)
67
+
68
+ self.conv_act = nn.SiLU()
69
+ self.conv_out = torch.nn.Conv2d(
70
+ in_channels=block_out_channels[0],
71
+ out_channels=out_channels,
72
+ kernel_size=3,
73
+ padding=1,
74
+ )
75
+
76
+ conv_out_kernel_size = (3, 1, 1)
77
+ padding = [int(k // 2) for k in conv_out_kernel_size]
78
+ self.time_conv_out = torch.nn.Conv3d(
79
+ in_channels=out_channels,
80
+ out_channels=out_channels,
81
+ kernel_size=conv_out_kernel_size,
82
+ padding=padding,
83
+ )
84
+
85
+ self.gradient_checkpointing = False
86
+
87
+ def forward(
88
+ self,
89
+ sample: torch.Tensor,
90
+ image_only_indicator: torch.Tensor,
91
+ num_frames: int = 1,
92
+ ) -> torch.Tensor:
93
+ r"""The forward method of the `TemporalDecoder` class."""
94
+
95
+ sample = self.conv_in(sample)
96
+
97
+ upscale_dtype = next(itertools.chain(self.up_blocks.parameters(), self.up_blocks.buffers())).dtype
98
+ if torch.is_grad_enabled() and self.gradient_checkpointing:
99
+ # middle
100
+ sample = self._gradient_checkpointing_func(
101
+ self.mid_block,
102
+ sample,
103
+ image_only_indicator,
104
+ )
105
+ sample = sample.to(upscale_dtype)
106
+
107
+ # up
108
+ for up_block in self.up_blocks:
109
+ sample = self._gradient_checkpointing_func(
110
+ up_block,
111
+ sample,
112
+ image_only_indicator,
113
+ )
114
+ else:
115
+ # middle
116
+ sample = self.mid_block(sample, image_only_indicator=image_only_indicator)
117
+ sample = sample.to(upscale_dtype)
118
+
119
+ # up
120
+ for up_block in self.up_blocks:
121
+ sample = up_block(sample, image_only_indicator=image_only_indicator)
122
+
123
+ # post-process
124
+ sample = self.conv_norm_out(sample)
125
+ sample = self.conv_act(sample)
126
+ sample = self.conv_out(sample)
127
+
128
+ batch_frames, channels, height, width = sample.shape
129
+ batch_size = batch_frames // num_frames
130
+ sample = sample[None, :].reshape(batch_size, num_frames, channels, height, width).permute(0, 2, 1, 3, 4)
131
+ sample = self.time_conv_out(sample)
132
+
133
+ sample = sample.permute(0, 2, 1, 3, 4).reshape(batch_frames, channels, height, width)
134
+
135
+ return sample
136
+
137
+
138
+ class AutoencoderKLTemporalDecoder(ModelMixin, ConfigMixin):
139
+ r"""
140
+ A VAE model with KL loss for encoding images into latents and decoding latent representations into images.
141
+
142
+ This model inherits from [`ModelMixin`]. Check the superclass documentation for its generic methods implemented
143
+ for all models (such as downloading or saving).
144
+
145
+ Parameters:
146
+ in_channels (int, *optional*, defaults to 3): Number of channels in the input image.
147
+ out_channels (int, *optional*, defaults to 3): Number of channels in the output.
148
+ down_block_types (`Tuple[str]`, *optional*, defaults to `("DownEncoderBlock2D",)`):
149
+ Tuple of downsample block types.
150
+ block_out_channels (`Tuple[int]`, *optional*, defaults to `(64,)`):
151
+ Tuple of block output channels.
152
+ layers_per_block (`int`, *optional*, defaults to 1): Number of layers per block.
153
+ latent_channels (`int`, *optional*, defaults to 4): Number of channels in the latent space.
154
+ sample_size (`int`, *optional*, defaults to `32`): Sample input size.
155
+ scaling_factor (`float`, *optional*, defaults to 0.18215):
156
+ The component-wise standard deviation of the trained latent space computed using the first batch of the
157
+ training set. This is used to scale the latent space to have unit variance when training the diffusion
158
+ model. The latents are scaled with the formula `z = z * scaling_factor` before being passed to the
159
+ diffusion model. When decoding, the latents are scaled back to the original scale with the formula: `z = 1
160
+ / scaling_factor * z`. For more details, refer to sections 4.3.2 and D.1 of the [High-Resolution Image
161
+ Synthesis with Latent Diffusion Models](https://huggingface.co/papers/2112.10752) paper.
162
+ force_upcast (`bool`, *optional*, default to `True`):
163
+ If enabled, it will force the VAE to run in float32 for high image resolution pipelines, such as SD-XL. The VAE
164
+ can be fine-tuned / trained to a lower range without losing too much precision, in which case `force_upcast`
165
+ can be set to `False` - see: https://huggingface.co/madebyollin/sdxl-vae-fp16-fix
166
+ """
167
+
168
+ _supports_gradient_checkpointing = True
169
+
170
+ @register_to_config
171
+ def __init__(
172
+ self,
173
+ in_channels: int = 3,
174
+ out_channels: int = 3,
175
+ down_block_types: Tuple[str] = ("DownEncoderBlock2D",),
176
+ block_out_channels: Tuple[int] = (64,),
177
+ layers_per_block: int = 1,
178
+ latent_channels: int = 4,
179
+ sample_size: int = 32,
180
+ scaling_factor: float = 0.18215,
181
+ force_upcast: bool = True,
182
+ ):
183
+ super().__init__()
184
+
185
+ # pass init params to Encoder
186
+ self.encoder = Encoder(
187
+ in_channels=in_channels,
188
+ out_channels=latent_channels,
189
+ down_block_types=down_block_types,
190
+ block_out_channels=block_out_channels,
191
+ layers_per_block=layers_per_block,
192
+ double_z=True,
193
+ )
194
+
195
+ # pass init params to Decoder
196
+ self.decoder = TemporalDecoder(
197
+ in_channels=latent_channels,
198
+ out_channels=out_channels,
199
+ block_out_channels=block_out_channels,
200
+ layers_per_block=layers_per_block,
201
+ )
202
+
203
+ self.quant_conv = nn.Conv2d(2 * latent_channels, 2 * latent_channels, 1)
204
+
205
+ @property
206
+ # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.attn_processors
207
+ def attn_processors(self) -> Dict[str, AttentionProcessor]:
208
+ r"""
209
+ Returns:
210
+ `dict` of attention processors: A dictionary containing all attention processors used in the model,
211
+ indexed by its weight name.
212
+ """
213
+ # set recursively
214
+ processors = {}
215
+
216
+ def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
217
+ if hasattr(module, "get_processor"):
218
+ processors[f"{name}.processor"] = module.get_processor()
219
+
220
+ for sub_name, child in module.named_children():
221
+ fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)
222
+
223
+ return processors
224
+
225
+ for name, module in self.named_children():
226
+ fn_recursive_add_processors(name, module, processors)
227
+
228
+ return processors
229
+
230
+ # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_attn_processor
231
+ def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
232
+ r"""
233
+ Sets the attention processor to use to compute attention.
234
+
235
+ Parameters:
236
+ processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`):
237
+ The instantiated processor class or a dictionary of processor classes that will be set as the processor
238
+ for **all** `Attention` layers.
239
+
240
+ If `processor` is a dict, the key needs to define the path to the corresponding cross attention
241
+ processor. This is strongly recommended when setting trainable attention processors.
242
+
243
+ """
244
+ count = len(self.attn_processors.keys())
245
+
246
+ if isinstance(processor, dict) and len(processor) != count:
247
+ raise ValueError(
248
+ f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
249
+ f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
250
+ )
251
+
252
+ def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
253
+ if hasattr(module, "set_processor"):
254
+ if not isinstance(processor, dict):
255
+ module.set_processor(processor)
256
+ else:
257
+ module.set_processor(processor.pop(f"{name}.processor"))
258
+
259
+ for sub_name, child in module.named_children():
260
+ fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)
261
+
262
+ for name, module in self.named_children():
263
+ fn_recursive_attn_processor(name, module, processor)
264
+
265
+ def set_default_attn_processor(self):
266
+ """
267
+ Disables custom attention processors and sets the default attention implementation.
268
+ """
269
+ if all(proc.__class__ in CROSS_ATTENTION_PROCESSORS for proc in self.attn_processors.values()):
270
+ processor = AttnProcessor()
271
+ else:
272
+ raise ValueError(
273
+ f"Cannot call `set_default_attn_processor` when attention processors are of type {next(iter(self.attn_processors.values()))}"
274
+ )
275
+
276
+ self.set_attn_processor(processor)
277
+
278
+ @apply_forward_hook
279
+ def encode(
280
+ self, x: torch.Tensor, return_dict: bool = True
281
+ ) -> Union[AutoencoderKLOutput, Tuple[DiagonalGaussianDistribution]]:
282
+ """
283
+ Encode a batch of images into latents.
284
+
285
+ Args:
286
+ x (`torch.Tensor`): Input batch of images.
287
+ return_dict (`bool`, *optional*, defaults to `True`):
288
+ Whether to return a [`~models.autoencoders.autoencoder_kl.AutoencoderKLOutput`] instead of a plain
289
+ tuple.
290
+
291
+ Returns:
292
+ The latent representations of the encoded images. If `return_dict` is True, a
293
+ [`~models.autoencoders.autoencoder_kl.AutoencoderKLOutput`] is returned, otherwise a plain `tuple` is
294
+ returned.
295
+ """
296
+ h = self.encoder(x)
297
+ moments = self.quant_conv(h)
298
+ posterior = DiagonalGaussianDistribution(moments)
299
+
300
+ if not return_dict:
301
+ return (posterior,)
302
+
303
+ return AutoencoderKLOutput(latent_dist=posterior)
304
+
305
+ @apply_forward_hook
306
+ def decode(
307
+ self,
308
+ z: torch.Tensor,
309
+ num_frames: int,
310
+ return_dict: bool = True,
311
+ ) -> Union[DecoderOutput, torch.Tensor]:
312
+ """
313
+ Decode a batch of images.
314
+
315
+ Args:
316
+ z (`torch.Tensor`): Input batch of latent vectors.
317
+ return_dict (`bool`, *optional*, defaults to `True`):
318
+ Whether to return a [`~models.vae.DecoderOutput`] instead of a plain tuple.
319
+
320
+ Returns:
321
+ [`~models.vae.DecoderOutput`] or `tuple`:
322
+ If return_dict is True, a [`~models.vae.DecoderOutput`] is returned, otherwise a plain `tuple` is
323
+ returned.
324
+
325
+ """
326
+ batch_size = z.shape[0] // num_frames
327
+ image_only_indicator = torch.zeros(batch_size, num_frames, dtype=z.dtype, device=z.device)
328
+ decoded = self.decoder(z, num_frames=num_frames, image_only_indicator=image_only_indicator)
329
+
330
+ if not return_dict:
331
+ return (decoded,)
332
+
333
+ return DecoderOutput(sample=decoded)
334
+
335
+ def forward(
336
+ self,
337
+ sample: torch.Tensor,
338
+ sample_posterior: bool = False,
339
+ return_dict: bool = True,
340
+ generator: Optional[torch.Generator] = None,
341
+ num_frames: int = 1,
342
+ ) -> Union[DecoderOutput, torch.Tensor]:
343
+ r"""
344
+ Args:
345
+ sample (`torch.Tensor`): Input sample.
346
+ sample_posterior (`bool`, *optional*, defaults to `False`):
347
+ Whether to sample from the posterior.
348
+ return_dict (`bool`, *optional*, defaults to `True`):
349
+ Whether or not to return a [`DecoderOutput`] instead of a plain tuple.
350
+ """
351
+ x = sample
352
+ posterior = self.encode(x).latent_dist
353
+ if sample_posterior:
354
+ z = posterior.sample(generator=generator)
355
+ else:
356
+ z = posterior.mode()
357
+
358
+ dec = self.decode(z, num_frames=num_frames).sample
359
+
360
+ if not return_dict:
361
+ return (dec,)
362
+
363
+ return DecoderOutput(sample=dec)
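
A minimal smoke-test sketch (not part of the committed file) for `AutoencoderKLTemporalDecoder` as defined in this file, assuming the class is importable from the `diffusers` package root; the toy default config and random tensors are placeholders rather than the pretrained SVD configuration.

import torch
from diffusers import AutoencoderKLTemporalDecoder  # assumed top-level export

vae = AutoencoderKLTemporalDecoder()  # toy defaults: one block, 64 channels
vae.eval()

# Frames are flattened into the batch dimension; `num_frames` tells decode()
# how to regroup them before the temporal output convolution.
num_frames = 4
frames = torch.randn(num_frames, 3, 64, 64)  # (batch * frames, C, H, W)

with torch.no_grad():
    latents = vae.encode(frames).latent_dist.mode()
    video = vae.decode(latents, num_frames=num_frames).sample

# With the single-block default config there is no spatial down/upsampling,
# so `video` matches the shape of `frames`; pretrained checkpoints typically
# use a deeper config with 8x spatial compression.
print(video.shape)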
exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/models/autoencoders/autoencoder_kl_wan.py ADDED
@@ -0,0 +1,1419 @@
1
+ # Copyright 2025 The Wan Team and The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import List, Optional, Tuple, Union
16
+
17
+ import torch
18
+ import torch.nn as nn
19
+ import torch.nn.functional as F
20
+ import torch.utils.checkpoint
21
+
22
+ from ...configuration_utils import ConfigMixin, register_to_config
23
+ from ...loaders import FromOriginalModelMixin
24
+ from ...utils import logging
25
+ from ...utils.accelerate_utils import apply_forward_hook
26
+ from ..activations import get_activation
27
+ from ..modeling_outputs import AutoencoderKLOutput
28
+ from ..modeling_utils import ModelMixin
29
+ from .vae import DecoderOutput, DiagonalGaussianDistribution
30
+
31
+
32
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
33
+
34
+ CACHE_T = 2
35
+
36
+
37
+ class AvgDown3D(nn.Module):
38
+ def __init__(
39
+ self,
40
+ in_channels,
41
+ out_channels,
42
+ factor_t,
43
+ factor_s=1,
44
+ ):
45
+ super().__init__()
46
+ self.in_channels = in_channels
47
+ self.out_channels = out_channels
48
+ self.factor_t = factor_t
49
+ self.factor_s = factor_s
50
+ self.factor = self.factor_t * self.factor_s * self.factor_s
51
+
52
+ assert in_channels * self.factor % out_channels == 0
53
+ self.group_size = in_channels * self.factor // out_channels
54
+
55
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
56
+ pad_t = (self.factor_t - x.shape[2] % self.factor_t) % self.factor_t
57
+ pad = (0, 0, 0, 0, pad_t, 0)
58
+ x = F.pad(x, pad)
59
+ B, C, T, H, W = x.shape
60
+ x = x.view(
61
+ B,
62
+ C,
63
+ T // self.factor_t,
64
+ self.factor_t,
65
+ H // self.factor_s,
66
+ self.factor_s,
67
+ W // self.factor_s,
68
+ self.factor_s,
69
+ )
70
+ x = x.permute(0, 1, 3, 5, 7, 2, 4, 6).contiguous()
71
+ x = x.view(
72
+ B,
73
+ C * self.factor,
74
+ T // self.factor_t,
75
+ H // self.factor_s,
76
+ W // self.factor_s,
77
+ )
78
+ x = x.view(
79
+ B,
80
+ self.out_channels,
81
+ self.group_size,
82
+ T // self.factor_t,
83
+ H // self.factor_s,
84
+ W // self.factor_s,
85
+ )
86
+ x = x.mean(dim=2)
87
+ return x
88
+
89
+
90
+ class DupUp3D(nn.Module):
91
+ def __init__(
92
+ self,
93
+ in_channels: int,
94
+ out_channels: int,
95
+ factor_t,
96
+ factor_s=1,
97
+ ):
98
+ super().__init__()
99
+ self.in_channels = in_channels
100
+ self.out_channels = out_channels
101
+
102
+ self.factor_t = factor_t
103
+ self.factor_s = factor_s
104
+ self.factor = self.factor_t * self.factor_s * self.factor_s
105
+
106
+ assert out_channels * self.factor % in_channels == 0
107
+ self.repeats = out_channels * self.factor // in_channels
108
+
109
+ def forward(self, x: torch.Tensor, first_chunk=False) -> torch.Tensor:
110
+ x = x.repeat_interleave(self.repeats, dim=1)
111
+ x = x.view(
112
+ x.size(0),
113
+ self.out_channels,
114
+ self.factor_t,
115
+ self.factor_s,
116
+ self.factor_s,
117
+ x.size(2),
118
+ x.size(3),
119
+ x.size(4),
120
+ )
121
+ x = x.permute(0, 1, 5, 2, 6, 3, 7, 4).contiguous()
122
+ x = x.view(
123
+ x.size(0),
124
+ self.out_channels,
125
+ x.size(2) * self.factor_t,
126
+ x.size(4) * self.factor_s,
127
+ x.size(6) * self.factor_s,
128
+ )
129
+ if first_chunk:
130
+ x = x[:, :, self.factor_t - 1 :, :, :]
131
+ return x
132
+
133
+
134
+ class WanCausalConv3d(nn.Conv3d):
135
+ r"""
136
+ A custom 3D causal convolution layer with feature caching support.
137
+
138
+ This layer extends the standard Conv3D layer by ensuring causality in the time dimension and handling feature
139
+ caching for efficient inference.
140
+
141
+ Args:
142
+ in_channels (int): Number of channels in the input image
143
+ out_channels (int): Number of channels produced by the convolution
144
+ kernel_size (int or tuple): Size of the convolving kernel
145
+ stride (int or tuple, optional): Stride of the convolution. Default: 1
146
+ padding (int or tuple, optional): Zero-padding added to all three sides of the input. Default: 0
147
+ """
148
+
149
+ def __init__(
150
+ self,
151
+ in_channels: int,
152
+ out_channels: int,
153
+ kernel_size: Union[int, Tuple[int, int, int]],
154
+ stride: Union[int, Tuple[int, int, int]] = 1,
155
+ padding: Union[int, Tuple[int, int, int]] = 0,
156
+ ) -> None:
157
+ super().__init__(
158
+ in_channels=in_channels,
159
+ out_channels=out_channels,
160
+ kernel_size=kernel_size,
161
+ stride=stride,
162
+ padding=padding,
163
+ )
164
+
165
+ # Set up causal padding
166
+ self._padding = (self.padding[2], self.padding[2], self.padding[1], self.padding[1], 2 * self.padding[0], 0)
167
+ self.padding = (0, 0, 0)
168
+
169
+ def forward(self, x, cache_x=None):
170
+ padding = list(self._padding)
171
+ if cache_x is not None and self._padding[4] > 0:
172
+ cache_x = cache_x.to(x.device)
173
+ x = torch.cat([cache_x, x], dim=2)
174
+ padding[4] -= cache_x.shape[2]
175
+ x = F.pad(x, padding)
176
+ return super().forward(x)
177
+
178
+
179
+ class WanRMS_norm(nn.Module):
180
+ r"""
181
+ A custom RMS normalization layer.
182
+
183
+ Args:
184
+ dim (int): The number of dimensions to normalize over.
185
+ channel_first (bool, optional): Whether the input tensor has channels as the first dimension.
186
+ Default is True.
187
+ images (bool, optional): Whether the input represents image data. Default is True.
188
+ bias (bool, optional): Whether to include a learnable bias term. Default is False.
189
+ """
190
+
191
+ def __init__(self, dim: int, channel_first: bool = True, images: bool = True, bias: bool = False) -> None:
192
+ super().__init__()
193
+ broadcastable_dims = (1, 1, 1) if not images else (1, 1)
194
+ shape = (dim, *broadcastable_dims) if channel_first else (dim,)
195
+
196
+ self.channel_first = channel_first
197
+ self.scale = dim**0.5
198
+ self.gamma = nn.Parameter(torch.ones(shape))
199
+ self.bias = nn.Parameter(torch.zeros(shape)) if bias else 0.0
200
+
201
+ def forward(self, x):
202
+ return F.normalize(x, dim=(1 if self.channel_first else -1)) * self.scale * self.gamma + self.bias
203
+
204
+
205
+ class WanUpsample(nn.Upsample):
206
+ r"""
207
+ Perform upsampling while ensuring the output tensor has the same data type as the input.
208
+
209
+ Args:
210
+ x (torch.Tensor): Input tensor to be upsampled.
211
+
212
+ Returns:
213
+ torch.Tensor: Upsampled tensor with the same data type as the input.
214
+ """
215
+
216
+ def forward(self, x):
217
+ return super().forward(x.float()).type_as(x)
218
+
219
+
220
+ class WanResample(nn.Module):
221
+ r"""
222
+ A custom resampling module for 2D and 3D data.
223
+
224
+ Args:
225
+ dim (int): The number of input/output channels.
226
+ mode (str): The resampling mode. Must be one of:
227
+ - 'none': No resampling (identity operation).
228
+ - 'upsample2d': 2D upsampling with nearest-exact interpolation and convolution.
229
+ - 'upsample3d': 3D upsampling with nearest-exact interpolation, convolution, and causal 3D convolution.
230
+ - 'downsample2d': 2D downsampling with zero-padding and convolution.
231
+ - 'downsample3d': 3D downsampling with zero-padding, convolution, and causal 3D convolution.
232
+ """
233
+
234
+ def __init__(self, dim: int, mode: str, upsample_out_dim: int = None) -> None:
235
+ super().__init__()
236
+ self.dim = dim
237
+ self.mode = mode
238
+
239
+ # default to dim //2
240
+ if upsample_out_dim is None:
241
+ upsample_out_dim = dim // 2
242
+
243
+ # layers
244
+ if mode == "upsample2d":
245
+ self.resample = nn.Sequential(
246
+ WanUpsample(scale_factor=(2.0, 2.0), mode="nearest-exact"),
247
+ nn.Conv2d(dim, upsample_out_dim, 3, padding=1),
248
+ )
249
+ elif mode == "upsample3d":
250
+ self.resample = nn.Sequential(
251
+ WanUpsample(scale_factor=(2.0, 2.0), mode="nearest-exact"),
252
+ nn.Conv2d(dim, upsample_out_dim, 3, padding=1),
253
+ )
254
+ self.time_conv = WanCausalConv3d(dim, dim * 2, (3, 1, 1), padding=(1, 0, 0))
255
+
256
+ elif mode == "downsample2d":
257
+ self.resample = nn.Sequential(nn.ZeroPad2d((0, 1, 0, 1)), nn.Conv2d(dim, dim, 3, stride=(2, 2)))
258
+ elif mode == "downsample3d":
259
+ self.resample = nn.Sequential(nn.ZeroPad2d((0, 1, 0, 1)), nn.Conv2d(dim, dim, 3, stride=(2, 2)))
260
+ self.time_conv = WanCausalConv3d(dim, dim, (3, 1, 1), stride=(2, 1, 1), padding=(0, 0, 0))
261
+
262
+ else:
263
+ self.resample = nn.Identity()
264
+
265
+ def forward(self, x, feat_cache=None, feat_idx=[0]):
266
+ b, c, t, h, w = x.size()
267
+ if self.mode == "upsample3d":
268
+ if feat_cache is not None:
269
+ idx = feat_idx[0]
270
+ if feat_cache[idx] is None:
271
+ feat_cache[idx] = "Rep"
272
+ feat_idx[0] += 1
273
+ else:
274
+ cache_x = x[:, :, -CACHE_T:, :, :].clone()
275
+ if cache_x.shape[2] < 2 and feat_cache[idx] is not None and feat_cache[idx] != "Rep":
276
+ # cache the last frame of the previous two chunks
277
+ cache_x = torch.cat(
278
+ [feat_cache[idx][:, :, -1, :, :].unsqueeze(2).to(cache_x.device), cache_x], dim=2
279
+ )
280
+ if cache_x.shape[2] < 2 and feat_cache[idx] is not None and feat_cache[idx] == "Rep":
281
+ cache_x = torch.cat([torch.zeros_like(cache_x).to(cache_x.device), cache_x], dim=2)
282
+ if feat_cache[idx] == "Rep":
283
+ x = self.time_conv(x)
284
+ else:
285
+ x = self.time_conv(x, feat_cache[idx])
286
+ feat_cache[idx] = cache_x
287
+ feat_idx[0] += 1
288
+
289
+ x = x.reshape(b, 2, c, t, h, w)
290
+ x = torch.stack((x[:, 0, :, :, :, :], x[:, 1, :, :, :, :]), 3)
291
+ x = x.reshape(b, c, t * 2, h, w)
292
+ t = x.shape[2]
293
+ x = x.permute(0, 2, 1, 3, 4).reshape(b * t, c, h, w)
294
+ x = self.resample(x)
295
+ x = x.view(b, t, x.size(1), x.size(2), x.size(3)).permute(0, 2, 1, 3, 4)
296
+
297
+ if self.mode == "downsample3d":
298
+ if feat_cache is not None:
299
+ idx = feat_idx[0]
300
+ if feat_cache[idx] is None:
301
+ feat_cache[idx] = x.clone()
302
+ feat_idx[0] += 1
303
+ else:
304
+ cache_x = x[:, :, -1:, :, :].clone()
305
+ x = self.time_conv(torch.cat([feat_cache[idx][:, :, -1:, :, :], x], 2))
306
+ feat_cache[idx] = cache_x
307
+ feat_idx[0] += 1
308
+ return x
309
+
310
+
311
+ class WanResidualBlock(nn.Module):
312
+ r"""
313
+ A custom residual block module.
314
+
315
+ Args:
316
+ in_dim (int): Number of input channels.
317
+ out_dim (int): Number of output channels.
318
+ dropout (float, optional): Dropout rate for the dropout layer. Default is 0.0.
319
+ non_linearity (str, optional): Type of non-linearity to use. Default is "silu".
320
+ """
321
+
322
+ def __init__(
323
+ self,
324
+ in_dim: int,
325
+ out_dim: int,
326
+ dropout: float = 0.0,
327
+ non_linearity: str = "silu",
328
+ ) -> None:
329
+ super().__init__()
330
+ self.in_dim = in_dim
331
+ self.out_dim = out_dim
332
+ self.nonlinearity = get_activation(non_linearity)
333
+
334
+ # layers
335
+ self.norm1 = WanRMS_norm(in_dim, images=False)
336
+ self.conv1 = WanCausalConv3d(in_dim, out_dim, 3, padding=1)
337
+ self.norm2 = WanRMS_norm(out_dim, images=False)
338
+ self.dropout = nn.Dropout(dropout)
339
+ self.conv2 = WanCausalConv3d(out_dim, out_dim, 3, padding=1)
340
+ self.conv_shortcut = WanCausalConv3d(in_dim, out_dim, 1) if in_dim != out_dim else nn.Identity()
341
+
342
+ def forward(self, x, feat_cache=None, feat_idx=[0]):
343
+ # Apply shortcut connection
344
+ h = self.conv_shortcut(x)
345
+
346
+ # First normalization and activation
347
+ x = self.norm1(x)
348
+ x = self.nonlinearity(x)
349
+
350
+ if feat_cache is not None:
351
+ idx = feat_idx[0]
352
+ cache_x = x[:, :, -CACHE_T:, :, :].clone()
353
+ if cache_x.shape[2] < 2 and feat_cache[idx] is not None:
354
+ cache_x = torch.cat([feat_cache[idx][:, :, -1, :, :].unsqueeze(2).to(cache_x.device), cache_x], dim=2)
355
+
356
+ x = self.conv1(x, feat_cache[idx])
357
+ feat_cache[idx] = cache_x
358
+ feat_idx[0] += 1
359
+ else:
360
+ x = self.conv1(x)
361
+
362
+ # Second normalization and activation
363
+ x = self.norm2(x)
364
+ x = self.nonlinearity(x)
365
+
366
+ # Dropout
367
+ x = self.dropout(x)
368
+
369
+ if feat_cache is not None:
370
+ idx = feat_idx[0]
371
+ cache_x = x[:, :, -CACHE_T:, :, :].clone()
372
+ if cache_x.shape[2] < 2 and feat_cache[idx] is not None:
373
+ cache_x = torch.cat([feat_cache[idx][:, :, -1, :, :].unsqueeze(2).to(cache_x.device), cache_x], dim=2)
374
+
375
+ x = self.conv2(x, feat_cache[idx])
376
+ feat_cache[idx] = cache_x
377
+ feat_idx[0] += 1
378
+ else:
379
+ x = self.conv2(x)
380
+
381
+ # Add residual connection
382
+ return x + h
383
+
384
+
385
+ class WanAttentionBlock(nn.Module):
386
+ r"""
387
+ Causal self-attention with a single head.
388
+
389
+ Args:
390
+ dim (int): The number of channels in the input tensor.
391
+ """
392
+
393
+ def __init__(self, dim):
394
+ super().__init__()
395
+ self.dim = dim
396
+
397
+ # layers
398
+ self.norm = WanRMS_norm(dim)
399
+ self.to_qkv = nn.Conv2d(dim, dim * 3, 1)
400
+ self.proj = nn.Conv2d(dim, dim, 1)
401
+
402
+ def forward(self, x):
403
+ identity = x
404
+ batch_size, channels, time, height, width = x.size()
405
+
406
+ x = x.permute(0, 2, 1, 3, 4).reshape(batch_size * time, channels, height, width)
407
+ x = self.norm(x)
408
+
409
+ # compute query, key, value
410
+ qkv = self.to_qkv(x)
411
+ qkv = qkv.reshape(batch_size * time, 1, channels * 3, -1)
412
+ qkv = qkv.permute(0, 1, 3, 2).contiguous()
413
+ q, k, v = qkv.chunk(3, dim=-1)
414
+
415
+ # apply attention
416
+ x = F.scaled_dot_product_attention(q, k, v)
417
+
418
+ x = x.squeeze(1).permute(0, 2, 1).reshape(batch_size * time, channels, height, width)
419
+
420
+ # output projection
421
+ x = self.proj(x)
422
+
423
+ # Reshape back: [(b*t), c, h, w] -> [b, c, t, h, w]
424
+ x = x.view(batch_size, time, channels, height, width)
425
+ x = x.permute(0, 2, 1, 3, 4)
426
+
427
+ return x + identity
428
+
429
+
430
+ class WanMidBlock(nn.Module):
431
+ """
432
+ Middle block for WanVAE encoder and decoder.
433
+
434
+ Args:
435
+ dim (int): Number of input/output channels.
436
+ dropout (float): Dropout rate.
437
+ non_linearity (str): Type of non-linearity to use.
438
+ """
439
+
440
+ def __init__(self, dim: int, dropout: float = 0.0, non_linearity: str = "silu", num_layers: int = 1):
441
+ super().__init__()
442
+ self.dim = dim
443
+
444
+ # Create the components
445
+ resnets = [WanResidualBlock(dim, dim, dropout, non_linearity)]
446
+ attentions = []
447
+ for _ in range(num_layers):
448
+ attentions.append(WanAttentionBlock(dim))
449
+ resnets.append(WanResidualBlock(dim, dim, dropout, non_linearity))
450
+ self.attentions = nn.ModuleList(attentions)
451
+ self.resnets = nn.ModuleList(resnets)
452
+
453
+ self.gradient_checkpointing = False
454
+
455
+ def forward(self, x, feat_cache=None, feat_idx=[0]):
456
+ # First residual block
457
+ x = self.resnets[0](x, feat_cache, feat_idx)
458
+
459
+ # Process through attention and residual blocks
460
+ for attn, resnet in zip(self.attentions, self.resnets[1:]):
461
+ if attn is not None:
462
+ x = attn(x)
463
+
464
+ x = resnet(x, feat_cache, feat_idx)
465
+
466
+ return x
467
+
468
+
469
+ class WanResidualDownBlock(nn.Module):
470
+ def __init__(self, in_dim, out_dim, dropout, num_res_blocks, temperal_downsample=False, down_flag=False):
471
+ super().__init__()
472
+
473
+ # Shortcut path with downsample
474
+ self.avg_shortcut = AvgDown3D(
475
+ in_dim,
476
+ out_dim,
477
+ factor_t=2 if temperal_downsample else 1,
478
+ factor_s=2 if down_flag else 1,
479
+ )
480
+
481
+ # Main path with residual blocks and downsample
482
+ resnets = []
483
+ for _ in range(num_res_blocks):
484
+ resnets.append(WanResidualBlock(in_dim, out_dim, dropout))
485
+ in_dim = out_dim
486
+ self.resnets = nn.ModuleList(resnets)
487
+
488
+ # Add the final downsample block
489
+ if down_flag:
490
+ mode = "downsample3d" if temperal_downsample else "downsample2d"
491
+ self.downsampler = WanResample(out_dim, mode=mode)
492
+ else:
493
+ self.downsampler = None
494
+
495
+ def forward(self, x, feat_cache=None, feat_idx=[0]):
496
+ x_copy = x.clone()
497
+ for resnet in self.resnets:
498
+ x = resnet(x, feat_cache, feat_idx)
499
+ if self.downsampler is not None:
500
+ x = self.downsampler(x, feat_cache, feat_idx)
501
+
502
+ return x + self.avg_shortcut(x_copy)
503
+
504
+
505
+ class WanEncoder3d(nn.Module):
506
+ r"""
507
+ A 3D encoder module.
508
+
509
+ Args:
510
+ dim (int): The base number of channels in the first layer.
511
+ z_dim (int): The dimensionality of the latent space.
512
+ dim_mult (list of int): Multipliers for the number of channels in each block.
513
+ num_res_blocks (int): Number of residual blocks in each block.
514
+ attn_scales (list of float): Scales at which to apply attention mechanisms.
515
+ temperal_downsample (list of bool): Whether to downsample temporally in each block.
516
+ dropout (float): Dropout rate for the dropout layers.
517
+ non_linearity (str): Type of non-linearity to use.
518
+ """
519
+
520
+ def __init__(
521
+ self,
522
+ in_channels: int = 3,
523
+ dim=128,
524
+ z_dim=4,
525
+ dim_mult=[1, 2, 4, 4],
526
+ num_res_blocks=2,
527
+ attn_scales=[],
528
+ temperal_downsample=[True, True, False],
529
+ dropout=0.0,
530
+ non_linearity: str = "silu",
531
+ is_residual: bool = False, # wan 2.2 vae use a residual downblock
532
+ ):
533
+ super().__init__()
534
+ self.dim = dim
535
+ self.z_dim = z_dim
536
+ self.dim_mult = dim_mult
537
+ self.num_res_blocks = num_res_blocks
538
+ self.attn_scales = attn_scales
539
+ self.temperal_downsample = temperal_downsample
540
+ self.nonlinearity = get_activation(non_linearity)
541
+
542
+ # dimensions
543
+ dims = [dim * u for u in [1] + dim_mult]
544
+ scale = 1.0
545
+
546
+ # init block
547
+ self.conv_in = WanCausalConv3d(in_channels, dims[0], 3, padding=1)
548
+
549
+ # downsample blocks
550
+ self.down_blocks = nn.ModuleList([])
551
+ for i, (in_dim, out_dim) in enumerate(zip(dims[:-1], dims[1:])):
552
+ # residual (+attention) blocks
553
+ if is_residual:
554
+ self.down_blocks.append(
555
+ WanResidualDownBlock(
556
+ in_dim,
557
+ out_dim,
558
+ dropout,
559
+ num_res_blocks,
560
+ temperal_downsample=temperal_downsample[i] if i != len(dim_mult) - 1 else False,
561
+ down_flag=i != len(dim_mult) - 1,
562
+ )
563
+ )
564
+ else:
565
+ for _ in range(num_res_blocks):
566
+ self.down_blocks.append(WanResidualBlock(in_dim, out_dim, dropout))
567
+ if scale in attn_scales:
568
+ self.down_blocks.append(WanAttentionBlock(out_dim))
569
+ in_dim = out_dim
570
+
571
+ # downsample block
572
+ if i != len(dim_mult) - 1:
573
+ mode = "downsample3d" if temperal_downsample[i] else "downsample2d"
574
+ self.down_blocks.append(WanResample(out_dim, mode=mode))
575
+ scale /= 2.0
576
+
577
+ # middle blocks
578
+ self.mid_block = WanMidBlock(out_dim, dropout, non_linearity, num_layers=1)
579
+
580
+ # output blocks
581
+ self.norm_out = WanRMS_norm(out_dim, images=False)
582
+ self.conv_out = WanCausalConv3d(out_dim, z_dim, 3, padding=1)
583
+
584
+ self.gradient_checkpointing = False
585
+
586
+ def forward(self, x, feat_cache=None, feat_idx=[0]):
587
+ if feat_cache is not None:
588
+ idx = feat_idx[0]
589
+ cache_x = x[:, :, -CACHE_T:, :, :].clone()
590
+ if cache_x.shape[2] < 2 and feat_cache[idx] is not None:
591
+ # cache the last frame of the previous two chunks
592
+ cache_x = torch.cat([feat_cache[idx][:, :, -1, :, :].unsqueeze(2).to(cache_x.device), cache_x], dim=2)
593
+ x = self.conv_in(x, feat_cache[idx])
594
+ feat_cache[idx] = cache_x
595
+ feat_idx[0] += 1
596
+ else:
597
+ x = self.conv_in(x)
598
+
599
+ ## downsamples
600
+ for layer in self.down_blocks:
601
+ if feat_cache is not None:
602
+ x = layer(x, feat_cache, feat_idx)
603
+ else:
604
+ x = layer(x)
605
+
606
+ ## middle
607
+ x = self.mid_block(x, feat_cache, feat_idx)
608
+
609
+ ## head
610
+ x = self.norm_out(x)
611
+ x = self.nonlinearity(x)
612
+ if feat_cache is not None:
613
+ idx = feat_idx[0]
614
+ cache_x = x[:, :, -CACHE_T:, :, :].clone()
615
+ if cache_x.shape[2] < 2 and feat_cache[idx] is not None:
616
+ # cache the last frame of the previous two chunks
617
+ cache_x = torch.cat([feat_cache[idx][:, :, -1, :, :].unsqueeze(2).to(cache_x.device), cache_x], dim=2)
618
+ x = self.conv_out(x, feat_cache[idx])
619
+ feat_cache[idx] = cache_x
620
+ feat_idx[0] += 1
621
+ else:
622
+ x = self.conv_out(x)
623
+ return x
624
+
625
+
626
+ class WanResidualUpBlock(nn.Module):
627
+ """
628
+ A block that handles upsampling for the WanVAE decoder.
629
+
630
+ Args:
631
+ in_dim (int): Input dimension
632
+ out_dim (int): Output dimension
633
+ num_res_blocks (int): Number of residual blocks
634
+ dropout (float): Dropout rate
635
+ temperal_upsample (bool): Whether to upsample on temporal dimension
636
+ up_flag (bool): Whether to upsample or not
637
+ non_linearity (str): Type of non-linearity to use
638
+ """
639
+
640
+ def __init__(
641
+ self,
642
+ in_dim: int,
643
+ out_dim: int,
644
+ num_res_blocks: int,
645
+ dropout: float = 0.0,
646
+ temperal_upsample: bool = False,
647
+ up_flag: bool = False,
648
+ non_linearity: str = "silu",
649
+ ):
650
+ super().__init__()
651
+ self.in_dim = in_dim
652
+ self.out_dim = out_dim
653
+
654
+ if up_flag:
655
+ self.avg_shortcut = DupUp3D(
656
+ in_dim,
657
+ out_dim,
658
+ factor_t=2 if temperal_upsample else 1,
659
+ factor_s=2,
660
+ )
661
+ else:
662
+ self.avg_shortcut = None
663
+
664
+ # create residual blocks
665
+ resnets = []
666
+ current_dim = in_dim
667
+ for _ in range(num_res_blocks + 1):
668
+ resnets.append(WanResidualBlock(current_dim, out_dim, dropout, non_linearity))
669
+ current_dim = out_dim
670
+
671
+ self.resnets = nn.ModuleList(resnets)
672
+
673
+ # Add upsampling layer if needed
674
+ if up_flag:
675
+ upsample_mode = "upsample3d" if temperal_upsample else "upsample2d"
676
+ self.upsampler = WanResample(out_dim, mode=upsample_mode, upsample_out_dim=out_dim)
677
+ else:
678
+ self.upsampler = None
679
+
680
+ self.gradient_checkpointing = False
681
+
682
+ def forward(self, x, feat_cache=None, feat_idx=[0], first_chunk=False):
683
+ """
684
+ Forward pass through the upsampling block.
685
+
686
+ Args:
687
+ x (torch.Tensor): Input tensor
688
+ feat_cache (list, optional): Feature cache for causal convolutions
689
+ feat_idx (list, optional): Feature index for cache management
690
+
691
+ Returns:
692
+ torch.Tensor: Output tensor
693
+ """
694
+ x_copy = x.clone()
695
+
696
+ for resnet in self.resnets:
697
+ if feat_cache is not None:
698
+ x = resnet(x, feat_cache, feat_idx)
699
+ else:
700
+ x = resnet(x)
701
+
702
+ if self.upsampler is not None:
703
+ if feat_cache is not None:
704
+ x = self.upsampler(x, feat_cache, feat_idx)
705
+ else:
706
+ x = self.upsampler(x)
707
+
708
+ if self.avg_shortcut is not None:
709
+ x = x + self.avg_shortcut(x_copy, first_chunk=first_chunk)
710
+
711
+ return x
712
+
713
+
714
+ class WanUpBlock(nn.Module):
715
+ """
716
+ A block that handles upsampling for the WanVAE decoder.
717
+
718
+ Args:
719
+ in_dim (int): Input dimension
720
+ out_dim (int): Output dimension
721
+ num_res_blocks (int): Number of residual blocks
722
+ dropout (float): Dropout rate
723
+ upsample_mode (str, optional): Mode for upsampling ('upsample2d' or 'upsample3d')
724
+ non_linearity (str): Type of non-linearity to use
725
+ """
726
+
727
+ def __init__(
728
+ self,
729
+ in_dim: int,
730
+ out_dim: int,
731
+ num_res_blocks: int,
732
+ dropout: float = 0.0,
733
+ upsample_mode: Optional[str] = None,
734
+ non_linearity: str = "silu",
735
+ ):
736
+ super().__init__()
737
+ self.in_dim = in_dim
738
+ self.out_dim = out_dim
739
+
740
+ # Create layers list
741
+ resnets = []
742
+ # Add residual blocks and attention if needed
743
+ current_dim = in_dim
744
+ for _ in range(num_res_blocks + 1):
745
+ resnets.append(WanResidualBlock(current_dim, out_dim, dropout, non_linearity))
746
+ current_dim = out_dim
747
+
748
+ self.resnets = nn.ModuleList(resnets)
749
+
750
+ # Add upsampling layer if needed
751
+ self.upsamplers = None
752
+ if upsample_mode is not None:
753
+ self.upsamplers = nn.ModuleList([WanResample(out_dim, mode=upsample_mode)])
754
+
755
+ self.gradient_checkpointing = False
756
+
757
+ def forward(self, x, feat_cache=None, feat_idx=[0], first_chunk=None):
758
+ """
759
+ Forward pass through the upsampling block.
760
+
761
+ Args:
762
+ x (torch.Tensor): Input tensor
763
+ feat_cache (list, optional): Feature cache for causal convolutions
764
+ feat_idx (list, optional): Feature index for cache management
765
+
766
+ Returns:
767
+ torch.Tensor: Output tensor
768
+ """
769
+ for resnet in self.resnets:
770
+ if feat_cache is not None:
771
+ x = resnet(x, feat_cache, feat_idx)
772
+ else:
773
+ x = resnet(x)
774
+
775
+ if self.upsamplers is not None:
776
+ if feat_cache is not None:
777
+ x = self.upsamplers[0](x, feat_cache, feat_idx)
778
+ else:
779
+ x = self.upsamplers[0](x)
780
+ return x
781
+
782
+
783
+ class WanDecoder3d(nn.Module):
784
+ r"""
785
+ A 3D decoder module.
786
+
787
+ Args:
788
+ dim (int): The base number of channels in the first layer.
789
+ z_dim (int): The dimensionality of the latent space.
790
+ dim_mult (list of int): Multipliers for the number of channels in each block.
791
+ num_res_blocks (int): Number of residual blocks in each block.
792
+ attn_scales (list of float): Scales at which to apply attention mechanisms.
793
+ temperal_upsample (list of bool): Whether to upsample temporally in each block.
794
+ dropout (float): Dropout rate for the dropout layers.
795
+ non_linearity (str): Type of non-linearity to use.
796
+ """
797
+
798
+ def __init__(
799
+ self,
800
+ dim=128,
801
+ z_dim=4,
802
+ dim_mult=[1, 2, 4, 4],
803
+ num_res_blocks=2,
804
+ attn_scales=[],
805
+ temperal_upsample=[False, True, True],
806
+ dropout=0.0,
807
+ non_linearity: str = "silu",
808
+ out_channels: int = 3,
809
+ is_residual: bool = False,
810
+ ):
811
+ super().__init__()
812
+ self.dim = dim
813
+ self.z_dim = z_dim
814
+ self.dim_mult = dim_mult
815
+ self.num_res_blocks = num_res_blocks
816
+ self.attn_scales = attn_scales
817
+ self.temperal_upsample = temperal_upsample
818
+
819
+ self.nonlinearity = get_activation(non_linearity)
820
+
821
+ # dimensions
822
+ dims = [dim * u for u in [dim_mult[-1]] + dim_mult[::-1]]
823
+
824
+ # init block
825
+ self.conv_in = WanCausalConv3d(z_dim, dims[0], 3, padding=1)
826
+
827
+ # middle blocks
828
+ self.mid_block = WanMidBlock(dims[0], dropout, non_linearity, num_layers=1)
829
+
830
+ # upsample blocks
831
+ self.up_blocks = nn.ModuleList([])
832
+ for i, (in_dim, out_dim) in enumerate(zip(dims[:-1], dims[1:])):
833
+ # residual (+attention) blocks
834
+ if i > 0 and not is_residual:
835
+ # wan vae 2.1
836
+ in_dim = in_dim // 2
837
+
838
+ # determine if we need upsampling
839
+ up_flag = i != len(dim_mult) - 1
840
+ # determine upsampling mode, if not upsampling, set to None
841
+ upsample_mode = None
842
+ if up_flag and temperal_upsample[i]:
843
+ upsample_mode = "upsample3d"
844
+ elif up_flag:
845
+ upsample_mode = "upsample2d"
846
+ # Create and add the upsampling block
847
+ if is_residual:
848
+ up_block = WanResidualUpBlock(
849
+ in_dim=in_dim,
850
+ out_dim=out_dim,
851
+ num_res_blocks=num_res_blocks,
852
+ dropout=dropout,
853
+ temperal_upsample=temperal_upsample[i] if up_flag else False,
854
+ up_flag=up_flag,
855
+ non_linearity=non_linearity,
856
+ )
857
+ else:
858
+ up_block = WanUpBlock(
859
+ in_dim=in_dim,
860
+ out_dim=out_dim,
861
+ num_res_blocks=num_res_blocks,
862
+ dropout=dropout,
863
+ upsample_mode=upsample_mode,
864
+ non_linearity=non_linearity,
865
+ )
866
+ self.up_blocks.append(up_block)
867
+
868
+ # output blocks
869
+ self.norm_out = WanRMS_norm(out_dim, images=False)
870
+ self.conv_out = WanCausalConv3d(out_dim, out_channels, 3, padding=1)
871
+
872
+ self.gradient_checkpointing = False
873
+
874
+ def forward(self, x, feat_cache=None, feat_idx=[0], first_chunk=False):
875
+ ## conv1
876
+ if feat_cache is not None:
877
+ idx = feat_idx[0]
878
+ cache_x = x[:, :, -CACHE_T:, :, :].clone()
879
+ if cache_x.shape[2] < 2 and feat_cache[idx] is not None:
880
+ # cache the last frame of the previous chunk as well, so the cache spans the last two chunks
881
+ cache_x = torch.cat([feat_cache[idx][:, :, -1, :, :].unsqueeze(2).to(cache_x.device), cache_x], dim=2)
882
+ x = self.conv_in(x, feat_cache[idx])
883
+ feat_cache[idx] = cache_x
884
+ feat_idx[0] += 1
885
+ else:
886
+ x = self.conv_in(x)
887
+
888
+ ## middle
889
+ x = self.mid_block(x, feat_cache, feat_idx)
890
+
891
+ ## upsamples
892
+ for up_block in self.up_blocks:
893
+ x = up_block(x, feat_cache, feat_idx, first_chunk=first_chunk)
894
+
895
+ ## head
896
+ x = self.norm_out(x)
897
+ x = self.nonlinearity(x)
898
+ if feat_cache is not None:
899
+ idx = feat_idx[0]
900
+ cache_x = x[:, :, -CACHE_T:, :, :].clone()
901
+ if cache_x.shape[2] < 2 and feat_cache[idx] is not None:
902
+ # cache the last frame of the previous chunk as well, so the cache spans the last two chunks
903
+ cache_x = torch.cat([feat_cache[idx][:, :, -1, :, :].unsqueeze(2).to(cache_x.device), cache_x], dim=2)
904
+ x = self.conv_out(x, feat_cache[idx])
905
+ feat_cache[idx] = cache_x
906
+ feat_idx[0] += 1
907
+ else:
908
+ x = self.conv_out(x)
909
+ return x
910
+
911
+
912
+ def patchify(x, patch_size):
913
+ if patch_size == 1:
914
+ return x
915
+
916
+ if x.dim() != 5:
917
+ raise ValueError(f"Invalid input shape: {x.shape}")
918
+ # x shape: [batch_size, channels, frames, height, width]
919
+ batch_size, channels, frames, height, width = x.shape
920
+
921
+ # Ensure height and width are divisible by patch_size
922
+ if height % patch_size != 0 or width % patch_size != 0:
923
+ raise ValueError(f"Height ({height}) and width ({width}) must be divisible by patch_size ({patch_size})")
924
+
925
+ # Reshape to [batch_size, channels, frames, height//patch_size, patch_size, width//patch_size, patch_size]
926
+ x = x.view(batch_size, channels, frames, height // patch_size, patch_size, width // patch_size, patch_size)
927
+
928
+ # Rearrange to [batch_size, channels * patch_size * patch_size, frames, height//patch_size, width//patch_size]
929
+ x = x.permute(0, 1, 6, 4, 2, 3, 5).contiguous()
930
+ x = x.view(batch_size, channels * patch_size * patch_size, frames, height // patch_size, width // patch_size)
931
+
932
+ return x
933
+
934
+
935
+ def unpatchify(x, patch_size):
936
+ if patch_size == 1:
937
+ return x
938
+
939
+ if x.dim() != 5:
940
+ raise ValueError(f"Invalid input shape: {x.shape}")
941
+ # x shape: [batch_size, (channels * patch_size * patch_size), frame, height, width]
942
+ batch_size, c_patches, frames, height, width = x.shape
943
+ channels = c_patches // (patch_size * patch_size)
944
+
945
+ # Reshape to [b, c, patch_size, patch_size, f, h, w]
946
+ x = x.view(batch_size, channels, patch_size, patch_size, frames, height, width)
947
+
948
+ # Rearrange to [b, c, f, h * patch_size, w * patch_size]
949
+ x = x.permute(0, 1, 4, 5, 3, 6, 2).contiguous()
950
+ x = x.view(batch_size, channels, frames, height * patch_size, width * patch_size)
951
+
952
+ return x
953
+
954
+
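A quick shape check for the two helpers above (a sketch with arbitrary values): patchify folds non-overlapping spatial patches into the channel dimension, and unpatchify is its exact inverse.

    import torch

    x = torch.randn(2, 3, 5, 32, 32)      # [batch, channels, frames, height, width]
    p = patchify(x, patch_size=2)         # 2x2 spatial patches folded into channels
    print(p.shape)                        # torch.Size([2, 12, 5, 16, 16])
    y = unpatchify(p, patch_size=2)
    print(y.shape, torch.equal(x, y))     # torch.Size([2, 3, 5, 32, 32]) True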
955
+ class AutoencoderKLWan(ModelMixin, ConfigMixin, FromOriginalModelMixin):
956
+ r"""
957
+ A VAE model with KL loss for encoding videos into latents and decoding latent representations into videos.
958
+ Introduced in [Wan 2.1].
959
+
960
+ This model inherits from [`ModelMixin`]. Check the superclass documentation for its generic methods implemented
961
+ for all models (such as downloading or saving).
962
+ """
963
+
964
+ _supports_gradient_checkpointing = False
965
+
966
+ @register_to_config
967
+ def __init__(
968
+ self,
969
+ base_dim: int = 96,
970
+ decoder_base_dim: Optional[int] = None,
971
+ z_dim: int = 16,
972
+ dim_mult: Tuple[int] = [1, 2, 4, 4],
973
+ num_res_blocks: int = 2,
974
+ attn_scales: List[float] = [],
975
+ temperal_downsample: List[bool] = [False, True, True],
976
+ dropout: float = 0.0,
977
+ latents_mean: List[float] = [
978
+ -0.7571,
979
+ -0.7089,
980
+ -0.9113,
981
+ 0.1075,
982
+ -0.1745,
983
+ 0.9653,
984
+ -0.1517,
985
+ 1.5508,
986
+ 0.4134,
987
+ -0.0715,
988
+ 0.5517,
989
+ -0.3632,
990
+ -0.1922,
991
+ -0.9497,
992
+ 0.2503,
993
+ -0.2921,
994
+ ],
995
+ latents_std: List[float] = [
996
+ 2.8184,
997
+ 1.4541,
998
+ 2.3275,
999
+ 2.6558,
1000
+ 1.2196,
1001
+ 1.7708,
1002
+ 2.6052,
1003
+ 2.0743,
1004
+ 3.2687,
1005
+ 2.1526,
1006
+ 2.8652,
1007
+ 1.5579,
1008
+ 1.6382,
1009
+ 1.1253,
1010
+ 2.8251,
1011
+ 1.9160,
1012
+ ],
1013
+ is_residual: bool = False,
1014
+ in_channels: int = 3,
1015
+ out_channels: int = 3,
1016
+ patch_size: Optional[int] = None,
1017
+ scale_factor_temporal: Optional[int] = 4,
1018
+ scale_factor_spatial: Optional[int] = 8,
1019
+ ) -> None:
1020
+ super().__init__()
1021
+
1022
+ self.z_dim = z_dim
1023
+ self.temperal_downsample = temperal_downsample
1024
+ self.temperal_upsample = temperal_downsample[::-1]
1025
+
1026
+ if decoder_base_dim is None:
1027
+ decoder_base_dim = base_dim
1028
+
1029
+ self.encoder = WanEncoder3d(
1030
+ in_channels=in_channels,
1031
+ dim=base_dim,
1032
+ z_dim=z_dim * 2,
1033
+ dim_mult=dim_mult,
1034
+ num_res_blocks=num_res_blocks,
1035
+ attn_scales=attn_scales,
1036
+ temperal_downsample=temperal_downsample,
1037
+ dropout=dropout,
1038
+ is_residual=is_residual,
1039
+ )
1040
+ self.quant_conv = WanCausalConv3d(z_dim * 2, z_dim * 2, 1)
1041
+ self.post_quant_conv = WanCausalConv3d(z_dim, z_dim, 1)
1042
+
1043
+ self.decoder = WanDecoder3d(
1044
+ dim=decoder_base_dim,
1045
+ z_dim=z_dim,
1046
+ dim_mult=dim_mult,
1047
+ num_res_blocks=num_res_blocks,
1048
+ attn_scales=attn_scales,
1049
+ temperal_upsample=self.temperal_upsample,
1050
+ dropout=dropout,
1051
+ out_channels=out_channels,
1052
+ is_residual=is_residual,
1053
+ )
1054
+
1055
+ self.spatial_compression_ratio = 2 ** len(self.temperal_downsample)
1056
+
1057
+ # When decoding a batch of video latents at a time, one can save memory by slicing across the batch dimension
1058
+ # to perform decoding of a single video latent at a time.
1059
+ self.use_slicing = False
1060
+
1061
+ # When decoding spatially large video latents, the memory requirement is very high. By breaking the video latent
1062
+ # frames spatially into smaller tiles and performing multiple forward passes for decoding, and then blending the
1063
+ # intermediate tiles together, the memory requirement can be lowered.
1064
+ self.use_tiling = False
1065
+
1066
+ # The minimal tile height and width for spatial tiling to be used
1067
+ self.tile_sample_min_height = 256
1068
+ self.tile_sample_min_width = 256
1069
+
1070
+ # The minimal distance between two spatial tiles
1071
+ self.tile_sample_stride_height = 192
1072
+ self.tile_sample_stride_width = 192
1073
+
1074
+ # Precompute and cache conv counts for encoder and decoder for clear_cache speedup
1075
+ self._cached_conv_counts = {
1076
+ "decoder": sum(isinstance(m, WanCausalConv3d) for m in self.decoder.modules())
1077
+ if self.decoder is not None
1078
+ else 0,
1079
+ "encoder": sum(isinstance(m, WanCausalConv3d) for m in self.encoder.modules())
1080
+ if self.encoder is not None
1081
+ else 0,
1082
+ }
1083
+
1084
+ def enable_tiling(
1085
+ self,
1086
+ tile_sample_min_height: Optional[int] = None,
1087
+ tile_sample_min_width: Optional[int] = None,
1088
+ tile_sample_stride_height: Optional[float] = None,
1089
+ tile_sample_stride_width: Optional[float] = None,
1090
+ ) -> None:
1091
+ r"""
1092
+ Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to
1093
+ compute decoding and encoding in several steps. This is useful for saving a large amount of memory and for
1094
+ processing larger videos.
1095
+
1096
+ Args:
1097
+ tile_sample_min_height (`int`, *optional*):
1098
+ The minimum height required for a sample to be separated into tiles across the height dimension.
1099
+ tile_sample_min_width (`int`, *optional*):
1100
+ The minimum width required for a sample to be separated into tiles across the width dimension.
1101
+ tile_sample_stride_height (`int`, *optional*):
1102
+ The stride between two consecutive vertical tiles. This is to ensure that there are no tiling
1103
+ artifacts produced across the height dimension.
1104
+ tile_sample_stride_width (`int`, *optional*):
1105
+ The stride between two consecutive horizontal tiles. This is to ensure that there are no tiling
1106
+ artifacts produced across the width dimension.
1107
+ """
1108
+ self.use_tiling = True
1109
+ self.tile_sample_min_height = tile_sample_min_height or self.tile_sample_min_height
1110
+ self.tile_sample_min_width = tile_sample_min_width or self.tile_sample_min_width
1111
+ self.tile_sample_stride_height = tile_sample_stride_height or self.tile_sample_stride_height
1112
+ self.tile_sample_stride_width = tile_sample_stride_width or self.tile_sample_stride_width
1113
+
1114
+ def disable_tiling(self) -> None:
1115
+ r"""
1116
+ Disable tiled VAE decoding. If `enable_tiling` was previously enabled, this method will go back to computing
1117
+ decoding in one step.
1118
+ """
1119
+ self.use_tiling = False
1120
+
1121
+ def enable_slicing(self) -> None:
1122
+ r"""
1123
+ Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to
1124
+ compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.
1125
+ """
1126
+ self.use_slicing = True
1127
+
1128
+ def disable_slicing(self) -> None:
1129
+ r"""
1130
+ Disable sliced VAE decoding. If `enable_slicing` was previously enabled, this method will go back to computing
1131
+ decoding in one step.
1132
+ """
1133
+ self.use_slicing = False
1134
+
1135
+ def clear_cache(self):
1136
+ # Use cached conv counts for decoder and encoder to avoid re-iterating modules each call
1137
+ self._conv_num = self._cached_conv_counts["decoder"]
1138
+ self._conv_idx = [0]
1139
+ self._feat_map = [None] * self._conv_num
1140
+ # cache encode
1141
+ self._enc_conv_num = self._cached_conv_counts["encoder"]
1142
+ self._enc_conv_idx = [0]
1143
+ self._enc_feat_map = [None] * self._enc_conv_num
1144
+
1145
+ def _encode(self, x: torch.Tensor):
1146
+ _, _, num_frame, height, width = x.shape
1147
+
1148
+ if self.use_tiling and (width > self.tile_sample_min_width or height > self.tile_sample_min_height):
1149
+ return self.tiled_encode(x)
1150
+
1151
+ self.clear_cache()
1152
+ if self.config.patch_size is not None:
1153
+ x = patchify(x, patch_size=self.config.patch_size)
1154
+ iter_ = 1 + (num_frame - 1) // 4
1155
+ for i in range(iter_):
1156
+ self._enc_conv_idx = [0]
1157
+ if i == 0:
1158
+ out = self.encoder(x[:, :, :1, :, :], feat_cache=self._enc_feat_map, feat_idx=self._enc_conv_idx)
1159
+ else:
1160
+ out_ = self.encoder(
1161
+ x[:, :, 1 + 4 * (i - 1) : 1 + 4 * i, :, :],
1162
+ feat_cache=self._enc_feat_map,
1163
+ feat_idx=self._enc_conv_idx,
1164
+ )
1165
+ out = torch.cat([out, out_], 2)
1166
+
1167
+ enc = self.quant_conv(out)
1168
+ self.clear_cache()
1169
+ return enc
1170
+
1171
+ @apply_forward_hook
1172
+ def encode(
1173
+ self, x: torch.Tensor, return_dict: bool = True
1174
+ ) -> Union[AutoencoderKLOutput, Tuple[DiagonalGaussianDistribution]]:
1175
+ r"""
1176
+ Encode a batch of videos into latents.
1177
+
1178
+ Args:
1179
+ x (`torch.Tensor`): Input batch of videos.
1180
+ return_dict (`bool`, *optional*, defaults to `True`):
1181
+ Whether to return a [`~models.autoencoder_kl.AutoencoderKLOutput`] instead of a plain tuple.
1182
+
1183
+ Returns:
1184
+ The latent representations of the encoded videos. If `return_dict` is True, a
1185
+ [`~models.autoencoder_kl.AutoencoderKLOutput`] is returned, otherwise a plain `tuple` is returned.
1186
+ """
1187
+ if self.use_slicing and x.shape[0] > 1:
1188
+ encoded_slices = [self._encode(x_slice) for x_slice in x.split(1)]
1189
+ h = torch.cat(encoded_slices)
1190
+ else:
1191
+ h = self._encode(x)
1192
+ posterior = DiagonalGaussianDistribution(h)
1193
+
1194
+ if not return_dict:
1195
+ return (posterior,)
1196
+ return AutoencoderKLOutput(latent_dist=posterior)
1197
+
1198
+ def _decode(self, z: torch.Tensor, return_dict: bool = True):
1199
+ _, _, num_frame, height, width = z.shape
1200
+ tile_latent_min_height = self.tile_sample_min_height // self.spatial_compression_ratio
1201
+ tile_latent_min_width = self.tile_sample_min_width // self.spatial_compression_ratio
1202
+
1203
+ if self.use_tiling and (width > tile_latent_min_width or height > tile_latent_min_height):
1204
+ return self.tiled_decode(z, return_dict=return_dict)
1205
+
1206
+ self.clear_cache()
1207
+ x = self.post_quant_conv(z)
1208
+ for i in range(num_frame):
1209
+ self._conv_idx = [0]
1210
+ if i == 0:
1211
+ out = self.decoder(
1212
+ x[:, :, i : i + 1, :, :], feat_cache=self._feat_map, feat_idx=self._conv_idx, first_chunk=True
1213
+ )
1214
+ else:
1215
+ out_ = self.decoder(x[:, :, i : i + 1, :, :], feat_cache=self._feat_map, feat_idx=self._conv_idx)
1216
+ out = torch.cat([out, out_], 2)
1217
+
1218
+ if self.config.patch_size is not None:
1219
+ out = unpatchify(out, patch_size=self.config.patch_size)
1220
+
1221
+ out = torch.clamp(out, min=-1.0, max=1.0)
1222
+
1223
+ self.clear_cache()
1224
+ if not return_dict:
1225
+ return (out,)
1226
+
1227
+ return DecoderOutput(sample=out)
1228
+
1229
+ @apply_forward_hook
1230
+ def decode(self, z: torch.Tensor, return_dict: bool = True) -> Union[DecoderOutput, torch.Tensor]:
1231
+ r"""
1232
+ Decode a batch of video latents.
1233
+
1234
+ Args:
1235
+ z (`torch.Tensor`): Input batch of latent vectors.
1236
+ return_dict (`bool`, *optional*, defaults to `True`):
1237
+ Whether to return a [`~models.vae.DecoderOutput`] instead of a plain tuple.
1238
+
1239
+ Returns:
1240
+ [`~models.vae.DecoderOutput`] or `tuple`:
1241
+ If return_dict is True, a [`~models.vae.DecoderOutput`] is returned, otherwise a plain `tuple` is
1242
+ returned.
1243
+ """
1244
+ if self.use_slicing and z.shape[0] > 1:
1245
+ decoded_slices = [self._decode(z_slice).sample for z_slice in z.split(1)]
1246
+ decoded = torch.cat(decoded_slices)
1247
+ else:
1248
+ decoded = self._decode(z).sample
1249
+
1250
+ if not return_dict:
1251
+ return (decoded,)
1252
+ return DecoderOutput(sample=decoded)
1253
+
1254
+ def blend_v(self, a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor:
1255
+ blend_extent = min(a.shape[-2], b.shape[-2], blend_extent)
1256
+ for y in range(blend_extent):
1257
+ b[:, :, :, y, :] = a[:, :, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, :, y, :] * (
1258
+ y / blend_extent
1259
+ )
1260
+ return b
1261
+
1262
+ def blend_h(self, a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor:
1263
+ blend_extent = min(a.shape[-1], b.shape[-1], blend_extent)
1264
+ for x in range(blend_extent):
1265
+ b[:, :, :, :, x] = a[:, :, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, :, x] * (
1266
+ x / blend_extent
1267
+ )
1268
+ return b
1269
+
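blend_v and blend_h above implement a plain linear crossfade over the band where two neighbouring tiles overlap. A one-dimensional sketch of the same ramp (illustrative tensors only):

    import torch

    blend_extent = 4
    a = torch.ones(1, 1, 1, 1, 8)    # right edge of the left tile
    b = torch.zeros(1, 1, 1, 1, 8)   # left edge of the right tile
    for x in range(blend_extent):
        w = x / blend_extent
        b[:, :, :, :, x] = a[:, :, :, :, -blend_extent + x] * (1 - w) + b[:, :, :, :, x] * w
    print(b[0, 0, 0, 0])             # 1.00, 0.75, 0.50, 0.25, 0, 0, 0, 0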
1270
+ def tiled_encode(self, x: torch.Tensor) -> AutoencoderKLOutput:
1271
+ r"""Encode a batch of images using a tiled encoder.
1272
+
1273
+ Args:
1274
+ x (`torch.Tensor`): Input batch of videos.
1275
+
1276
+ Returns:
1277
+ `torch.Tensor`:
1278
+ The latent representation of the encoded videos.
1279
+ """
1280
+ _, _, num_frames, height, width = x.shape
1281
+ latent_height = height // self.spatial_compression_ratio
1282
+ latent_width = width // self.spatial_compression_ratio
1283
+
1284
+ tile_latent_min_height = self.tile_sample_min_height // self.spatial_compression_ratio
1285
+ tile_latent_min_width = self.tile_sample_min_width // self.spatial_compression_ratio
1286
+ tile_latent_stride_height = self.tile_sample_stride_height // self.spatial_compression_ratio
1287
+ tile_latent_stride_width = self.tile_sample_stride_width // self.spatial_compression_ratio
1288
+
1289
+ blend_height = tile_latent_min_height - tile_latent_stride_height
1290
+ blend_width = tile_latent_min_width - tile_latent_stride_width
1291
+
1292
+ # Split x into overlapping tiles and encode them separately.
1293
+ # The tiles have an overlap to avoid seams between tiles.
1294
+ rows = []
1295
+ for i in range(0, height, self.tile_sample_stride_height):
1296
+ row = []
1297
+ for j in range(0, width, self.tile_sample_stride_width):
1298
+ self.clear_cache()
1299
+ time = []
1300
+ frame_range = 1 + (num_frames - 1) // 4
1301
+ for k in range(frame_range):
1302
+ self._enc_conv_idx = [0]
1303
+ if k == 0:
1304
+ tile = x[:, :, :1, i : i + self.tile_sample_min_height, j : j + self.tile_sample_min_width]
1305
+ else:
1306
+ tile = x[
1307
+ :,
1308
+ :,
1309
+ 1 + 4 * (k - 1) : 1 + 4 * k,
1310
+ i : i + self.tile_sample_min_height,
1311
+ j : j + self.tile_sample_min_width,
1312
+ ]
1313
+ tile = self.encoder(tile, feat_cache=self._enc_feat_map, feat_idx=self._enc_conv_idx)
1314
+ tile = self.quant_conv(tile)
1315
+ time.append(tile)
1316
+ row.append(torch.cat(time, dim=2))
1317
+ rows.append(row)
1318
+ self.clear_cache()
1319
+
1320
+ result_rows = []
1321
+ for i, row in enumerate(rows):
1322
+ result_row = []
1323
+ for j, tile in enumerate(row):
1324
+ # blend the above tile and the left tile
1325
+ # to the current tile and add the current tile to the result row
1326
+ if i > 0:
1327
+ tile = self.blend_v(rows[i - 1][j], tile, blend_height)
1328
+ if j > 0:
1329
+ tile = self.blend_h(row[j - 1], tile, blend_width)
1330
+ result_row.append(tile[:, :, :, :tile_latent_stride_height, :tile_latent_stride_width])
1331
+ result_rows.append(torch.cat(result_row, dim=-1))
1332
+
1333
+ enc = torch.cat(result_rows, dim=3)[:, :, :, :latent_height, :latent_width]
1334
+ return enc
1335
+
1336
+ def tiled_decode(self, z: torch.Tensor, return_dict: bool = True) -> Union[DecoderOutput, torch.Tensor]:
1337
+ r"""
1338
+ Decode a batch of images using a tiled decoder.
1339
+
1340
+ Args:
1341
+ z (`torch.Tensor`): Input batch of latent vectors.
1342
+ return_dict (`bool`, *optional*, defaults to `True`):
1343
+ Whether or not to return a [`~models.vae.DecoderOutput`] instead of a plain tuple.
1344
+
1345
+ Returns:
1346
+ [`~models.vae.DecoderOutput`] or `tuple`:
1347
+ If return_dict is True, a [`~models.vae.DecoderOutput`] is returned, otherwise a plain `tuple` is
1348
+ returned.
1349
+ """
1350
+ _, _, num_frames, height, width = z.shape
1351
+ sample_height = height * self.spatial_compression_ratio
1352
+ sample_width = width * self.spatial_compression_ratio
1353
+
1354
+ tile_latent_min_height = self.tile_sample_min_height // self.spatial_compression_ratio
1355
+ tile_latent_min_width = self.tile_sample_min_width // self.spatial_compression_ratio
1356
+ tile_latent_stride_height = self.tile_sample_stride_height // self.spatial_compression_ratio
1357
+ tile_latent_stride_width = self.tile_sample_stride_width // self.spatial_compression_ratio
1358
+
1359
+ blend_height = self.tile_sample_min_height - self.tile_sample_stride_height
1360
+ blend_width = self.tile_sample_min_width - self.tile_sample_stride_width
1361
+
1362
+ # Split z into overlapping tiles and decode them separately.
1363
+ # The tiles have an overlap to avoid seams between tiles.
1364
+ rows = []
1365
+ for i in range(0, height, tile_latent_stride_height):
1366
+ row = []
1367
+ for j in range(0, width, tile_latent_stride_width):
1368
+ self.clear_cache()
1369
+ time = []
1370
+ for k in range(num_frames):
1371
+ self._conv_idx = [0]
1372
+ tile = z[:, :, k : k + 1, i : i + tile_latent_min_height, j : j + tile_latent_min_width]
1373
+ tile = self.post_quant_conv(tile)
1374
+ decoded = self.decoder(tile, feat_cache=self._feat_map, feat_idx=self._conv_idx)
1375
+ time.append(decoded)
1376
+ row.append(torch.cat(time, dim=2))
1377
+ rows.append(row)
1378
+ self.clear_cache()
1379
+
1380
+ result_rows = []
1381
+ for i, row in enumerate(rows):
1382
+ result_row = []
1383
+ for j, tile in enumerate(row):
1384
+ # blend the above tile and the left tile
1385
+ # to the current tile and add the current tile to the result row
1386
+ if i > 0:
1387
+ tile = self.blend_v(rows[i - 1][j], tile, blend_height)
1388
+ if j > 0:
1389
+ tile = self.blend_h(row[j - 1], tile, blend_width)
1390
+ result_row.append(tile[:, :, :, : self.tile_sample_stride_height, : self.tile_sample_stride_width])
1391
+ result_rows.append(torch.cat(result_row, dim=-1))
1392
+
1393
+ dec = torch.cat(result_rows, dim=3)[:, :, :, :sample_height, :sample_width]
1394
+
1395
+ if not return_dict:
1396
+ return (dec,)
1397
+ return DecoderOutput(sample=dec)
1398
+
1399
+ def forward(
1400
+ self,
1401
+ sample: torch.Tensor,
1402
+ sample_posterior: bool = False,
1403
+ return_dict: bool = True,
1404
+ generator: Optional[torch.Generator] = None,
1405
+ ) -> Union[DecoderOutput, torch.Tensor]:
1406
+ """
1407
+ Args:
1408
+ sample (`torch.Tensor`): Input sample.
1409
+ return_dict (`bool`, *optional*, defaults to `True`):
1410
+ Whether or not to return a [`DecoderOutput`] instead of a plain tuple.
1411
+ """
1412
+ x = sample
1413
+ posterior = self.encode(x).latent_dist
1414
+ if sample_posterior:
1415
+ z = posterior.sample(generator=generator)
1416
+ else:
1417
+ z = posterior.mode()
1418
+ dec = self.decode(z, return_dict=return_dict)
1419
+ return dec
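A minimal end-to-end sketch of how AutoencoderKLWan is used, instantiated here with its default configuration and random weights (in practice the weights come from a pretrained checkpoint; the tensor sizes are illustrative):

    import torch

    vae = AutoencoderKLWan()                  # defaults: z_dim=16, 8x spatial / 4x temporal compression
    vae.enable_slicing()                      # optional: process one sample of the batch at a time
    vae.enable_tiling()                       # optional: spatial tiling for large inputs

    video = torch.randn(1, 3, 9, 128, 128)    # [batch, channels, frames, height, width], frames = 1 + 4k
    with torch.no_grad():
        posterior = vae.encode(video).latent_dist
        z = posterior.mode()                  # [1, 16, 3, 16, 16]
        recon = vae.decode(z).sample          # [1, 3, 9, 128, 128], clamped to [-1, 1]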
exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/models/autoencoders/autoencoder_oobleck.py ADDED
@@ -0,0 +1,465 @@
 
1
+ # Copyright 2025 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ import math
15
+ from dataclasses import dataclass
16
+ from typing import Optional, Tuple, Union
17
+
18
+ import numpy as np
19
+ import torch
20
+ import torch.nn as nn
21
+ from torch.nn.utils import weight_norm
22
+
23
+ from ...configuration_utils import ConfigMixin, register_to_config
24
+ from ...utils import BaseOutput
25
+ from ...utils.accelerate_utils import apply_forward_hook
26
+ from ...utils.torch_utils import randn_tensor
27
+ from ..modeling_utils import ModelMixin
28
+
29
+
30
+ class Snake1d(nn.Module):
31
+ """
32
+ A 1-dimensional Snake activation function module.
33
+ """
34
+
35
+ def __init__(self, hidden_dim, logscale=True):
36
+ super().__init__()
37
+ self.alpha = nn.Parameter(torch.zeros(1, hidden_dim, 1))
38
+ self.beta = nn.Parameter(torch.zeros(1, hidden_dim, 1))
39
+
40
+ self.alpha.requires_grad = True
41
+ self.beta.requires_grad = True
42
+ self.logscale = logscale
43
+
44
+ def forward(self, hidden_states):
45
+ shape = hidden_states.shape
46
+
47
+ alpha = self.alpha if not self.logscale else torch.exp(self.alpha)
48
+ beta = self.beta if not self.logscale else torch.exp(self.beta)
49
+
50
+ hidden_states = hidden_states.reshape(shape[0], shape[1], -1)
51
+ hidden_states = hidden_states + (beta + 1e-9).reciprocal() * torch.sin(alpha * hidden_states).pow(2)
52
+ hidden_states = hidden_states.reshape(shape)
53
+ return hidden_states
54
+
55
+
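Snake1d applies the snake activation x + sin(alpha * x)**2 / (beta + eps) element-wise; with logscale=True the parameters are stored in log space, so the zero initialization above corresponds to alpha = beta = 1. A small sketch of that default behaviour (shapes are illustrative):

    import torch

    snake = Snake1d(hidden_dim=4)             # alpha and beta are zeros, i.e. exp(0) = 1
    x = torch.randn(2, 4, 16)                 # [batch, channels, time]
    y = snake(x)
    expected = x + torch.sin(x).pow(2) / (1.0 + 1e-9)
    print(torch.allclose(y, expected))        # True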
56
+ class OobleckResidualUnit(nn.Module):
57
+ """
58
+ A residual unit composed of Snake1d and weight-normalized Conv1d layers with dilations.
59
+ """
60
+
61
+ def __init__(self, dimension: int = 16, dilation: int = 1):
62
+ super().__init__()
63
+ pad = ((7 - 1) * dilation) // 2
64
+
65
+ self.snake1 = Snake1d(dimension)
66
+ self.conv1 = weight_norm(nn.Conv1d(dimension, dimension, kernel_size=7, dilation=dilation, padding=pad))
67
+ self.snake2 = Snake1d(dimension)
68
+ self.conv2 = weight_norm(nn.Conv1d(dimension, dimension, kernel_size=1))
69
+
70
+ def forward(self, hidden_state):
71
+ """
72
+ Forward pass through the residual unit.
73
+
74
+ Args:
75
+ hidden_state (`torch.Tensor` of shape `(batch_size, channels, time_steps)`):
76
+ Input tensor.
77
+
78
+ Returns:
79
+ output_tensor (`torch.Tensor` of shape `(batch_size, channels, time_steps)`)
80
+ The input tensor after passing through the residual unit.
81
+ """
82
+ output_tensor = hidden_state
83
+ output_tensor = self.conv1(self.snake1(output_tensor))
84
+ output_tensor = self.conv2(self.snake2(output_tensor))
85
+
86
+ padding = (hidden_state.shape[-1] - output_tensor.shape[-1]) // 2
87
+ if padding > 0:
88
+ hidden_state = hidden_state[..., padding:-padding]
89
+ output_tensor = hidden_state + output_tensor
90
+ return output_tensor
91
+
92
+
93
+ class OobleckEncoderBlock(nn.Module):
94
+ """Encoder block used in Oobleck encoder."""
95
+
96
+ def __init__(self, input_dim, output_dim, stride: int = 1):
97
+ super().__init__()
98
+
99
+ self.res_unit1 = OobleckResidualUnit(input_dim, dilation=1)
100
+ self.res_unit2 = OobleckResidualUnit(input_dim, dilation=3)
101
+ self.res_unit3 = OobleckResidualUnit(input_dim, dilation=9)
102
+ self.snake1 = Snake1d(input_dim)
103
+ self.conv1 = weight_norm(
104
+ nn.Conv1d(input_dim, output_dim, kernel_size=2 * stride, stride=stride, padding=math.ceil(stride / 2))
105
+ )
106
+
107
+ def forward(self, hidden_state):
108
+ hidden_state = self.res_unit1(hidden_state)
109
+ hidden_state = self.res_unit2(hidden_state)
110
+ hidden_state = self.snake1(self.res_unit3(hidden_state))
111
+ hidden_state = self.conv1(hidden_state)
112
+
113
+ return hidden_state
114
+
115
+
116
+ class OobleckDecoderBlock(nn.Module):
117
+ """Decoder block used in Oobleck decoder."""
118
+
119
+ def __init__(self, input_dim, output_dim, stride: int = 1):
120
+ super().__init__()
121
+
122
+ self.snake1 = Snake1d(input_dim)
123
+ self.conv_t1 = weight_norm(
124
+ nn.ConvTranspose1d(
125
+ input_dim,
126
+ output_dim,
127
+ kernel_size=2 * stride,
128
+ stride=stride,
129
+ padding=math.ceil(stride / 2),
130
+ )
131
+ )
132
+ self.res_unit1 = OobleckResidualUnit(output_dim, dilation=1)
133
+ self.res_unit2 = OobleckResidualUnit(output_dim, dilation=3)
134
+ self.res_unit3 = OobleckResidualUnit(output_dim, dilation=9)
135
+
136
+ def forward(self, hidden_state):
137
+ hidden_state = self.snake1(hidden_state)
138
+ hidden_state = self.conv_t1(hidden_state)
139
+ hidden_state = self.res_unit1(hidden_state)
140
+ hidden_state = self.res_unit2(hidden_state)
141
+ hidden_state = self.res_unit3(hidden_state)
142
+
143
+ return hidden_state
144
+
145
+
146
+ class OobleckDiagonalGaussianDistribution(object):
147
+ def __init__(self, parameters: torch.Tensor, deterministic: bool = False):
148
+ self.parameters = parameters
149
+ self.mean, self.scale = parameters.chunk(2, dim=1)
150
+ self.std = nn.functional.softplus(self.scale) + 1e-4
151
+ self.var = self.std * self.std
152
+ self.logvar = torch.log(self.var)
153
+ self.deterministic = deterministic
154
+
155
+ def sample(self, generator: Optional[torch.Generator] = None) -> torch.Tensor:
156
+ # make sure sample is on the same device as the parameters and has same dtype
157
+ sample = randn_tensor(
158
+ self.mean.shape,
159
+ generator=generator,
160
+ device=self.parameters.device,
161
+ dtype=self.parameters.dtype,
162
+ )
163
+ x = self.mean + self.std * sample
164
+ return x
165
+
166
+ def kl(self, other: "OobleckDiagonalGaussianDistribution" = None) -> torch.Tensor:
167
+ if self.deterministic:
168
+ return torch.Tensor([0.0])
169
+ else:
170
+ if other is None:
171
+ return (self.mean * self.mean + self.var - self.logvar - 1.0).sum(1).mean()
172
+ else:
173
+ normalized_diff = torch.pow(self.mean - other.mean, 2) / other.var
174
+ var_ratio = self.var / other.var
175
+ logvar_diff = self.logvar - other.logvar
176
+
177
+ kl = normalized_diff + var_ratio + logvar_diff - 1
178
+
179
+ kl = kl.sum(1).mean()
180
+ return kl
181
+
182
+ def mode(self) -> torch.Tensor:
183
+ return self.mean
184
+
185
+
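The encoder emits mean and scale stacked along the channel dimension; the distribution above splits them and samples with the usual reparameterization mean + std * eps, where std = softplus(scale) + 1e-4. A small sketch (shapes are illustrative):

    import torch

    params = torch.randn(2, 128, 10)          # 64 mean channels + 64 scale channels
    dist = OobleckDiagonalGaussianDistribution(params)
    z = dist.sample()                         # reparameterized sample
    print(z.shape, dist.mode().shape)         # torch.Size([2, 64, 10]) torch.Size([2, 64, 10])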
186
+ @dataclass
187
+ class AutoencoderOobleckOutput(BaseOutput):
188
+ """
189
+ Output of AutoencoderOobleck encoding method.
190
+
191
+ Args:
192
+ latent_dist (`OobleckDiagonalGaussianDistribution`):
193
+ Encoded outputs of `Encoder` represented as the mean and standard deviation of
194
+ `OobleckDiagonalGaussianDistribution`. `OobleckDiagonalGaussianDistribution` allows for sampling latents
195
+ from the distribution.
196
+ """
197
+
198
+ latent_dist: "OobleckDiagonalGaussianDistribution" # noqa: F821
199
+
200
+
201
+ @dataclass
202
+ class OobleckDecoderOutput(BaseOutput):
203
+ r"""
204
+ Output of decoding method.
205
+
206
+ Args:
207
+ sample (`torch.Tensor` of shape `(batch_size, audio_channels, sequence_length)`):
208
+ The decoded output sample from the last layer of the model.
209
+ """
210
+
211
+ sample: torch.Tensor
212
+
213
+
214
+ class OobleckEncoder(nn.Module):
215
+ """Oobleck Encoder"""
216
+
217
+ def __init__(self, encoder_hidden_size, audio_channels, downsampling_ratios, channel_multiples):
218
+ super().__init__()
219
+
220
+ strides = downsampling_ratios
221
+ channel_multiples = [1] + channel_multiples
222
+
223
+ # Create first convolution
224
+ self.conv1 = weight_norm(nn.Conv1d(audio_channels, encoder_hidden_size, kernel_size=7, padding=3))
225
+
226
+ self.block = []
227
+ # Create EncoderBlocks that double channels as they downsample by `stride`
228
+ for stride_index, stride in enumerate(strides):
229
+ self.block += [
230
+ OobleckEncoderBlock(
231
+ input_dim=encoder_hidden_size * channel_multiples[stride_index],
232
+ output_dim=encoder_hidden_size * channel_multiples[stride_index + 1],
233
+ stride=stride,
234
+ )
235
+ ]
236
+
237
+ self.block = nn.ModuleList(self.block)
238
+ d_model = encoder_hidden_size * channel_multiples[-1]
239
+ self.snake1 = Snake1d(d_model)
240
+ self.conv2 = weight_norm(nn.Conv1d(d_model, encoder_hidden_size, kernel_size=3, padding=1))
241
+
242
+ def forward(self, hidden_state):
243
+ hidden_state = self.conv1(hidden_state)
244
+
245
+ for module in self.block:
246
+ hidden_state = module(hidden_state)
247
+
248
+ hidden_state = self.snake1(hidden_state)
249
+ hidden_state = self.conv2(hidden_state)
250
+
251
+ return hidden_state
252
+
253
+
254
+ class OobleckDecoder(nn.Module):
255
+ """Oobleck Decoder"""
256
+
257
+ def __init__(self, channels, input_channels, audio_channels, upsampling_ratios, channel_multiples):
258
+ super().__init__()
259
+
260
+ strides = upsampling_ratios
261
+ channel_multiples = [1] + channel_multiples
262
+
263
+ # Add first conv layer
264
+ self.conv1 = weight_norm(nn.Conv1d(input_channels, channels * channel_multiples[-1], kernel_size=7, padding=3))
265
+
266
+ # Add upsampling + MRF blocks
267
+ block = []
268
+ for stride_index, stride in enumerate(strides):
269
+ block += [
270
+ OobleckDecoderBlock(
271
+ input_dim=channels * channel_multiples[len(strides) - stride_index],
272
+ output_dim=channels * channel_multiples[len(strides) - stride_index - 1],
273
+ stride=stride,
274
+ )
275
+ ]
276
+
277
+ self.block = nn.ModuleList(block)
278
+ output_dim = channels
279
+ self.snake1 = Snake1d(output_dim)
280
+ self.conv2 = weight_norm(nn.Conv1d(channels, audio_channels, kernel_size=7, padding=3, bias=False))
281
+
282
+ def forward(self, hidden_state):
283
+ hidden_state = self.conv1(hidden_state)
284
+
285
+ for layer in self.block:
286
+ hidden_state = layer(hidden_state)
287
+
288
+ hidden_state = self.snake1(hidden_state)
289
+ hidden_state = self.conv2(hidden_state)
290
+
291
+ return hidden_state
292
+
293
+
294
+ class AutoencoderOobleck(ModelMixin, ConfigMixin):
295
+ r"""
296
+ An autoencoder for encoding waveforms into latents and decoding latent representations into waveforms. First
297
+ introduced in Stable Audio.
298
+
299
+ This model inherits from [`ModelMixin`]. Check the superclass documentation for its generic methods implemented
300
+ for all models (such as downloading or saving).
301
+
302
+ Parameters:
303
+ encoder_hidden_size (`int`, *optional*, defaults to 128):
304
+ Intermediate representation dimension for the encoder.
305
+ downsampling_ratios (`List[int]`, *optional*, defaults to `[2, 4, 4, 8, 8]`):
306
+ Ratios for downsampling in the encoder. These are used in reverse order for upsampling in the decoder.
307
+ channel_multiples (`List[int]`, *optional*, defaults to `[1, 2, 4, 8, 16]`):
308
+ Multiples used to determine the hidden sizes of the hidden layers.
309
+ decoder_channels (`int`, *optional*, defaults to 128):
310
+ Intermediate representation dimension for the decoder.
311
+ decoder_input_channels (`int`, *optional*, defaults to 64):
312
+ Input dimension for the decoder. Corresponds to the latent dimension.
313
+ audio_channels (`int`, *optional*, defaults to 2):
314
+ Number of channels in the audio data. Either 1 for mono or 2 for stereo.
315
+ sampling_rate (`int`, *optional*, defaults to 44100):
316
+ The sampling rate at which the audio waveform should be digitized, expressed in hertz (Hz).
317
+ """
318
+
319
+ _supports_gradient_checkpointing = False
320
+ _supports_group_offloading = False
321
+
322
+ @register_to_config
323
+ def __init__(
324
+ self,
325
+ encoder_hidden_size=128,
326
+ downsampling_ratios=[2, 4, 4, 8, 8],
327
+ channel_multiples=[1, 2, 4, 8, 16],
328
+ decoder_channels=128,
329
+ decoder_input_channels=64,
330
+ audio_channels=2,
331
+ sampling_rate=44100,
332
+ ):
333
+ super().__init__()
334
+
335
+ self.encoder_hidden_size = encoder_hidden_size
336
+ self.downsampling_ratios = downsampling_ratios
337
+ self.decoder_channels = decoder_channels
338
+ self.upsampling_ratios = downsampling_ratios[::-1]
339
+ self.hop_length = int(np.prod(downsampling_ratios))
340
+ self.sampling_rate = sampling_rate
341
+
342
+ self.encoder = OobleckEncoder(
343
+ encoder_hidden_size=encoder_hidden_size,
344
+ audio_channels=audio_channels,
345
+ downsampling_ratios=downsampling_ratios,
346
+ channel_multiples=channel_multiples,
347
+ )
348
+
349
+ self.decoder = OobleckDecoder(
350
+ channels=decoder_channels,
351
+ input_channels=decoder_input_channels,
352
+ audio_channels=audio_channels,
353
+ upsampling_ratios=self.upsampling_ratios,
354
+ channel_multiples=channel_multiples,
355
+ )
356
+
357
+ self.use_slicing = False
358
+
359
+ def enable_slicing(self):
360
+ r"""
361
+ Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to
362
+ compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.
363
+ """
364
+ self.use_slicing = True
365
+
366
+ def disable_slicing(self):
367
+ r"""
368
+ Disable sliced VAE decoding. If `enable_slicing` was previously enabled, this method will go back to computing
369
+ decoding in one step.
370
+ """
371
+ self.use_slicing = False
372
+
373
+ @apply_forward_hook
374
+ def encode(
375
+ self, x: torch.Tensor, return_dict: bool = True
376
+ ) -> Union[AutoencoderOobleckOutput, Tuple[OobleckDiagonalGaussianDistribution]]:
377
+ """
378
+ Encode a batch of audio waveforms into latents.
379
+
380
+ Args:
381
+ x (`torch.Tensor`): Input batch of audio waveforms.
382
+ return_dict (`bool`, *optional*, defaults to `True`):
383
+ Whether to return a [`AutoencoderOobleckOutput`] instead of a plain tuple.
384
+
385
+ Returns:
386
+ The latent representations of the encoded audio. If `return_dict` is True, a
387
+ [`AutoencoderOobleckOutput`] is returned, otherwise a plain `tuple` is returned.
388
+ """
389
+ if self.use_slicing and x.shape[0] > 1:
390
+ encoded_slices = [self.encoder(x_slice) for x_slice in x.split(1)]
391
+ h = torch.cat(encoded_slices)
392
+ else:
393
+ h = self.encoder(x)
394
+
395
+ posterior = OobleckDiagonalGaussianDistribution(h)
396
+
397
+ if not return_dict:
398
+ return (posterior,)
399
+
400
+ return AutoencoderOobleckOutput(latent_dist=posterior)
401
+
402
+ def _decode(self, z: torch.Tensor, return_dict: bool = True) -> Union[OobleckDecoderOutput, torch.Tensor]:
403
+ dec = self.decoder(z)
404
+
405
+ if not return_dict:
406
+ return (dec,)
407
+
408
+ return OobleckDecoderOutput(sample=dec)
409
+
410
+ @apply_forward_hook
411
+ def decode(
412
+ self, z: torch.FloatTensor, return_dict: bool = True, generator=None
413
+ ) -> Union[OobleckDecoderOutput, torch.FloatTensor]:
414
+ """
415
+ Decode a batch of latent vectors into audio waveforms.
416
+
417
+ Args:
418
+ z (`torch.Tensor`): Input batch of latent vectors.
419
+ return_dict (`bool`, *optional*, defaults to `True`):
420
+ Whether to return a [`~models.vae.OobleckDecoderOutput`] instead of a plain tuple.
421
+
422
+ Returns:
423
+ [`~models.vae.OobleckDecoderOutput`] or `tuple`:
424
+ If return_dict is True, a [`~models.vae.OobleckDecoderOutput`] is returned, otherwise a plain `tuple`
425
+ is returned.
426
+
427
+ """
428
+ if self.use_slicing and z.shape[0] > 1:
429
+ decoded_slices = [self._decode(z_slice).sample for z_slice in z.split(1)]
430
+ decoded = torch.cat(decoded_slices)
431
+ else:
432
+ decoded = self._decode(z).sample
433
+
434
+ if not return_dict:
435
+ return (decoded,)
436
+
437
+ return OobleckDecoderOutput(sample=decoded)
438
+
439
+ def forward(
440
+ self,
441
+ sample: torch.Tensor,
442
+ sample_posterior: bool = False,
443
+ return_dict: bool = True,
444
+ generator: Optional[torch.Generator] = None,
445
+ ) -> Union[OobleckDecoderOutput, torch.Tensor]:
446
+ r"""
447
+ Args:
448
+ sample (`torch.Tensor`): Input sample.
449
+ sample_posterior (`bool`, *optional*, defaults to `False`):
450
+ Whether to sample from the posterior.
451
+ return_dict (`bool`, *optional*, defaults to `True`):
452
+ Whether or not to return a [`OobleckDecoderOutput`] instead of a plain tuple.
453
+ """
454
+ x = sample
455
+ posterior = self.encode(x).latent_dist
456
+ if sample_posterior:
457
+ z = posterior.sample(generator=generator)
458
+ else:
459
+ z = posterior.mode()
460
+ dec = self.decode(z).sample
461
+
462
+ if not return_dict:
463
+ return (dec,)
464
+
465
+ return OobleckDecoderOutput(sample=dec)
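A minimal usage sketch for the audio autoencoder above with its default configuration and random weights (sizes are illustrative). With the default downsampling ratios the hop length is 2 * 4 * 4 * 8 * 8 = 2048 waveform samples per latent frame:

    import torch

    vae = AutoencoderOobleck()                 # stereo audio at 44.1 kHz, 64 latent channels
    waveform = torch.randn(1, 2, 2048 * 32)    # [batch, audio_channels, samples], ~1.5 s of audio
    with torch.no_grad():
        posterior = vae.encode(waveform).latent_dist
        z = posterior.sample()                 # [1, 64, 32]
        recon = vae.decode(z).sample           # [1, 2, 65536]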
exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/models/autoencoders/autoencoder_tiny.py ADDED
@@ -0,0 +1,346 @@
 
1
+ # Copyright 2025 Ollin Boer Bohan and The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+
16
+ from dataclasses import dataclass
17
+ from typing import Optional, Tuple, Union
18
+
19
+ import torch
20
+
21
+ from ...configuration_utils import ConfigMixin, register_to_config
22
+ from ...utils import BaseOutput
23
+ from ...utils.accelerate_utils import apply_forward_hook
24
+ from ..modeling_utils import ModelMixin
25
+ from .vae import DecoderOutput, DecoderTiny, EncoderTiny
26
+
27
+
28
+ @dataclass
29
+ class AutoencoderTinyOutput(BaseOutput):
30
+ """
31
+ Output of AutoencoderTiny encoding method.
32
+
33
+ Args:
34
+ latents (`torch.Tensor`): Encoded outputs of the `Encoder`.
35
+
36
+ """
37
+
38
+ latents: torch.Tensor
39
+
40
+
41
+ class AutoencoderTiny(ModelMixin, ConfigMixin):
42
+ r"""
43
+ A tiny distilled VAE model for encoding images into latents and decoding latent representations into images.
44
+
45
+ [`AutoencoderTiny`] is a wrapper around the original implementation of `TAESD`.
46
+
47
+ This model inherits from [`ModelMixin`]. Check the superclass documentation for its generic methods implemented for
48
+ all models (such as downloading or saving).
49
+
50
+ Parameters:
51
+ in_channels (`int`, *optional*, defaults to 3): Number of channels in the input image.
52
+ out_channels (`int`, *optional*, defaults to 3): Number of channels in the output.
53
+ encoder_block_out_channels (`Tuple[int]`, *optional*, defaults to `(64, 64, 64, 64)`):
54
+ Tuple of integers representing the number of output channels for each encoder block. The length of the
55
+ tuple should be equal to the number of encoder blocks.
56
+ decoder_block_out_channels (`Tuple[int]`, *optional*, defaults to `(64, 64, 64, 64)`):
57
+ Tuple of integers representing the number of output channels for each decoder block. The length of the
58
+ tuple should be equal to the number of decoder blocks.
59
+ act_fn (`str`, *optional*, defaults to `"relu"`):
60
+ Activation function to be used throughout the model.
61
+ latent_channels (`int`, *optional*, defaults to 4):
62
+ Number of channels in the latent representation. The latent space acts as a compressed representation of
63
+ the input image.
64
+ upsampling_scaling_factor (`int`, *optional*, defaults to 2):
65
+ Scaling factor for upsampling in the decoder. It determines the size of the output image during the
66
+ upsampling process.
67
+ num_encoder_blocks (`Tuple[int]`, *optional*, defaults to `(1, 3, 3, 3)`):
68
+ Tuple of integers representing the number of encoder blocks at each stage of the encoding process. The
69
+ length of the tuple should be equal to the number of stages in the encoder. Each stage has a different
70
+ number of encoder blocks.
71
+ num_decoder_blocks (`Tuple[int]`, *optional*, defaults to `(3, 3, 3, 1)`):
72
+ Tuple of integers representing the number of decoder blocks at each stage of the decoding process. The
73
+ length of the tuple should be equal to the number of stages in the decoder. Each stage has a different
74
+ number of decoder blocks.
75
+ latent_magnitude (`float`, *optional*, defaults to 3.0):
76
+ Magnitude of the latent representation. This parameter scales the latent representation values to control
77
+ the extent of information preservation.
78
+ latent_shift (float, *optional*, defaults to 0.5):
79
+ Shift applied to the latent representation. This parameter controls the center of the latent space.
80
+ scaling_factor (`float`, *optional*, defaults to 1.0):
81
+ The component-wise standard deviation of the trained latent space computed using the first batch of the
82
+ training set. This is used to scale the latent space to have unit variance when training the diffusion
83
+ model. The latents are scaled with the formula `z = z * scaling_factor` before being passed to the
84
+ diffusion model. When decoding, the latents are scaled back to the original scale with the formula: `z = 1
85
+ / scaling_factor * z`. For more details, refer to sections 4.3.2 and D.1 of the [High-Resolution Image
86
+ Synthesis with Latent Diffusion Models](https://huggingface.co/papers/2112.10752) paper. For this
87
+ Autoencoder, however, no such scaling factor was used, hence the value of 1.0 as the default.
88
+ force_upcast (`bool`, *optional*, default to `False`):
89
+ If enabled it will force the VAE to run in float32 for high image resolution pipelines, such as SD-XL. VAE
90
+ can be fine-tuned / trained to a lower range without losing too much precision, in which case
91
+ `force_upcast` can be set to `False` (see this fp16-friendly
92
+ [AutoEncoder](https://huggingface.co/madebyollin/sdxl-vae-fp16-fix)).
93
+ """
94
+
95
+ _supports_gradient_checkpointing = True
96
+
97
+ @register_to_config
98
+ def __init__(
99
+ self,
100
+ in_channels: int = 3,
101
+ out_channels: int = 3,
102
+ encoder_block_out_channels: Tuple[int, ...] = (64, 64, 64, 64),
103
+ decoder_block_out_channels: Tuple[int, ...] = (64, 64, 64, 64),
104
+ act_fn: str = "relu",
105
+ upsample_fn: str = "nearest",
106
+ latent_channels: int = 4,
107
+ upsampling_scaling_factor: int = 2,
108
+ num_encoder_blocks: Tuple[int, ...] = (1, 3, 3, 3),
109
+ num_decoder_blocks: Tuple[int, ...] = (3, 3, 3, 1),
110
+ latent_magnitude: int = 3,
111
+ latent_shift: float = 0.5,
112
+ force_upcast: bool = False,
113
+ scaling_factor: float = 1.0,
114
+ shift_factor: float = 0.0,
115
+ ):
116
+ super().__init__()
117
+
118
+ if len(encoder_block_out_channels) != len(num_encoder_blocks):
119
+ raise ValueError("`encoder_block_out_channels` should have the same length as `num_encoder_blocks`.")
120
+ if len(decoder_block_out_channels) != len(num_decoder_blocks):
121
+ raise ValueError("`decoder_block_out_channels` should have the same length as `num_decoder_blocks`.")
122
+
123
+ self.encoder = EncoderTiny(
124
+ in_channels=in_channels,
125
+ out_channels=latent_channels,
126
+ num_blocks=num_encoder_blocks,
127
+ block_out_channels=encoder_block_out_channels,
128
+ act_fn=act_fn,
129
+ )
130
+
131
+ self.decoder = DecoderTiny(
132
+ in_channels=latent_channels,
133
+ out_channels=out_channels,
134
+ num_blocks=num_decoder_blocks,
135
+ block_out_channels=decoder_block_out_channels,
136
+ upsampling_scaling_factor=upsampling_scaling_factor,
137
+ act_fn=act_fn,
138
+ upsample_fn=upsample_fn,
139
+ )
140
+
141
+ self.latent_magnitude = latent_magnitude
142
+ self.latent_shift = latent_shift
143
+ self.scaling_factor = scaling_factor
144
+
145
+ self.use_slicing = False
146
+ self.use_tiling = False
147
+
148
+ # only relevant if vae tiling is enabled
149
+ self.spatial_scale_factor = 2**out_channels
150
+ self.tile_overlap_factor = 0.125
151
+ self.tile_sample_min_size = 512
152
+ self.tile_latent_min_size = self.tile_sample_min_size // self.spatial_scale_factor
153
+
154
+ self.register_to_config(block_out_channels=decoder_block_out_channels)
155
+ self.register_to_config(force_upcast=False)
156
+
157
+ def scale_latents(self, x: torch.Tensor) -> torch.Tensor:
158
+ """raw latents -> [0, 1]"""
159
+ return x.div(2 * self.latent_magnitude).add(self.latent_shift).clamp(0, 1)
160
+
161
+ def unscale_latents(self, x: torch.Tensor) -> torch.Tensor:
162
+ """[0, 1] -> raw latents"""
163
+ return x.sub(self.latent_shift).mul(2 * self.latent_magnitude)
164
+
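These two helpers map raw latents into [0, 1] and back so they can be stored as 8-bit values, which is exactly what forward() does further down. A small sketch of the lossy, quantized round trip (illustrative values; latent_magnitude defaults to 3, so values outside [-3, 3] are clipped):

    import torch

    taesd = AutoencoderTiny()
    latents = torch.randn(1, 4, 8, 8)
    stored = taesd.scale_latents(latents).mul(255).round().byte()   # uint8, as in forward()
    restored = taesd.unscale_latents(stored.float().div(255))
    print((latents.clamp(-3, 3) - restored).abs().max() < 0.05)     # only small quantization error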
165
+ def enable_slicing(self) -> None:
166
+ r"""
167
+ Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to
168
+ compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.
169
+ """
170
+ self.use_slicing = True
171
+
172
+ def disable_slicing(self) -> None:
173
+ r"""
174
+ Disable sliced VAE decoding. If `enable_slicing` was previously enabled, this method will go back to computing
175
+ decoding in one step.
176
+ """
177
+ self.use_slicing = False
178
+
179
+ def enable_tiling(self, use_tiling: bool = True) -> None:
180
+ r"""
181
+ Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to
182
+ compute decoding and encoding in several steps. This is useful for saving a large amount of memory and for
183
+ processing larger images.
184
+ """
185
+ self.use_tiling = use_tiling
186
+
187
+ def disable_tiling(self) -> None:
188
+ r"""
189
+ Disable tiled VAE decoding. If `enable_tiling` was previously enabled, this method will go back to computing
190
+ decoding in one step.
191
+ """
192
+ self.enable_tiling(False)
193
+
194
+ def _tiled_encode(self, x: torch.Tensor) -> torch.Tensor:
195
+ r"""Encode a batch of images using a tiled encoder.
196
+
197
+ When this option is enabled, the VAE will split the input tensor into tiles to compute encoding in several
198
+ steps. This is useful to keep memory use constant regardless of image size. To avoid tiling artifacts, the
199
+ tiles overlap and are blended together to form a smooth output.
200
+
201
+ Args:
202
+ x (`torch.Tensor`): Input batch of images.
203
+
204
+ Returns:
205
+ `torch.Tensor`: Encoded batch of images.
206
+ """
207
+ # scale of encoder output relative to input
208
+ sf = self.spatial_scale_factor
209
+ tile_size = self.tile_sample_min_size
210
+
211
+ # number of pixels to blend and to traverse between tiles
212
+ blend_size = int(tile_size * self.tile_overlap_factor)
213
+ traverse_size = tile_size - blend_size
214
+
215
+ # tiles index (up/left)
216
+ ti = range(0, x.shape[-2], traverse_size)
217
+ tj = range(0, x.shape[-1], traverse_size)
218
+
219
+ # mask for blending
220
+ blend_masks = torch.stack(
221
+ torch.meshgrid([torch.arange(tile_size / sf) / (blend_size / sf - 1)] * 2, indexing="ij")
222
+ )
223
+ blend_masks = blend_masks.clamp(0, 1).to(x.device)
224
+
225
+ # output array
226
+ out = torch.zeros(x.shape[0], 4, x.shape[-2] // sf, x.shape[-1] // sf, device=x.device)
227
+ for i in ti:
228
+ for j in tj:
229
+ tile_in = x[..., i : i + tile_size, j : j + tile_size]
230
+ # tile result
231
+ tile_out = out[..., i // sf : (i + tile_size) // sf, j // sf : (j + tile_size) // sf]
232
+ tile = self.encoder(tile_in)
233
+ h, w = tile.shape[-2], tile.shape[-1]
234
+ # blend tile result into output
235
+ blend_mask_i = torch.ones_like(blend_masks[0]) if i == 0 else blend_masks[0]
236
+ blend_mask_j = torch.ones_like(blend_masks[1]) if j == 0 else blend_masks[1]
237
+ blend_mask = blend_mask_i * blend_mask_j
238
+ tile, blend_mask = tile[..., :h, :w], blend_mask[..., :h, :w]
239
+ tile_out.copy_(blend_mask * tile + (1 - blend_mask) * tile_out)
240
+ return out
241
+
242
+ def _tiled_decode(self, x: torch.Tensor) -> torch.Tensor:
243
+ r"""Encode a batch of images using a tiled encoder.
244
+
245
+ When this option is enabled, the VAE will split the input tensor into tiles to compute encoding in several
246
+ steps. This is useful to keep memory use constant regardless of image size. To avoid tiling artifacts, the
247
+ tiles overlap and are blended together to form a smooth output.
248
+
249
+ Args:
250
+ x (`torch.Tensor`): Input batch of images.
251
+
252
+ Returns:
253
+ `torch.Tensor`: Encoded batch of images.
254
+ """
255
+ # scale of decoder output relative to input
256
+ sf = self.spatial_scale_factor
257
+ tile_size = self.tile_latent_min_size
258
+
259
+ # number of pixels to blend and to traverse between tiles
260
+ blend_size = int(tile_size * self.tile_overlap_factor)
261
+ traverse_size = tile_size - blend_size
262
+
263
+ # tiles index (up/left)
264
+ ti = range(0, x.shape[-2], traverse_size)
265
+ tj = range(0, x.shape[-1], traverse_size)
266
+
267
+ # mask for blending
268
+ blend_masks = torch.stack(
269
+ torch.meshgrid([torch.arange(tile_size * sf) / (blend_size * sf - 1)] * 2, indexing="ij")
270
+ )
271
+ blend_masks = blend_masks.clamp(0, 1).to(x.device)
272
+
273
+ # output array
274
+ out = torch.zeros(x.shape[0], 3, x.shape[-2] * sf, x.shape[-1] * sf, device=x.device)
275
+ for i in ti:
276
+ for j in tj:
277
+ tile_in = x[..., i : i + tile_size, j : j + tile_size]
278
+ # tile result
279
+ tile_out = out[..., i * sf : (i + tile_size) * sf, j * sf : (j + tile_size) * sf]
280
+ tile = self.decoder(tile_in)
281
+ h, w = tile.shape[-2], tile.shape[-1]
282
+ # blend tile result into output
283
+ blend_mask_i = torch.ones_like(blend_masks[0]) if i == 0 else blend_masks[0]
284
+ blend_mask_j = torch.ones_like(blend_masks[1]) if j == 0 else blend_masks[1]
285
+ blend_mask = (blend_mask_i * blend_mask_j)[..., :h, :w]
286
+ tile_out.copy_(blend_mask * tile + (1 - blend_mask) * tile_out)
287
+ return out
288
+
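A quick numeric sketch of the tiling arithmetic used by `_tiled_encode`/`_tiled_decode` above. The tile size, overlap factor, and scale factor below are illustrative values, not ones read from a config:

```py
# Hypothetical tiling parameters, mirroring the arithmetic in _tiled_encode above.
tile_size = 512            # tile_sample_min_size
overlap_factor = 0.125     # tile_overlap_factor
sf = 8                     # spatial_scale_factor

blend_size = int(tile_size * overlap_factor)   # 64 pixels shared between neighbouring tiles
traverse_size = tile_size - blend_size         # 448-pixel stride between tile origins

height = width = 1024
ti = list(range(0, height, traverse_size))     # [0, 448, 896] -> 3 tile rows
tj = list(range(0, width, traverse_size))      # [0, 448, 896] -> 3 tile columns

# Each encoded tile covers tile_size // sf latent pixels; the overlapping strips are
# linearly blended, so the output latent is (height // sf) x (width // sf) = 128 x 128.
print(len(ti), len(tj), height // sf, width // sf)
```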
289
+ @apply_forward_hook
290
+ def encode(self, x: torch.Tensor, return_dict: bool = True) -> Union[AutoencoderTinyOutput, Tuple[torch.Tensor]]:
291
+ if self.use_slicing and x.shape[0] > 1:
292
+ output = [
293
+ self._tiled_encode(x_slice) if self.use_tiling else self.encoder(x_slice) for x_slice in x.split(1)
294
+ ]
295
+ output = torch.cat(output)
296
+ else:
297
+ output = self._tiled_encode(x) if self.use_tiling else self.encoder(x)
298
+
299
+ if not return_dict:
300
+ return (output,)
301
+
302
+ return AutoencoderTinyOutput(latents=output)
303
+
304
+ @apply_forward_hook
305
+ def decode(
306
+ self, x: torch.Tensor, generator: Optional[torch.Generator] = None, return_dict: bool = True
307
+ ) -> Union[DecoderOutput, Tuple[torch.Tensor]]:
308
+ if self.use_slicing and x.shape[0] > 1:
309
+ output = [
310
+ self._tiled_decode(x_slice) if self.use_tiling else self.decoder(x_slice) for x_slice in x.split(1)
311
+ ]
312
+ output = torch.cat(output)
313
+ else:
314
+ output = self._tiled_decode(x) if self.use_tiling else self.decoder(x)
315
+
316
+ if not return_dict:
317
+ return (output,)
318
+
319
+ return DecoderOutput(sample=output)
320
+
321
+ def forward(
322
+ self,
323
+ sample: torch.Tensor,
324
+ return_dict: bool = True,
325
+ ) -> Union[DecoderOutput, Tuple[torch.Tensor]]:
326
+ r"""
327
+ Args:
328
+ sample (`torch.Tensor`): Input sample.
329
+ return_dict (`bool`, *optional*, defaults to `True`):
330
+ Whether or not to return a [`DecoderOutput`] instead of a plain tuple.
331
+ """
332
+ enc = self.encode(sample).latents
333
+
334
+ # scale latents to be in [0, 1], then quantize latents to a byte tensor,
335
+ # as if we were storing the latents in an RGBA uint8 image.
336
+ scaled_enc = self.scale_latents(enc).mul_(255).round_().byte()
337
+
338
+ # unquantize latents back into [0, 1], then unscale latents back to their original range,
339
+ # as if we were loading the latents from an RGBA uint8 image.
340
+ unscaled_enc = self.unscale_latents(scaled_enc / 255.0)
341
+
342
+ dec = self.decode(unscaled_enc).sample
343
+
344
+ if not return_dict:
345
+ return (dec,)
346
+ return DecoderOutput(sample=dec)
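A minimal usage sketch of the quantization round trip that `forward` performs. The checkpoint name is an assumption; any `AutoencoderTiny` weights will do, and shapes are illustrative:

```py
import torch
from diffusers import AutoencoderTiny

# Checkpoint name assumed for illustration.
vae = AutoencoderTiny.from_pretrained("madebyollin/taesd", torch_dtype=torch.float32)

image = torch.rand(1, 3, 512, 512) * 2 - 1            # images in [-1, 1]
latents = vae.encode(image).latents                    # (1, 4, 64, 64)

# Quantize to uint8 and back, as forward() does, to mimic storing the latents
# in an RGBA uint8 image.
quantized = vae.scale_latents(latents).mul_(255).round_().byte()
restored = vae.unscale_latents(quantized / 255.0)

decoded = vae.decode(restored).sample                  # back to (1, 3, 512, 512)
```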
exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/models/autoencoders/consistency_decoder_vae.py ADDED
@@ -0,0 +1,462 @@
1
+ # Copyright 2025 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from dataclasses import dataclass
15
+ from typing import Dict, Optional, Tuple, Union
16
+
17
+ import torch
18
+ import torch.nn.functional as F
19
+ from torch import nn
20
+
21
+ from ...configuration_utils import ConfigMixin, register_to_config
22
+ from ...schedulers import ConsistencyDecoderScheduler
23
+ from ...utils import BaseOutput
24
+ from ...utils.accelerate_utils import apply_forward_hook
25
+ from ...utils.torch_utils import randn_tensor
26
+ from ..attention_processor import (
27
+ ADDED_KV_ATTENTION_PROCESSORS,
28
+ CROSS_ATTENTION_PROCESSORS,
29
+ AttentionProcessor,
30
+ AttnAddedKVProcessor,
31
+ AttnProcessor,
32
+ )
33
+ from ..modeling_utils import ModelMixin
34
+ from ..unets.unet_2d import UNet2DModel
35
+ from .vae import DecoderOutput, DiagonalGaussianDistribution, Encoder
36
+
37
+
38
+ @dataclass
39
+ class ConsistencyDecoderVAEOutput(BaseOutput):
40
+ """
41
+ Output of encoding method.
42
+
43
+ Args:
44
+ latent_dist (`DiagonalGaussianDistribution`):
45
+ Encoded outputs of `Encoder` represented as the mean and logvar of `DiagonalGaussianDistribution`.
46
+ `DiagonalGaussianDistribution` allows for sampling latents from the distribution.
47
+ """
48
+
49
+ latent_dist: "DiagonalGaussianDistribution"
50
+
51
+
52
+ class ConsistencyDecoderVAE(ModelMixin, ConfigMixin):
53
+ r"""
54
+ The consistency decoder used with DALL-E 3.
55
+
56
+ Examples:
57
+ ```py
58
+ >>> import torch
59
+ >>> from diffusers import StableDiffusionPipeline, ConsistencyDecoderVAE
60
+
61
+ >>> vae = ConsistencyDecoderVAE.from_pretrained("openai/consistency-decoder", torch_dtype=torch.float16)
62
+ >>> pipe = StableDiffusionPipeline.from_pretrained(
63
+ ... "stable-diffusion-v1-5/stable-diffusion-v1-5", vae=vae, torch_dtype=torch.float16
64
+ ... ).to("cuda")
65
+
66
+ >>> image = pipe("horse", generator=torch.manual_seed(0)).images[0]
67
+ >>> image
68
+ ```
69
+ """
70
+
71
+ _supports_group_offloading = False
72
+
73
+ @register_to_config
74
+ def __init__(
75
+ self,
76
+ scaling_factor: float = 0.18215,
77
+ latent_channels: int = 4,
78
+ sample_size: int = 32,
79
+ encoder_act_fn: str = "silu",
80
+ encoder_block_out_channels: Tuple[int, ...] = (128, 256, 512, 512),
81
+ encoder_double_z: bool = True,
82
+ encoder_down_block_types: Tuple[str, ...] = (
83
+ "DownEncoderBlock2D",
84
+ "DownEncoderBlock2D",
85
+ "DownEncoderBlock2D",
86
+ "DownEncoderBlock2D",
87
+ ),
88
+ encoder_in_channels: int = 3,
89
+ encoder_layers_per_block: int = 2,
90
+ encoder_norm_num_groups: int = 32,
91
+ encoder_out_channels: int = 4,
92
+ decoder_add_attention: bool = False,
93
+ decoder_block_out_channels: Tuple[int, ...] = (320, 640, 1024, 1024),
94
+ decoder_down_block_types: Tuple[str, ...] = (
95
+ "ResnetDownsampleBlock2D",
96
+ "ResnetDownsampleBlock2D",
97
+ "ResnetDownsampleBlock2D",
98
+ "ResnetDownsampleBlock2D",
99
+ ),
100
+ decoder_downsample_padding: int = 1,
101
+ decoder_in_channels: int = 7,
102
+ decoder_layers_per_block: int = 3,
103
+ decoder_norm_eps: float = 1e-05,
104
+ decoder_norm_num_groups: int = 32,
105
+ decoder_num_train_timesteps: int = 1024,
106
+ decoder_out_channels: int = 6,
107
+ decoder_resnet_time_scale_shift: str = "scale_shift",
108
+ decoder_time_embedding_type: str = "learned",
109
+ decoder_up_block_types: Tuple[str, ...] = (
110
+ "ResnetUpsampleBlock2D",
111
+ "ResnetUpsampleBlock2D",
112
+ "ResnetUpsampleBlock2D",
113
+ "ResnetUpsampleBlock2D",
114
+ ),
115
+ ):
116
+ super().__init__()
117
+ self.encoder = Encoder(
118
+ act_fn=encoder_act_fn,
119
+ block_out_channels=encoder_block_out_channels,
120
+ double_z=encoder_double_z,
121
+ down_block_types=encoder_down_block_types,
122
+ in_channels=encoder_in_channels,
123
+ layers_per_block=encoder_layers_per_block,
124
+ norm_num_groups=encoder_norm_num_groups,
125
+ out_channels=encoder_out_channels,
126
+ )
127
+
128
+ self.decoder_unet = UNet2DModel(
129
+ add_attention=decoder_add_attention,
130
+ block_out_channels=decoder_block_out_channels,
131
+ down_block_types=decoder_down_block_types,
132
+ downsample_padding=decoder_downsample_padding,
133
+ in_channels=decoder_in_channels,
134
+ layers_per_block=decoder_layers_per_block,
135
+ norm_eps=decoder_norm_eps,
136
+ norm_num_groups=decoder_norm_num_groups,
137
+ num_train_timesteps=decoder_num_train_timesteps,
138
+ out_channels=decoder_out_channels,
139
+ resnet_time_scale_shift=decoder_resnet_time_scale_shift,
140
+ time_embedding_type=decoder_time_embedding_type,
141
+ up_block_types=decoder_up_block_types,
142
+ )
143
+ self.decoder_scheduler = ConsistencyDecoderScheduler()
144
+ self.register_to_config(block_out_channels=encoder_block_out_channels)
145
+ self.register_to_config(force_upcast=False)
146
+ self.register_buffer(
147
+ "means",
148
+ torch.tensor([0.38862467, 0.02253063, 0.07381133, -0.0171294])[None, :, None, None],
149
+ persistent=False,
150
+ )
151
+ self.register_buffer(
152
+ "stds", torch.tensor([0.9654121, 1.0440036, 0.76147926, 0.77022034])[None, :, None, None], persistent=False
153
+ )
154
+
155
+ self.quant_conv = nn.Conv2d(2 * latent_channels, 2 * latent_channels, 1)
156
+
157
+ self.use_slicing = False
158
+ self.use_tiling = False
159
+
160
+ # only relevant if vae tiling is enabled
161
+ self.tile_sample_min_size = self.config.sample_size
162
+ sample_size = (
163
+ self.config.sample_size[0]
164
+ if isinstance(self.config.sample_size, (list, tuple))
165
+ else self.config.sample_size
166
+ )
167
+ self.tile_latent_min_size = int(sample_size / (2 ** (len(self.config.block_out_channels) - 1)))
168
+ self.tile_overlap_factor = 0.25
169
+
170
+ # Copied from diffusers.models.autoencoders.autoencoder_kl.AutoencoderKL.enable_tiling
171
+ def enable_tiling(self, use_tiling: bool = True):
172
+ r"""
173
+ Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to
174
+ compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow
175
+ processing larger images.
176
+ """
177
+ self.use_tiling = use_tiling
178
+
179
+ # Copied from diffusers.models.autoencoders.autoencoder_kl.AutoencoderKL.disable_tiling
180
+ def disable_tiling(self):
181
+ r"""
182
+ Disable tiled VAE decoding. If `enable_tiling` was previously enabled, this method will go back to computing
183
+ decoding in one step.
184
+ """
185
+ self.enable_tiling(False)
186
+
187
+ # Copied from diffusers.models.autoencoders.autoencoder_kl.AutoencoderKL.enable_slicing
188
+ def enable_slicing(self):
189
+ r"""
190
+ Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to
191
+ compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.
192
+ """
193
+ self.use_slicing = True
194
+
195
+ # Copied from diffusers.models.autoencoders.autoencoder_kl.AutoencoderKL.disable_slicing
196
+ def disable_slicing(self):
197
+ r"""
198
+ Disable sliced VAE decoding. If `enable_slicing` was previously enabled, this method will go back to computing
199
+ decoding in one step.
200
+ """
201
+ self.use_slicing = False
202
+
203
+ @property
204
+ # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.attn_processors
205
+ def attn_processors(self) -> Dict[str, AttentionProcessor]:
206
+ r"""
207
+ Returns:
208
+ `dict` of attention processors: A dictionary containing all attention processors used in the model with
209
+ indexed by its weight name.
210
+ """
211
+ # set recursively
212
+ processors = {}
213
+
214
+ def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
215
+ if hasattr(module, "get_processor"):
216
+ processors[f"{name}.processor"] = module.get_processor()
217
+
218
+ for sub_name, child in module.named_children():
219
+ fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)
220
+
221
+ return processors
222
+
223
+ for name, module in self.named_children():
224
+ fn_recursive_add_processors(name, module, processors)
225
+
226
+ return processors
227
+
228
+ # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_attn_processor
229
+ def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
230
+ r"""
231
+ Sets the attention processor to use to compute attention.
232
+
233
+ Parameters:
234
+ processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`):
235
+ The instantiated processor class or a dictionary of processor classes that will be set as the processor
236
+ for **all** `Attention` layers.
237
+
238
+ If `processor` is a dict, the key needs to define the path to the corresponding cross attention
239
+ processor. This is strongly recommended when setting trainable attention processors.
240
+
241
+ """
242
+ count = len(self.attn_processors.keys())
243
+
244
+ if isinstance(processor, dict) and len(processor) != count:
245
+ raise ValueError(
246
+ f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
247
+ f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
248
+ )
249
+
250
+ def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
251
+ if hasattr(module, "set_processor"):
252
+ if not isinstance(processor, dict):
253
+ module.set_processor(processor)
254
+ else:
255
+ module.set_processor(processor.pop(f"{name}.processor"))
256
+
257
+ for sub_name, child in module.named_children():
258
+ fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)
259
+
260
+ for name, module in self.named_children():
261
+ fn_recursive_attn_processor(name, module, processor)
262
+
263
+ # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_default_attn_processor
264
+ def set_default_attn_processor(self):
265
+ """
266
+ Disables custom attention processors and sets the default attention implementation.
267
+ """
268
+ if all(proc.__class__ in ADDED_KV_ATTENTION_PROCESSORS for proc in self.attn_processors.values()):
269
+ processor = AttnAddedKVProcessor()
270
+ elif all(proc.__class__ in CROSS_ATTENTION_PROCESSORS for proc in self.attn_processors.values()):
271
+ processor = AttnProcessor()
272
+ else:
273
+ raise ValueError(
274
+ f"Cannot call `set_default_attn_processor` when attention processors are of type {next(iter(self.attn_processors.values()))}"
275
+ )
276
+
277
+ self.set_attn_processor(processor)
278
+
279
+ @apply_forward_hook
280
+ def encode(
281
+ self, x: torch.Tensor, return_dict: bool = True
282
+ ) -> Union[ConsistencyDecoderVAEOutput, Tuple[DiagonalGaussianDistribution]]:
283
+ """
284
+ Encode a batch of images into latents.
285
+
286
+ Args:
287
+ x (`torch.Tensor`): Input batch of images.
288
+ return_dict (`bool`, *optional*, defaults to `True`):
289
+ Whether to return a [`~models.autoencoders.consistency_decoder_vae.ConsistencyDecoderVAEOutput`]
290
+ instead of a plain tuple.
291
+
292
+ Returns:
293
+ The latent representations of the encoded images. If `return_dict` is True, a
294
+ [`~models.autoencoders.consistency_decoder_vae.ConsistencyDecoderVAEOutput`] is returned, otherwise a
295
+ plain `tuple` is returned.
296
+ """
297
+ if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
298
+ return self.tiled_encode(x, return_dict=return_dict)
299
+
300
+ if self.use_slicing and x.shape[0] > 1:
301
+ encoded_slices = [self.encoder(x_slice) for x_slice in x.split(1)]
302
+ h = torch.cat(encoded_slices)
303
+ else:
304
+ h = self.encoder(x)
305
+
306
+ moments = self.quant_conv(h)
307
+ posterior = DiagonalGaussianDistribution(moments)
308
+
309
+ if not return_dict:
310
+ return (posterior,)
311
+
312
+ return ConsistencyDecoderVAEOutput(latent_dist=posterior)
313
+
314
+ @apply_forward_hook
315
+ def decode(
316
+ self,
317
+ z: torch.Tensor,
318
+ generator: Optional[torch.Generator] = None,
319
+ return_dict: bool = True,
320
+ num_inference_steps: int = 2,
321
+ ) -> Union[DecoderOutput, Tuple[torch.Tensor]]:
322
+ """
323
+ Decodes the input latent vector `z` using the consistency decoder VAE model.
324
+
325
+ Args:
326
+ z (`torch.Tensor`): The input latent vector.
327
+ generator (`torch.Generator`, *optional*): The random number generator. Default is `None`.
328
+ return_dict (`bool`, *optional*, defaults to `True`): Whether to return the output as a dictionary.
329
+ num_inference_steps (`int`, *optional*, defaults to `2`): The number of inference steps.
330
+
331
+ Returns:
332
+ Union[DecoderOutput, Tuple[torch.Tensor]]: The decoded output.
333
+
334
+ """
335
+ z = (z * self.config.scaling_factor - self.means) / self.stds
336
+
337
+ scale_factor = 2 ** (len(self.config.block_out_channels) - 1)
338
+ z = F.interpolate(z, mode="nearest", scale_factor=scale_factor)
339
+
340
+ batch_size, _, height, width = z.shape
341
+
342
+ self.decoder_scheduler.set_timesteps(num_inference_steps, device=self.device)
343
+
344
+ x_t = self.decoder_scheduler.init_noise_sigma * randn_tensor(
345
+ (batch_size, 3, height, width), generator=generator, dtype=z.dtype, device=z.device
346
+ )
347
+
348
+ for t in self.decoder_scheduler.timesteps:
349
+ model_input = torch.concat([self.decoder_scheduler.scale_model_input(x_t, t), z], dim=1)
350
+ model_output = self.decoder_unet(model_input, t).sample[:, :3, :, :]
351
+ prev_sample = self.decoder_scheduler.step(model_output, t, x_t, generator).prev_sample
352
+ x_t = prev_sample
353
+
354
+ x_0 = x_t
355
+
356
+ if not return_dict:
357
+ return (x_0,)
358
+
359
+ return DecoderOutput(sample=x_0)
360
+
361
+ # Copied from diffusers.models.autoencoders.autoencoder_kl.AutoencoderKL.blend_v
362
+ def blend_v(self, a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor:
363
+ blend_extent = min(a.shape[2], b.shape[2], blend_extent)
364
+ for y in range(blend_extent):
365
+ b[:, :, y, :] = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
366
+ return b
367
+
368
+ # Copied from diffusers.models.autoencoders.autoencoder_kl.AutoencoderKL.blend_h
369
+ def blend_h(self, a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor:
370
+ blend_extent = min(a.shape[3], b.shape[3], blend_extent)
371
+ for x in range(blend_extent):
372
+ b[:, :, :, x] = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
373
+ return b
374
+
375
+ def tiled_encode(self, x: torch.Tensor, return_dict: bool = True) -> Union[ConsistencyDecoderVAEOutput, Tuple]:
376
+ r"""Encode a batch of images using a tiled encoder.
377
+
378
+ When this option is enabled, the VAE will split the input tensor into tiles to compute encoding in several
379
+ steps. This is useful to keep memory use constant regardless of image size. The end result of tiled encoding is
380
+ different from non-tiled encoding because each tile uses a different encoder. To avoid tiling artifacts, the
381
+ tiles overlap and are blended together to form a smooth output. You may still see tile-sized changes in the
382
+ output, but they should be much less noticeable.
383
+
384
+ Args:
385
+ x (`torch.Tensor`): Input batch of images.
386
+ return_dict (`bool`, *optional*, defaults to `True`):
387
+ Whether or not to return a [`~models.autoencoders.consistency_decoder_vae.ConsistencyDecoderVAEOutput`]
388
+ instead of a plain tuple.
389
+
390
+ Returns:
391
+ [`~models.autoencoders.consistency_decoder_vae.ConsistencyDecoderVAEOutput`] or `tuple`:
392
+ If return_dict is True, a [`~models.autoencoders.consistency_decoder_vae.ConsistencyDecoderVAEOutput`]
393
+ is returned, otherwise a plain `tuple` is returned.
394
+ """
395
+ overlap_size = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor))
396
+ blend_extent = int(self.tile_latent_min_size * self.tile_overlap_factor)
397
+ row_limit = self.tile_latent_min_size - blend_extent
398
+
399
+ # Split the image into 512x512 tiles and encode them separately.
400
+ rows = []
401
+ for i in range(0, x.shape[2], overlap_size):
402
+ row = []
403
+ for j in range(0, x.shape[3], overlap_size):
404
+ tile = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
405
+ tile = self.encoder(tile)
406
+ tile = self.quant_conv(tile)
407
+ row.append(tile)
408
+ rows.append(row)
409
+ result_rows = []
410
+ for i, row in enumerate(rows):
411
+ result_row = []
412
+ for j, tile in enumerate(row):
413
+ # blend the above tile and the left tile
414
+ # to the current tile and add the current tile to the result row
415
+ if i > 0:
416
+ tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
417
+ if j > 0:
418
+ tile = self.blend_h(row[j - 1], tile, blend_extent)
419
+ result_row.append(tile[:, :, :row_limit, :row_limit])
420
+ result_rows.append(torch.cat(result_row, dim=3))
421
+
422
+ moments = torch.cat(result_rows, dim=2)
423
+ posterior = DiagonalGaussianDistribution(moments)
424
+
425
+ if not return_dict:
426
+ return (posterior,)
427
+
428
+ return ConsistencyDecoderVAEOutput(latent_dist=posterior)
429
+
430
+ def forward(
431
+ self,
432
+ sample: torch.Tensor,
433
+ sample_posterior: bool = False,
434
+ return_dict: bool = True,
435
+ generator: Optional[torch.Generator] = None,
436
+ ) -> Union[DecoderOutput, Tuple[torch.Tensor]]:
437
+ r"""
438
+ Args:
439
+ sample (`torch.Tensor`): Input sample.
440
+ sample_posterior (`bool`, *optional*, defaults to `False`):
441
+ Whether to sample from the posterior.
442
+ return_dict (`bool`, *optional*, defaults to `True`):
443
+ Whether or not to return a [`DecoderOutput`] instead of a plain tuple.
444
+ generator (`torch.Generator`, *optional*, defaults to `None`):
445
+ Generator to use for sampling.
446
+
447
+ Returns:
448
+ [`DecoderOutput`] or `tuple`:
449
+ If return_dict is True, a [`DecoderOutput`] is returned, otherwise a plain `tuple` is returned.
450
+ """
451
+ x = sample
452
+ posterior = self.encode(x).latent_dist
453
+ if sample_posterior:
454
+ z = posterior.sample(generator=generator)
455
+ else:
456
+ z = posterior.mode()
457
+ dec = self.decode(z, generator=generator).sample
458
+
459
+ if not return_dict:
460
+ return (dec,)
461
+
462
+ return DecoderOutput(sample=dec)
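A standalone encode/decode sketch for this class. The checkpoint name comes from the docstring above; shapes are illustrative and a CUDA device is assumed:

```py
import torch
from diffusers import ConsistencyDecoderVAE

vae = ConsistencyDecoderVAE.from_pretrained(
    "openai/consistency-decoder", torch_dtype=torch.float16
).to("cuda")

image = torch.rand(1, 3, 256, 256, dtype=torch.float16, device="cuda") * 2 - 1
posterior = vae.encode(image).latent_dist
z = posterior.sample(generator=torch.Generator(device="cuda").manual_seed(0))  # (1, 4, 32, 32)

# decode() runs the consistency UNet for `num_inference_steps` steps (2 by default).
reconstruction = vae.decode(z, num_inference_steps=2).sample                    # (1, 3, 256, 256)
```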
exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/models/autoencoders/vae.py ADDED
@@ -0,0 +1,896 @@
1
+ # Copyright 2025 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from dataclasses import dataclass
15
+ from typing import Optional, Tuple
16
+
17
+ import numpy as np
18
+ import torch
19
+ import torch.nn as nn
20
+
21
+ from ...utils import BaseOutput
22
+ from ...utils.torch_utils import randn_tensor
23
+ from ..activations import get_activation
24
+ from ..attention_processor import SpatialNorm
25
+ from ..unets.unet_2d_blocks import (
26
+ AutoencoderTinyBlock,
27
+ UNetMidBlock2D,
28
+ get_down_block,
29
+ get_up_block,
30
+ )
31
+
32
+
33
+ @dataclass
34
+ class EncoderOutput(BaseOutput):
35
+ r"""
36
+ Output of encoding method.
37
+
38
+ Args:
39
+ latent (`torch.Tensor` of shape `(batch_size, num_channels, latent_height, latent_width)`):
40
+ The encoded latent.
41
+ """
42
+
43
+ latent: torch.Tensor
44
+
45
+
46
+ @dataclass
47
+ class DecoderOutput(BaseOutput):
48
+ r"""
49
+ Output of decoding method.
50
+
51
+ Args:
52
+ sample (`torch.Tensor` of shape `(batch_size, num_channels, height, width)`):
53
+ The decoded output sample from the last layer of the model.
54
+ """
55
+
56
+ sample: torch.Tensor
57
+ commit_loss: Optional[torch.FloatTensor] = None
58
+
59
+
60
+ class Encoder(nn.Module):
61
+ r"""
62
+ The `Encoder` layer of a variational autoencoder that encodes its input into a latent representation.
63
+
64
+ Args:
65
+ in_channels (`int`, *optional*, defaults to 3):
66
+ The number of input channels.
67
+ out_channels (`int`, *optional*, defaults to 3):
68
+ The number of output channels.
69
+ down_block_types (`Tuple[str, ...]`, *optional*, defaults to `("DownEncoderBlock2D",)`):
70
+ The types of down blocks to use. See `~diffusers.models.unet_2d_blocks.get_down_block` for available
71
+ options.
72
+ block_out_channels (`Tuple[int, ...]`, *optional*, defaults to `(64,)`):
73
+ The number of output channels for each block.
74
+ layers_per_block (`int`, *optional*, defaults to 2):
75
+ The number of layers per block.
76
+ norm_num_groups (`int`, *optional*, defaults to 32):
77
+ The number of groups for normalization.
78
+ act_fn (`str`, *optional*, defaults to `"silu"`):
79
+ The activation function to use. See `~diffusers.models.activations.get_activation` for available options.
80
+ double_z (`bool`, *optional*, defaults to `True`):
81
+ Whether to double the number of output channels for the last block.
82
+ """
83
+
84
+ def __init__(
85
+ self,
86
+ in_channels: int = 3,
87
+ out_channels: int = 3,
88
+ down_block_types: Tuple[str, ...] = ("DownEncoderBlock2D",),
89
+ block_out_channels: Tuple[int, ...] = (64,),
90
+ layers_per_block: int = 2,
91
+ norm_num_groups: int = 32,
92
+ act_fn: str = "silu",
93
+ double_z: bool = True,
94
+ mid_block_add_attention=True,
95
+ ):
96
+ super().__init__()
97
+ self.layers_per_block = layers_per_block
98
+
99
+ self.conv_in = nn.Conv2d(
100
+ in_channels,
101
+ block_out_channels[0],
102
+ kernel_size=3,
103
+ stride=1,
104
+ padding=1,
105
+ )
106
+
107
+ self.down_blocks = nn.ModuleList([])
108
+
109
+ # down
110
+ output_channel = block_out_channels[0]
111
+ for i, down_block_type in enumerate(down_block_types):
112
+ input_channel = output_channel
113
+ output_channel = block_out_channels[i]
114
+ is_final_block = i == len(block_out_channels) - 1
115
+
116
+ down_block = get_down_block(
117
+ down_block_type,
118
+ num_layers=self.layers_per_block,
119
+ in_channels=input_channel,
120
+ out_channels=output_channel,
121
+ add_downsample=not is_final_block,
122
+ resnet_eps=1e-6,
123
+ downsample_padding=0,
124
+ resnet_act_fn=act_fn,
125
+ resnet_groups=norm_num_groups,
126
+ attention_head_dim=output_channel,
127
+ temb_channels=None,
128
+ )
129
+ self.down_blocks.append(down_block)
130
+
131
+ # mid
132
+ self.mid_block = UNetMidBlock2D(
133
+ in_channels=block_out_channels[-1],
134
+ resnet_eps=1e-6,
135
+ resnet_act_fn=act_fn,
136
+ output_scale_factor=1,
137
+ resnet_time_scale_shift="default",
138
+ attention_head_dim=block_out_channels[-1],
139
+ resnet_groups=norm_num_groups,
140
+ temb_channels=None,
141
+ add_attention=mid_block_add_attention,
142
+ )
143
+
144
+ # out
145
+ self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[-1], num_groups=norm_num_groups, eps=1e-6)
146
+ self.conv_act = nn.SiLU()
147
+
148
+ conv_out_channels = 2 * out_channels if double_z else out_channels
149
+ self.conv_out = nn.Conv2d(block_out_channels[-1], conv_out_channels, 3, padding=1)
150
+
151
+ self.gradient_checkpointing = False
152
+
153
+ def forward(self, sample: torch.Tensor) -> torch.Tensor:
154
+ r"""The forward method of the `Encoder` class."""
155
+
156
+ sample = self.conv_in(sample)
157
+
158
+ if torch.is_grad_enabled() and self.gradient_checkpointing:
159
+ # down
160
+ for down_block in self.down_blocks:
161
+ sample = self._gradient_checkpointing_func(down_block, sample)
162
+ # middle
163
+ sample = self._gradient_checkpointing_func(self.mid_block, sample)
164
+
165
+ else:
166
+ # down
167
+ for down_block in self.down_blocks:
168
+ sample = down_block(sample)
169
+
170
+ # middle
171
+ sample = self.mid_block(sample)
172
+
173
+ # post-process
174
+ sample = self.conv_norm_out(sample)
175
+ sample = self.conv_act(sample)
176
+ sample = self.conv_out(sample)
177
+
178
+ return sample
179
+
180
+
181
+ class Decoder(nn.Module):
182
+ r"""
183
+ The `Decoder` layer of a variational autoencoder that decodes its latent representation into an output sample.
184
+
185
+ Args:
186
+ in_channels (`int`, *optional*, defaults to 3):
187
+ The number of input channels.
188
+ out_channels (`int`, *optional*, defaults to 3):
189
+ The number of output channels.
190
+ up_block_types (`Tuple[str, ...]`, *optional*, defaults to `("UpDecoderBlock2D",)`):
191
+ The types of up blocks to use. See `~diffusers.models.unet_2d_blocks.get_up_block` for available options.
192
+ block_out_channels (`Tuple[int, ...]`, *optional*, defaults to `(64,)`):
193
+ The number of output channels for each block.
194
+ layers_per_block (`int`, *optional*, defaults to 2):
195
+ The number of layers per block.
196
+ norm_num_groups (`int`, *optional*, defaults to 32):
197
+ The number of groups for normalization.
198
+ act_fn (`str`, *optional*, defaults to `"silu"`):
199
+ The activation function to use. See `~diffusers.models.activations.get_activation` for available options.
200
+ norm_type (`str`, *optional*, defaults to `"group"`):
201
+ The normalization type to use. Can be either `"group"` or `"spatial"`.
202
+ """
203
+
204
+ def __init__(
205
+ self,
206
+ in_channels: int = 3,
207
+ out_channels: int = 3,
208
+ up_block_types: Tuple[str, ...] = ("UpDecoderBlock2D",),
209
+ block_out_channels: Tuple[int, ...] = (64,),
210
+ layers_per_block: int = 2,
211
+ norm_num_groups: int = 32,
212
+ act_fn: str = "silu",
213
+ norm_type: str = "group", # group, spatial
214
+ mid_block_add_attention=True,
215
+ ):
216
+ super().__init__()
217
+ self.layers_per_block = layers_per_block
218
+
219
+ self.conv_in = nn.Conv2d(
220
+ in_channels,
221
+ block_out_channels[-1],
222
+ kernel_size=3,
223
+ stride=1,
224
+ padding=1,
225
+ )
226
+
227
+ self.up_blocks = nn.ModuleList([])
228
+
229
+ temb_channels = in_channels if norm_type == "spatial" else None
230
+
231
+ # mid
232
+ self.mid_block = UNetMidBlock2D(
233
+ in_channels=block_out_channels[-1],
234
+ resnet_eps=1e-6,
235
+ resnet_act_fn=act_fn,
236
+ output_scale_factor=1,
237
+ resnet_time_scale_shift="default" if norm_type == "group" else norm_type,
238
+ attention_head_dim=block_out_channels[-1],
239
+ resnet_groups=norm_num_groups,
240
+ temb_channels=temb_channels,
241
+ add_attention=mid_block_add_attention,
242
+ )
243
+
244
+ # up
245
+ reversed_block_out_channels = list(reversed(block_out_channels))
246
+ output_channel = reversed_block_out_channels[0]
247
+ for i, up_block_type in enumerate(up_block_types):
248
+ prev_output_channel = output_channel
249
+ output_channel = reversed_block_out_channels[i]
250
+
251
+ is_final_block = i == len(block_out_channels) - 1
252
+
253
+ up_block = get_up_block(
254
+ up_block_type,
255
+ num_layers=self.layers_per_block + 1,
256
+ in_channels=prev_output_channel,
257
+ out_channels=output_channel,
258
+ prev_output_channel=prev_output_channel,
259
+ add_upsample=not is_final_block,
260
+ resnet_eps=1e-6,
261
+ resnet_act_fn=act_fn,
262
+ resnet_groups=norm_num_groups,
263
+ attention_head_dim=output_channel,
264
+ temb_channels=temb_channels,
265
+ resnet_time_scale_shift=norm_type,
266
+ )
267
+ self.up_blocks.append(up_block)
268
+ prev_output_channel = output_channel
269
+
270
+ # out
271
+ if norm_type == "spatial":
272
+ self.conv_norm_out = SpatialNorm(block_out_channels[0], temb_channels)
273
+ else:
274
+ self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=1e-6)
275
+ self.conv_act = nn.SiLU()
276
+ self.conv_out = nn.Conv2d(block_out_channels[0], out_channels, 3, padding=1)
277
+
278
+ self.gradient_checkpointing = False
279
+
280
+ def forward(
281
+ self,
282
+ sample: torch.Tensor,
283
+ latent_embeds: Optional[torch.Tensor] = None,
284
+ ) -> torch.Tensor:
285
+ r"""The forward method of the `Decoder` class."""
286
+
287
+ sample = self.conv_in(sample)
288
+
289
+ upscale_dtype = next(iter(self.up_blocks.parameters())).dtype
290
+ if torch.is_grad_enabled() and self.gradient_checkpointing:
291
+ # middle
292
+ sample = self._gradient_checkpointing_func(self.mid_block, sample, latent_embeds)
293
+ sample = sample.to(upscale_dtype)
294
+
295
+ # up
296
+ for up_block in self.up_blocks:
297
+ sample = self._gradient_checkpointing_func(up_block, sample, latent_embeds)
298
+ else:
299
+ # middle
300
+ sample = self.mid_block(sample, latent_embeds)
301
+ sample = sample.to(upscale_dtype)
302
+
303
+ # up
304
+ for up_block in self.up_blocks:
305
+ sample = up_block(sample, latent_embeds)
306
+
307
+ # post-process
308
+ if latent_embeds is None:
309
+ sample = self.conv_norm_out(sample)
310
+ else:
311
+ sample = self.conv_norm_out(sample, latent_embeds)
312
+ sample = self.conv_act(sample)
313
+ sample = self.conv_out(sample)
314
+
315
+ return sample
316
+
317
+
318
+ class UpSample(nn.Module):
319
+ r"""
320
+ The `UpSample` layer of a variational autoencoder that upsamples its input.
321
+
322
+ Args:
323
+ in_channels (`int`, *optional*, defaults to 3):
324
+ The number of input channels.
325
+ out_channels (`int`, *optional*, defaults to 3):
326
+ The number of output channels.
327
+ """
328
+
329
+ def __init__(
330
+ self,
331
+ in_channels: int,
332
+ out_channels: int,
333
+ ) -> None:
334
+ super().__init__()
335
+ self.in_channels = in_channels
336
+ self.out_channels = out_channels
337
+ self.deconv = nn.ConvTranspose2d(in_channels, out_channels, kernel_size=4, stride=2, padding=1)
338
+
339
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
340
+ r"""The forward method of the `UpSample` class."""
341
+ x = torch.relu(x)
342
+ x = self.deconv(x)
343
+ return x
344
+
345
+
346
+ class MaskConditionEncoder(nn.Module):
347
+ """
348
+ Condition encoder used in [`AsymmetricAutoencoderKL`] to encode the masked image for the mask-conditioned decoder.
349
+ """
350
+
351
+ def __init__(
352
+ self,
353
+ in_ch: int,
354
+ out_ch: int = 192,
355
+ res_ch: int = 768,
356
+ stride: int = 16,
357
+ ) -> None:
358
+ super().__init__()
359
+
360
+ channels = []
361
+ while stride > 1:
362
+ stride = stride // 2
363
+ in_ch_ = out_ch * 2
364
+ if out_ch > res_ch:
365
+ out_ch = res_ch
366
+ if stride == 1:
367
+ in_ch_ = res_ch
368
+ channels.append((in_ch_, out_ch))
369
+ out_ch *= 2
370
+
371
+ out_channels = []
372
+ for _in_ch, _out_ch in channels:
373
+ out_channels.append(_out_ch)
374
+ out_channels.append(channels[-1][0])
375
+
376
+ layers = []
377
+ in_ch_ = in_ch
378
+ for l in range(len(out_channels)):
379
+ out_ch_ = out_channels[l]
380
+ if l == 0 or l == 1:
381
+ layers.append(nn.Conv2d(in_ch_, out_ch_, kernel_size=3, stride=1, padding=1))
382
+ else:
383
+ layers.append(nn.Conv2d(in_ch_, out_ch_, kernel_size=4, stride=2, padding=1))
384
+ in_ch_ = out_ch_
385
+
386
+ self.layers = nn.Sequential(*layers)
387
+
388
+ def forward(self, x: torch.Tensor, mask=None) -> torch.Tensor:
389
+ r"""The forward method of the `MaskConditionEncoder` class."""
390
+ out = {}
391
+ for l in range(len(self.layers)):
392
+ layer = self.layers[l]
393
+ x = layer(x)
394
+ out[str(tuple(x.shape))] = x
395
+ x = torch.relu(x)
396
+ return out
397
+
398
+
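A small sketch of what the condition encoder produces; the import path and tensor sizes below are assumptions for illustration:

```py
import torch
from diffusers.models.autoencoders.vae import MaskConditionEncoder  # import path assumed

# The condition encoder returns a dict of feature maps keyed by their shape string,
# which MaskConditionDecoder uses to look up a matching-resolution feature at each
# upsampling stage.
encoder = MaskConditionEncoder(in_ch=3, out_ch=192, res_ch=768, stride=16)

image = torch.rand(1, 3, 64, 64)
mask = (torch.rand(1, 1, 64, 64) > 0.5).float()
features = encoder((1 - mask) * image, mask)

for shape_key in features:
    print(shape_key)  # e.g. "(1, 192, 64, 64)", ..., "(1, 768, 8, 8)"
```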
399
+ class MaskConditionDecoder(nn.Module):
400
+ r"""The `MaskConditionDecoder` should be used in combination with [`AsymmetricAutoencoderKL`] to enhance the model's
401
+ decoder with a conditioner on the mask and masked image.
402
+
403
+ Args:
404
+ in_channels (`int`, *optional*, defaults to 3):
405
+ The number of input channels.
406
+ out_channels (`int`, *optional*, defaults to 3):
407
+ The number of output channels.
408
+ up_block_types (`Tuple[str, ...]`, *optional*, defaults to `("UpDecoderBlock2D",)`):
409
+ The types of up blocks to use. See `~diffusers.models.unet_2d_blocks.get_up_block` for available options.
410
+ block_out_channels (`Tuple[int, ...]`, *optional*, defaults to `(64,)`):
411
+ The number of output channels for each block.
412
+ layers_per_block (`int`, *optional*, defaults to 2):
413
+ The number of layers per block.
414
+ norm_num_groups (`int`, *optional*, defaults to 32):
415
+ The number of groups for normalization.
416
+ act_fn (`str`, *optional*, defaults to `"silu"`):
417
+ The activation function to use. See `~diffusers.models.activations.get_activation` for available options.
418
+ norm_type (`str`, *optional*, defaults to `"group"`):
419
+ The normalization type to use. Can be either `"group"` or `"spatial"`.
420
+ """
421
+
422
+ def __init__(
423
+ self,
424
+ in_channels: int = 3,
425
+ out_channels: int = 3,
426
+ up_block_types: Tuple[str, ...] = ("UpDecoderBlock2D",),
427
+ block_out_channels: Tuple[int, ...] = (64,),
428
+ layers_per_block: int = 2,
429
+ norm_num_groups: int = 32,
430
+ act_fn: str = "silu",
431
+ norm_type: str = "group", # group, spatial
432
+ ):
433
+ super().__init__()
434
+ self.layers_per_block = layers_per_block
435
+
436
+ self.conv_in = nn.Conv2d(
437
+ in_channels,
438
+ block_out_channels[-1],
439
+ kernel_size=3,
440
+ stride=1,
441
+ padding=1,
442
+ )
443
+
444
+ self.up_blocks = nn.ModuleList([])
445
+
446
+ temb_channels = in_channels if norm_type == "spatial" else None
447
+
448
+ # mid
449
+ self.mid_block = UNetMidBlock2D(
450
+ in_channels=block_out_channels[-1],
451
+ resnet_eps=1e-6,
452
+ resnet_act_fn=act_fn,
453
+ output_scale_factor=1,
454
+ resnet_time_scale_shift="default" if norm_type == "group" else norm_type,
455
+ attention_head_dim=block_out_channels[-1],
456
+ resnet_groups=norm_num_groups,
457
+ temb_channels=temb_channels,
458
+ )
459
+
460
+ # up
461
+ reversed_block_out_channels = list(reversed(block_out_channels))
462
+ output_channel = reversed_block_out_channels[0]
463
+ for i, up_block_type in enumerate(up_block_types):
464
+ prev_output_channel = output_channel
465
+ output_channel = reversed_block_out_channels[i]
466
+
467
+ is_final_block = i == len(block_out_channels) - 1
468
+
469
+ up_block = get_up_block(
470
+ up_block_type,
471
+ num_layers=self.layers_per_block + 1,
472
+ in_channels=prev_output_channel,
473
+ out_channels=output_channel,
474
+ prev_output_channel=None,
475
+ add_upsample=not is_final_block,
476
+ resnet_eps=1e-6,
477
+ resnet_act_fn=act_fn,
478
+ resnet_groups=norm_num_groups,
479
+ attention_head_dim=output_channel,
480
+ temb_channels=temb_channels,
481
+ resnet_time_scale_shift=norm_type,
482
+ )
483
+ self.up_blocks.append(up_block)
484
+ prev_output_channel = output_channel
485
+
486
+ # condition encoder
487
+ self.condition_encoder = MaskConditionEncoder(
488
+ in_ch=out_channels,
489
+ out_ch=block_out_channels[0],
490
+ res_ch=block_out_channels[-1],
491
+ )
492
+
493
+ # out
494
+ if norm_type == "spatial":
495
+ self.conv_norm_out = SpatialNorm(block_out_channels[0], temb_channels)
496
+ else:
497
+ self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=1e-6)
498
+ self.conv_act = nn.SiLU()
499
+ self.conv_out = nn.Conv2d(block_out_channels[0], out_channels, 3, padding=1)
500
+
501
+ self.gradient_checkpointing = False
502
+
503
+ def forward(
504
+ self,
505
+ z: torch.Tensor,
506
+ image: Optional[torch.Tensor] = None,
507
+ mask: Optional[torch.Tensor] = None,
508
+ latent_embeds: Optional[torch.Tensor] = None,
509
+ ) -> torch.Tensor:
510
+ r"""The forward method of the `MaskConditionDecoder` class."""
511
+ sample = z
512
+ sample = self.conv_in(sample)
513
+
514
+ upscale_dtype = next(iter(self.up_blocks.parameters())).dtype
515
+ if torch.is_grad_enabled() and self.gradient_checkpointing:
516
+ # middle
517
+ sample = self._gradient_checkpointing_func(self.mid_block, sample, latent_embeds)
518
+ sample = sample.to(upscale_dtype)
519
+
520
+ # condition encoder
521
+ if image is not None and mask is not None:
522
+ masked_image = (1 - mask) * image
523
+ im_x = self._gradient_checkpointing_func(
524
+ self.condition_encoder,
525
+ masked_image,
526
+ mask,
527
+ )
528
+
529
+ # up
530
+ for up_block in self.up_blocks:
531
+ if image is not None and mask is not None:
532
+ sample_ = im_x[str(tuple(sample.shape))]
533
+ mask_ = nn.functional.interpolate(mask, size=sample.shape[-2:], mode="nearest")
534
+ sample = sample * mask_ + sample_ * (1 - mask_)
535
+ sample = self._gradient_checkpointing_func(up_block, sample, latent_embeds)
536
+ if image is not None and mask is not None:
537
+ sample = sample * mask + im_x[str(tuple(sample.shape))] * (1 - mask)
538
+ else:
539
+ # middle
540
+ sample = self.mid_block(sample, latent_embeds)
541
+ sample = sample.to(upscale_dtype)
542
+
543
+ # condition encoder
544
+ if image is not None and mask is not None:
545
+ masked_image = (1 - mask) * image
546
+ im_x = self.condition_encoder(masked_image, mask)
547
+
548
+ # up
549
+ for up_block in self.up_blocks:
550
+ if image is not None and mask is not None:
551
+ sample_ = im_x[str(tuple(sample.shape))]
552
+ mask_ = nn.functional.interpolate(mask, size=sample.shape[-2:], mode="nearest")
553
+ sample = sample * mask_ + sample_ * (1 - mask_)
554
+ sample = up_block(sample, latent_embeds)
555
+ if image is not None and mask is not None:
556
+ sample = sample * mask + im_x[str(tuple(sample.shape))] * (1 - mask)
557
+
558
+ # post-process
559
+ if latent_embeds is None:
560
+ sample = self.conv_norm_out(sample)
561
+ else:
562
+ sample = self.conv_norm_out(sample, latent_embeds)
563
+ sample = self.conv_act(sample)
564
+ sample = self.conv_out(sample)
565
+
566
+ return sample
567
+
568
+
569
+ class VectorQuantizer(nn.Module):
570
+ """
571
+ Improved version of VectorQuantizer that can be used as a drop-in replacement. It mostly avoids costly matrix
572
+ multiplications and allows for post-hoc remapping of indices.
573
+ """
574
+
575
+ # NOTE: due to a bug, the beta term was applied to the wrong term. For
576
+ # backwards compatibility we use the buggy version by default, but you can
577
+ # specify legacy=False to fix it.
578
+ def __init__(
579
+ self,
580
+ n_e: int,
581
+ vq_embed_dim: int,
582
+ beta: float,
583
+ remap=None,
584
+ unknown_index: str = "random",
585
+ sane_index_shape: bool = False,
586
+ legacy: bool = True,
587
+ ):
588
+ super().__init__()
589
+ self.n_e = n_e
590
+ self.vq_embed_dim = vq_embed_dim
591
+ self.beta = beta
592
+ self.legacy = legacy
593
+
594
+ self.embedding = nn.Embedding(self.n_e, self.vq_embed_dim)
595
+ self.embedding.weight.data.uniform_(-1.0 / self.n_e, 1.0 / self.n_e)
596
+
597
+ self.remap = remap
598
+ if self.remap is not None:
599
+ self.register_buffer("used", torch.tensor(np.load(self.remap)))
600
+ self.used: torch.Tensor
601
+ self.re_embed = self.used.shape[0]
602
+ self.unknown_index = unknown_index # "random" or "extra" or integer
603
+ if self.unknown_index == "extra":
604
+ self.unknown_index = self.re_embed
605
+ self.re_embed = self.re_embed + 1
606
+ print(
607
+ f"Remapping {self.n_e} indices to {self.re_embed} indices. "
608
+ f"Using {self.unknown_index} for unknown indices."
609
+ )
610
+ else:
611
+ self.re_embed = n_e
612
+
613
+ self.sane_index_shape = sane_index_shape
614
+
615
+ def remap_to_used(self, inds: torch.LongTensor) -> torch.LongTensor:
616
+ ishape = inds.shape
617
+ assert len(ishape) > 1
618
+ inds = inds.reshape(ishape[0], -1)
619
+ used = self.used.to(inds)
620
+ match = (inds[:, :, None] == used[None, None, ...]).long()
621
+ new = match.argmax(-1)
622
+ unknown = match.sum(2) < 1
623
+ if self.unknown_index == "random":
624
+ new[unknown] = torch.randint(0, self.re_embed, size=new[unknown].shape).to(device=new.device)
625
+ else:
626
+ new[unknown] = self.unknown_index
627
+ return new.reshape(ishape)
628
+
629
+ def unmap_to_all(self, inds: torch.LongTensor) -> torch.LongTensor:
630
+ ishape = inds.shape
631
+ assert len(ishape) > 1
632
+ inds = inds.reshape(ishape[0], -1)
633
+ used = self.used.to(inds)
634
+ if self.re_embed > self.used.shape[0]: # extra token
635
+ inds[inds >= self.used.shape[0]] = 0 # simply set to zero
636
+ back = torch.gather(used[None, :][inds.shape[0] * [0], :], 1, inds)
637
+ return back.reshape(ishape)
638
+
639
+ def forward(self, z: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, Tuple]:
640
+ # reshape z -> (batch, height, width, channel) and flatten
641
+ z = z.permute(0, 2, 3, 1).contiguous()
642
+ z_flattened = z.view(-1, self.vq_embed_dim)
643
+
644
+ # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
645
+ min_encoding_indices = torch.argmin(torch.cdist(z_flattened, self.embedding.weight), dim=1)
646
+
647
+ z_q = self.embedding(min_encoding_indices).view(z.shape)
648
+ perplexity = None
649
+ min_encodings = None
650
+
651
+ # compute loss for embedding
652
+ if not self.legacy:
653
+ loss = self.beta * torch.mean((z_q.detach() - z) ** 2) + torch.mean((z_q - z.detach()) ** 2)
654
+ else:
655
+ loss = torch.mean((z_q.detach() - z) ** 2) + self.beta * torch.mean((z_q - z.detach()) ** 2)
656
+
657
+ # preserve gradients
658
+ z_q: torch.Tensor = z + (z_q - z).detach()
659
+
660
+ # reshape back to match original input shape
661
+ z_q = z_q.permute(0, 3, 1, 2).contiguous()
662
+
663
+ if self.remap is not None:
664
+ min_encoding_indices = min_encoding_indices.reshape(z.shape[0], -1) # add batch axis
665
+ min_encoding_indices = self.remap_to_used(min_encoding_indices)
666
+ min_encoding_indices = min_encoding_indices.reshape(-1, 1) # flatten
667
+
668
+ if self.sane_index_shape:
669
+ min_encoding_indices = min_encoding_indices.reshape(z_q.shape[0], z_q.shape[2], z_q.shape[3])
670
+
671
+ return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
672
+
673
+ def get_codebook_entry(self, indices: torch.LongTensor, shape: Tuple[int, ...]) -> torch.Tensor:
674
+ # shape specifying (batch, height, width, channel)
675
+ if self.remap is not None:
676
+ indices = indices.reshape(shape[0], -1) # add batch axis
677
+ indices = self.unmap_to_all(indices)
678
+ indices = indices.reshape(-1) # flatten again
679
+
680
+ # get quantized latent vectors
681
+ z_q: torch.Tensor = self.embedding(indices)
682
+
683
+ if shape is not None:
684
+ z_q = z_q.view(shape)
685
+ # reshape back to match original input shape
686
+ z_q = z_q.permute(0, 3, 1, 2).contiguous()
687
+
688
+ return z_q
689
+
690
+
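A minimal sketch of quantizing a feature map with this class; the import path and sizes are assumptions:

```py
import torch
from diffusers.models.autoencoders.vae import VectorQuantizer  # import path assumed

# Quantize a random 4-channel feature map against a 256-entry codebook.
quantizer = VectorQuantizer(n_e=256, vq_embed_dim=4, beta=0.25, sane_index_shape=True)

z = torch.randn(2, 4, 8, 8, requires_grad=True)
z_q, commit_loss, (_, _, indices) = quantizer(z)

# z_q has the same shape as z; gradients flow straight through the quantization
# because of the `z + (z_q - z).detach()` trick in forward().
print(z_q.shape, commit_loss.item(), indices.shape)  # (2, 4, 8, 8), scalar, (2, 8, 8)
```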
691
+ class DiagonalGaussianDistribution(object):
692
+ def __init__(self, parameters: torch.Tensor, deterministic: bool = False):
693
+ self.parameters = parameters
694
+ self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)
695
+ self.logvar = torch.clamp(self.logvar, -30.0, 20.0)
696
+ self.deterministic = deterministic
697
+ self.std = torch.exp(0.5 * self.logvar)
698
+ self.var = torch.exp(self.logvar)
699
+ if self.deterministic:
700
+ self.var = self.std = torch.zeros_like(
701
+ self.mean, device=self.parameters.device, dtype=self.parameters.dtype
702
+ )
703
+
704
+ def sample(self, generator: Optional[torch.Generator] = None) -> torch.Tensor:
705
+ # make sure sample is on the same device as the parameters and has same dtype
706
+ sample = randn_tensor(
707
+ self.mean.shape,
708
+ generator=generator,
709
+ device=self.parameters.device,
710
+ dtype=self.parameters.dtype,
711
+ )
712
+ x = self.mean + self.std * sample
713
+ return x
714
+
715
+ def kl(self, other: "DiagonalGaussianDistribution" = None) -> torch.Tensor:
716
+ if self.deterministic:
717
+ return torch.Tensor([0.0])
718
+ else:
719
+ if other is None:
720
+ return 0.5 * torch.sum(
721
+ torch.pow(self.mean, 2) + self.var - 1.0 - self.logvar,
722
+ dim=[1, 2, 3],
723
+ )
724
+ else:
725
+ return 0.5 * torch.sum(
726
+ torch.pow(self.mean - other.mean, 2) / other.var
727
+ + self.var / other.var
728
+ - 1.0
729
+ - self.logvar
730
+ + other.logvar,
731
+ dim=[1, 2, 3],
732
+ )
733
+
734
+ def nll(self, sample: torch.Tensor, dims: Tuple[int, ...] = [1, 2, 3]) -> torch.Tensor:
735
+ if self.deterministic:
736
+ return torch.Tensor([0.0])
737
+ logtwopi = np.log(2.0 * np.pi)
738
+ return 0.5 * torch.sum(
739
+ logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var,
740
+ dim=dims,
741
+ )
742
+
743
+ def mode(self) -> torch.Tensor:
744
+ return self.mean
745
+
746
+
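A minimal sketch of how the encoder's `moments` tensor is consumed by this class; the import path and shapes are assumptions:

```py
import torch
from diffusers.models.autoencoders.vae import DiagonalGaussianDistribution  # import path assumed

# The moments tensor packs mean and log-variance along the channel axis, so a
# 4-channel latent needs 8 channels of parameters.
moments = torch.randn(1, 8, 32, 32)
posterior = DiagonalGaussianDistribution(moments)

z = posterior.sample(generator=torch.Generator().manual_seed(0))  # reparameterized sample, (1, 4, 32, 32)
mode = posterior.mode()                                           # deterministic mean
kl = posterior.kl()                                               # KL to a standard normal, shape (1,)

print(z.shape, mode.shape, kl.shape)
```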
747
+ class IdentityDistribution(object):
748
+ def __init__(self, parameters: torch.Tensor):
749
+ self.parameters = parameters
750
+
751
+ def sample(self, generator: Optional[torch.Generator] = None) -> torch.Tensor:
752
+ return self.parameters
753
+
754
+ def mode(self) -> torch.Tensor:
755
+ return self.parameters
756
+
757
+
758
+ class EncoderTiny(nn.Module):
759
+ r"""
760
+ The `EncoderTiny` layer is a simpler version of the `Encoder` layer.
761
+
762
+ Args:
763
+ in_channels (`int`):
764
+ The number of input channels.
765
+ out_channels (`int`):
766
+ The number of output channels.
767
+ num_blocks (`Tuple[int, ...]`):
768
+ Each value of the tuple represents a Conv2d layer followed by `value` number of `AutoencoderTinyBlock`'s to
769
+ use.
770
+ block_out_channels (`Tuple[int, ...]`):
771
+ The number of output channels for each block.
772
+ act_fn (`str`):
773
+ The activation function to use. See `~diffusers.models.activations.get_activation` for available options.
774
+ """
775
+
776
+ def __init__(
777
+ self,
778
+ in_channels: int,
779
+ out_channels: int,
780
+ num_blocks: Tuple[int, ...],
781
+ block_out_channels: Tuple[int, ...],
782
+ act_fn: str,
783
+ ):
784
+ super().__init__()
785
+
786
+ layers = []
787
+ for i, num_block in enumerate(num_blocks):
788
+ num_channels = block_out_channels[i]
789
+
790
+ if i == 0:
791
+ layers.append(nn.Conv2d(in_channels, num_channels, kernel_size=3, padding=1))
792
+ else:
793
+ layers.append(
794
+ nn.Conv2d(
795
+ num_channels,
796
+ num_channels,
797
+ kernel_size=3,
798
+ padding=1,
799
+ stride=2,
800
+ bias=False,
801
+ )
802
+ )
803
+
804
+ for _ in range(num_block):
805
+ layers.append(AutoencoderTinyBlock(num_channels, num_channels, act_fn))
806
+
807
+ layers.append(nn.Conv2d(block_out_channels[-1], out_channels, kernel_size=3, padding=1))
808
+
809
+ self.layers = nn.Sequential(*layers)
810
+ self.gradient_checkpointing = False
811
+
812
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
813
+ r"""The forward method of the `EncoderTiny` class."""
814
+ if torch.is_grad_enabled() and self.gradient_checkpointing:
815
+ x = self._gradient_checkpointing_func(self.layers, x)
816
+
817
+ else:
818
+ # scale image from [-1, 1] to [0, 1] to match TAESD convention
819
+ x = self.layers(x.add(1).div(2))
820
+
821
+ return x
822
+
823
+
824
+ class DecoderTiny(nn.Module):
825
+ r"""
826
+ The `DecoderTiny` layer is a simpler version of the `Decoder` layer.
827
+
828
+ Args:
829
+ in_channels (`int`):
830
+ The number of input channels.
831
+ out_channels (`int`):
832
+ The number of output channels.
833
+ num_blocks (`Tuple[int, ...]`):
834
+ Each value of the tuple represents a Conv2d layer followed by `value` number of `AutoencoderTinyBlock`'s to
835
+ use.
836
+ block_out_channels (`Tuple[int, ...]`):
837
+ The number of output channels for each block.
838
+ upsampling_scaling_factor (`int`):
839
+ The scaling factor to use for upsampling.
840
+ act_fn (`str`):
841
+ The activation function to use. See `~diffusers.models.activations.get_activation` for available options.
842
+ """
843
+
844
+ def __init__(
845
+ self,
846
+ in_channels: int,
847
+ out_channels: int,
848
+ num_blocks: Tuple[int, ...],
849
+ block_out_channels: Tuple[int, ...],
850
+ upsampling_scaling_factor: int,
851
+ act_fn: str,
852
+ upsample_fn: str,
853
+ ):
854
+ super().__init__()
855
+
856
+ layers = [
857
+ nn.Conv2d(in_channels, block_out_channels[0], kernel_size=3, padding=1),
858
+ get_activation(act_fn),
859
+ ]
860
+
861
+ for i, num_block in enumerate(num_blocks):
862
+ is_final_block = i == (len(num_blocks) - 1)
863
+ num_channels = block_out_channels[i]
864
+
865
+ for _ in range(num_block):
866
+ layers.append(AutoencoderTinyBlock(num_channels, num_channels, act_fn))
867
+
868
+ if not is_final_block:
869
+ layers.append(nn.Upsample(scale_factor=upsampling_scaling_factor, mode=upsample_fn))
870
+
871
+ conv_out_channel = num_channels if not is_final_block else out_channels
872
+ layers.append(
873
+ nn.Conv2d(
874
+ num_channels,
875
+ conv_out_channel,
876
+ kernel_size=3,
877
+ padding=1,
878
+ bias=is_final_block,
879
+ )
880
+ )
881
+
882
+ self.layers = nn.Sequential(*layers)
883
+ self.gradient_checkpointing = False
884
+
885
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
886
+ r"""The forward method of the `DecoderTiny` class."""
887
+ # Clamp.
888
+ x = torch.tanh(x / 3) * 3
889
+
890
+ if torch.is_grad_enabled() and self.gradient_checkpointing:
891
+ x = self._gradient_checkpointing_func(self.layers, x)
892
+ else:
893
+ x = self.layers(x)
894
+
895
+ # scale image from [0, 1] to [-1, 1] to match diffusers convention
896
+ return x.mul(2).sub(1)
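A shape round-trip sketch for the tiny encoder/decoder pair; the import path and block layout below are assumptions, loosely following a TAESD-style config:

```py
import torch
from diffusers.models.autoencoders.vae import DecoderTiny, EncoderTiny  # import path assumed

encoder = EncoderTiny(
    in_channels=3, out_channels=4,
    num_blocks=(1, 3, 3, 3), block_out_channels=(64, 64, 64, 64), act_fn="relu",
)
decoder = DecoderTiny(
    in_channels=4, out_channels=3,
    num_blocks=(3, 3, 3, 1), block_out_channels=(64, 64, 64, 64),
    upsampling_scaling_factor=2, act_fn="relu", upsample_fn="nearest",
)

image = torch.rand(1, 3, 256, 256) * 2 - 1   # [-1, 1], rescaled to [0, 1] internally
latent = encoder(image)                       # (1, 4, 32, 32): three stride-2 convs -> /8
reconstruction = decoder(latent)              # (1, 3, 256, 256), back in [-1, 1]
print(latent.shape, reconstruction.shape)
```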
exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/models/autoencoders/vq_model.py ADDED
@@ -0,0 +1,185 @@
1
+ # Copyright 2025 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from dataclasses import dataclass
15
+ from typing import Optional, Tuple, Union
16
+
17
+ import torch
18
+ import torch.nn as nn
19
+
20
+ from ...configuration_utils import ConfigMixin, register_to_config
21
+ from ...utils import BaseOutput
22
+ from ...utils.accelerate_utils import apply_forward_hook
23
+ from ..autoencoders.vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
24
+ from ..modeling_utils import ModelMixin
25
+
26
+
27
+ @dataclass
28
+ class VQEncoderOutput(BaseOutput):
29
+ """
30
+ Output of VQModel encoding method.
31
+
32
+ Args:
33
+ latents (`torch.Tensor` of shape `(batch_size, num_channels, height, width)`):
34
+ The encoded output sample from the last layer of the model.
35
+ """
36
+
37
+ latents: torch.Tensor
38
+
39
+
40
+ class VQModel(ModelMixin, ConfigMixin):
41
+ r"""
42
+ A VQ-VAE model for decoding latent representations.
43
+
44
+ This model inherits from [`ModelMixin`]. Check the superclass documentation for its generic methods implemented
45
+ for all models (such as downloading or saving).
46
+
47
+ Parameters:
48
+ in_channels (int, *optional*, defaults to 3): Number of channels in the input image.
49
+ out_channels (int, *optional*, defaults to 3): Number of channels in the output.
50
+ down_block_types (`Tuple[str]`, *optional*, defaults to `("DownEncoderBlock2D",)`):
51
+ Tuple of downsample block types.
52
+ up_block_types (`Tuple[str]`, *optional*, defaults to `("UpDecoderBlock2D",)`):
53
+ Tuple of upsample block types.
54
+ block_out_channels (`Tuple[int]`, *optional*, defaults to `(64,)`):
55
+ Tuple of block output channels.
56
+ layers_per_block (`int`, *optional*, defaults to `1`): Number of layers per block.
57
+ act_fn (`str`, *optional*, defaults to `"silu"`): The activation function to use.
58
+ latent_channels (`int`, *optional*, defaults to `3`): Number of channels in the latent space.
59
+ sample_size (`int`, *optional*, defaults to `32`): Sample input size.
60
+ num_vq_embeddings (`int`, *optional*, defaults to `256`): Number of codebook vectors in the VQ-VAE.
61
+ norm_num_groups (`int`, *optional*, defaults to `32`): Number of groups for normalization layers.
62
+ vq_embed_dim (`int`, *optional*): Hidden dim of codebook vectors in the VQ-VAE.
63
+ scaling_factor (`float`, *optional*, defaults to `0.18215`):
64
+ The component-wise standard deviation of the trained latent space computed using the first batch of the
65
+ training set. This is used to scale the latent space to have unit variance when training the diffusion
66
+ model. The latents are scaled with the formula `z = z * scaling_factor` before being passed to the
67
+ diffusion model. When decoding, the latents are scaled back to the original scale with the formula: `z = 1
68
+ / scaling_factor * z`. For more details, refer to sections 4.3.2 and D.1 of the [High-Resolution Image
69
+ Synthesis with Latent Diffusion Models](https://huggingface.co/papers/2112.10752) paper.
70
+ norm_type (`str`, *optional*, defaults to `"group"`):
71
+ Type of normalization layer to use. Can be one of `"group"` or `"spatial"`.
72
+ """
73
+
74
+ _skip_layerwise_casting_patterns = ["quantize"]
75
+ _supports_group_offloading = False
76
+
77
+ @register_to_config
78
+ def __init__(
79
+ self,
80
+ in_channels: int = 3,
81
+ out_channels: int = 3,
82
+ down_block_types: Tuple[str, ...] = ("DownEncoderBlock2D",),
83
+ up_block_types: Tuple[str, ...] = ("UpDecoderBlock2D",),
84
+ block_out_channels: Tuple[int, ...] = (64,),
85
+ layers_per_block: int = 1,
86
+ act_fn: str = "silu",
87
+ latent_channels: int = 3,
88
+ sample_size: int = 32,
89
+ num_vq_embeddings: int = 256,
90
+ norm_num_groups: int = 32,
91
+ vq_embed_dim: Optional[int] = None,
92
+ scaling_factor: float = 0.18215,
93
+ norm_type: str = "group", # group, spatial
94
+ mid_block_add_attention=True,
95
+ lookup_from_codebook=False,
96
+ force_upcast=False,
97
+ ):
98
+ super().__init__()
99
+
100
+ # pass init params to Encoder
101
+ self.encoder = Encoder(
102
+ in_channels=in_channels,
103
+ out_channels=latent_channels,
104
+ down_block_types=down_block_types,
105
+ block_out_channels=block_out_channels,
106
+ layers_per_block=layers_per_block,
107
+ act_fn=act_fn,
108
+ norm_num_groups=norm_num_groups,
109
+ double_z=False,
110
+ mid_block_add_attention=mid_block_add_attention,
111
+ )
112
+
113
+ vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels
114
+
115
+ self.quant_conv = nn.Conv2d(latent_channels, vq_embed_dim, 1)
116
+ self.quantize = VectorQuantizer(num_vq_embeddings, vq_embed_dim, beta=0.25, remap=None, sane_index_shape=False)
117
+ self.post_quant_conv = nn.Conv2d(vq_embed_dim, latent_channels, 1)
118
+
119
+ # pass init params to Decoder
120
+ self.decoder = Decoder(
121
+ in_channels=latent_channels,
122
+ out_channels=out_channels,
123
+ up_block_types=up_block_types,
124
+ block_out_channels=block_out_channels,
125
+ layers_per_block=layers_per_block,
126
+ act_fn=act_fn,
127
+ norm_num_groups=norm_num_groups,
128
+ norm_type=norm_type,
129
+ mid_block_add_attention=mid_block_add_attention,
130
+ )
131
+
132
+ @apply_forward_hook
133
+ def encode(self, x: torch.Tensor, return_dict: bool = True) -> VQEncoderOutput:
134
+ h = self.encoder(x)
135
+ h = self.quant_conv(h)
136
+
137
+ if not return_dict:
138
+ return (h,)
139
+
140
+ return VQEncoderOutput(latents=h)
141
+
142
+ @apply_forward_hook
143
+ def decode(
144
+ self, h: torch.Tensor, force_not_quantize: bool = False, return_dict: bool = True, shape=None
145
+ ) -> Union[DecoderOutput, torch.Tensor]:
146
+ # also go through quantization layer
147
+ if not force_not_quantize:
148
+ quant, commit_loss, _ = self.quantize(h)
149
+ elif self.config.lookup_from_codebook:
150
+ quant = self.quantize.get_codebook_entry(h, shape)
151
+ commit_loss = torch.zeros((h.shape[0])).to(h.device, dtype=h.dtype)
152
+ else:
153
+ quant = h
154
+ commit_loss = torch.zeros((h.shape[0])).to(h.device, dtype=h.dtype)
155
+ quant2 = self.post_quant_conv(quant)
156
+ dec = self.decoder(quant2, quant if self.config.norm_type == "spatial" else None)
157
+
158
+ if not return_dict:
159
+ return dec, commit_loss
160
+
161
+ return DecoderOutput(sample=dec, commit_loss=commit_loss)
162
+
163
+ def forward(
164
+ self, sample: torch.Tensor, return_dict: bool = True
165
+ ) -> Union[DecoderOutput, Tuple[torch.Tensor, ...]]:
166
+ r"""
167
+ The [`VQModel`] forward method.
168
+
169
+ Args:
170
+ sample (`torch.Tensor`): Input sample.
171
+ return_dict (`bool`, *optional*, defaults to `True`):
172
+ Whether or not to return a [`~models.autoencoders.vae.DecoderOutput`] instead of a plain tuple.
173
+
174
+ Returns:
175
+ [`~models.autoencoders.vae.DecoderOutput`] or `tuple`:
176
+ If return_dict is True, a [`~models.autoencoders.vae.DecoderOutput`] is returned, otherwise a
177
+ plain `tuple` is returned.
178
+ """
179
+
180
+ h = self.encode(sample).latents
181
+ dec = self.decode(h)
182
+
183
+ if not return_dict:
184
+ return dec.sample, dec.commit_loss
185
+ return dec
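As a quick sanity check of the class above, a minimal sketch using the default configuration. The shapes in the comments are what the defaults produce; the top-level `diffusers.VQModel` export is assumed.

import torch
from diffusers import VQModel  # assumed top-level export

model = VQModel()  # defaults: 3-channel images, 3-channel latents, 256 codebook entries
image = torch.randn(1, 3, 32, 32)

with torch.no_grad():
    latents = model.encode(image).latents  # pre-quantization latents, shape (1, 3, 32, 32)
    out = model.decode(latents)            # quantizes against the codebook, then decodes
print(out.sample.shape)  # torch.Size([1, 3, 32, 32]); out.commit_loss is the scalar codebook commitment loss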
exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/models/controlnets/__init__.py ADDED
@@ -0,0 +1,26 @@
1
+ from ...utils import is_flax_available, is_torch_available
2
+
3
+
4
+ if is_torch_available():
5
+ from .controlnet import ControlNetModel, ControlNetOutput
6
+ from .controlnet_flux import FluxControlNetModel, FluxControlNetOutput, FluxMultiControlNetModel
7
+ from .controlnet_hunyuan import (
8
+ HunyuanControlNetOutput,
9
+ HunyuanDiT2DControlNetModel,
10
+ HunyuanDiT2DMultiControlNetModel,
11
+ )
12
+ from .controlnet_qwenimage import QwenImageControlNetModel, QwenImageMultiControlNetModel
13
+ from .controlnet_sana import SanaControlNetModel
14
+ from .controlnet_sd3 import SD3ControlNetModel, SD3ControlNetOutput, SD3MultiControlNetModel
15
+ from .controlnet_sparsectrl import (
16
+ SparseControlNetConditioningEmbedding,
17
+ SparseControlNetModel,
18
+ SparseControlNetOutput,
19
+ )
20
+ from .controlnet_union import ControlNetUnionModel
21
+ from .controlnet_xs import ControlNetXSAdapter, ControlNetXSOutput, UNetControlNetXSModel
22
+ from .multicontrolnet import MultiControlNetModel
23
+ from .multicontrolnet_union import MultiControlNetUnionModel
24
+
25
+ if is_flax_available():
26
+ from .controlnet_flax import FlaxControlNetModel
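The guarded imports above mean the PyTorch ControlNet variants are only importable when torch is installed, and the Flax variant only when flax/jax are. A minimal sketch of consuming this subpackage (the equivalent top-level exports in `diffusers` are an assumption here, not shown in this diff):

from diffusers.utils import is_torch_available

if is_torch_available():
    # Same classes re-exported by this subpackage's __init__.
    from diffusers.models.controlnets import ControlNetModel, MultiControlNetModel
    controlnet = ControlNetModel()  # default, SD-1.x-sized ControlNet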
exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/models/controlnets/controlnet.py ADDED
@@ -0,0 +1,867 @@
1
+ # Copyright 2025 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from dataclasses import dataclass
15
+ from typing import Any, Dict, List, Optional, Tuple, Union
16
+
17
+ import torch
18
+ from torch import nn
19
+ from torch.nn import functional as F
20
+
21
+ from ...configuration_utils import ConfigMixin, register_to_config
22
+ from ...loaders.single_file_model import FromOriginalModelMixin
23
+ from ...utils import BaseOutput, logging
24
+ from ..attention_processor import (
25
+ ADDED_KV_ATTENTION_PROCESSORS,
26
+ CROSS_ATTENTION_PROCESSORS,
27
+ AttentionProcessor,
28
+ AttnAddedKVProcessor,
29
+ AttnProcessor,
30
+ )
31
+ from ..embeddings import TextImageProjection, TextImageTimeEmbedding, TextTimeEmbedding, TimestepEmbedding, Timesteps
32
+ from ..modeling_utils import ModelMixin
33
+ from ..unets.unet_2d_blocks import (
34
+ UNetMidBlock2D,
35
+ UNetMidBlock2DCrossAttn,
36
+ get_down_block,
37
+ )
38
+ from ..unets.unet_2d_condition import UNet2DConditionModel
39
+
40
+
41
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
42
+
43
+
44
+ @dataclass
45
+ class ControlNetOutput(BaseOutput):
46
+ """
47
+ The output of [`ControlNetModel`].
48
+
49
+ Args:
50
+ down_block_res_samples (`tuple[torch.Tensor]`):
51
+ A tuple of downsample activations at different resolutions for each downsampling block. Each tensor should
52
+ be of shape `(batch_size, channel * resolution, height // resolution, width // resolution)`. Output can be
53
+ used to condition the original UNet's downsampling activations.
54
+ mid_block_res_sample (`torch.Tensor`):
55
+ The activation of the middle block (the lowest sample resolution). Each tensor should be of shape
56
+ `(batch_size, channel * lowest_resolution, height // lowest_resolution, width // lowest_resolution)`.
57
+ Output can be used to condition the original UNet's middle block activation.
58
+ """
59
+
60
+ down_block_res_samples: Tuple[torch.Tensor]
61
+ mid_block_res_sample: torch.Tensor
62
+
63
+
64
+ class ControlNetConditioningEmbedding(nn.Module):
65
+ """
66
+ Quoting from https://huggingface.co/papers/2302.05543: "Stable Diffusion uses a pre-processing method similar to
67
+ VQ-GAN [11] to convert the entire dataset of 512 × 512 images into smaller 64 × 64 “latent images” for stabilized
68
+ training. This requires ControlNets to convert image-based conditions to 64 × 64 feature space to match the
69
+ convolution size. We use a tiny network E(·) of four convolution layers with 4 × 4 kernels and 2 × 2 strides
70
+ (activated by ReLU, channels are 16, 32, 64, 128, initialized with Gaussian weights, trained jointly with the full
71
+ model) to encode image-space conditions ... into feature maps ..."
72
+ """
73
+
74
+ def __init__(
75
+ self,
76
+ conditioning_embedding_channels: int,
77
+ conditioning_channels: int = 3,
78
+ block_out_channels: Tuple[int, ...] = (16, 32, 96, 256),
79
+ ):
80
+ super().__init__()
81
+
82
+ self.conv_in = nn.Conv2d(conditioning_channels, block_out_channels[0], kernel_size=3, padding=1)
83
+
84
+ self.blocks = nn.ModuleList([])
85
+
86
+ for i in range(len(block_out_channels) - 1):
87
+ channel_in = block_out_channels[i]
88
+ channel_out = block_out_channels[i + 1]
89
+ self.blocks.append(nn.Conv2d(channel_in, channel_in, kernel_size=3, padding=1))
90
+ self.blocks.append(nn.Conv2d(channel_in, channel_out, kernel_size=3, padding=1, stride=2))
91
+
92
+ self.conv_out = zero_module(
93
+ nn.Conv2d(block_out_channels[-1], conditioning_embedding_channels, kernel_size=3, padding=1)
94
+ )
95
+
96
+ def forward(self, conditioning):
97
+ embedding = self.conv_in(conditioning)
98
+ embedding = F.silu(embedding)
99
+
100
+ for block in self.blocks:
101
+ embedding = block(embedding)
102
+ embedding = F.silu(embedding)
103
+
104
+ embedding = self.conv_out(embedding)
105
+
106
+ return embedding
107
+
108
+
109
+ class ControlNetModel(ModelMixin, ConfigMixin, FromOriginalModelMixin):
110
+ """
111
+ A ControlNet model.
112
+
113
+ Args:
114
+ in_channels (`int`, defaults to 4):
115
+ The number of channels in the input sample.
116
+ flip_sin_to_cos (`bool`, defaults to `True`):
117
+ Whether to flip the sin to cos in the time embedding.
118
+ freq_shift (`int`, defaults to 0):
119
+ The frequency shift to apply to the time embedding.
120
+ down_block_types (`tuple[str]`, defaults to `("CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D")`):
121
+ The tuple of downsample blocks to use.
122
+ only_cross_attention (`Union[bool, Tuple[bool]]`, defaults to `False`):
+ Whether the attention blocks should use only cross-attention (no self-attention).
123
+ block_out_channels (`tuple[int]`, defaults to `(320, 640, 1280, 1280)`):
124
+ The tuple of output channels for each block.
125
+ layers_per_block (`int`, defaults to 2):
126
+ The number of layers per block.
127
+ downsample_padding (`int`, defaults to 1):
128
+ The padding to use for the downsampling convolution.
129
+ mid_block_scale_factor (`float`, defaults to 1):
130
+ The scale factor to use for the mid block.
131
+ act_fn (`str`, defaults to "silu"):
132
+ The activation function to use.
133
+ norm_num_groups (`int`, *optional*, defaults to 32):
134
+ The number of groups to use for the normalization. If None, normalization and activation layers are skipped
135
+ in post-processing.
136
+ norm_eps (`float`, defaults to 1e-5):
137
+ The epsilon to use for the normalization.
138
+ cross_attention_dim (`int`, defaults to 1280):
139
+ The dimension of the cross attention features.
140
+ transformer_layers_per_block (`int` or `Tuple[int]`, *optional*, defaults to 1):
141
+ The number of transformer blocks of type [`~models.attention.BasicTransformerBlock`]. Only relevant for
142
+ [`~models.unet_2d_blocks.CrossAttnDownBlock2D`], [`~models.unet_2d_blocks.CrossAttnUpBlock2D`],
143
+ [`~models.unet_2d_blocks.UNetMidBlock2DCrossAttn`].
144
+ encoder_hid_dim (`int`, *optional*, defaults to None):
145
+ If `encoder_hid_dim_type` is defined, `encoder_hidden_states` will be projected from `encoder_hid_dim`
146
+ dimension to `cross_attention_dim`.
147
+ encoder_hid_dim_type (`str`, *optional*, defaults to `None`):
148
+ If given, the `encoder_hidden_states` and potentially other embeddings are down-projected to text
149
+ embeddings of dimension `cross_attention_dim` according to `encoder_hid_dim_type`.
150
+ attention_head_dim (`Union[int, Tuple[int]]`, defaults to 8):
151
+ The dimension of the attention heads.
152
+ use_linear_projection (`bool`, defaults to `False`):
+ Whether to use a linear projection instead of a convolution for the transformer blocks' input and output projections.
153
+ class_embed_type (`str`, *optional*, defaults to `None`):
154
+ The type of class embedding to use which is ultimately summed with the time embeddings. Choose from None,
155
+ `"timestep"`, `"identity"`, `"projection"`, or `"simple_projection"`.
156
+ addition_embed_type (`str`, *optional*, defaults to `None`):
157
+ Configures an optional embedding which will be summed with the time embeddings. Choose from `None` or
158
+ "text". "text" will use the `TextTimeEmbedding` layer.
159
+ num_class_embeds (`int`, *optional*, defaults to 0):
160
+ Input dimension of the learnable embedding matrix to be projected to `time_embed_dim`, when performing
161
+ class conditioning with `class_embed_type` equal to `None`.
162
+ upcast_attention (`bool`, defaults to `False`):
+ Whether to upcast the attention computation to float32.
163
+ resnet_time_scale_shift (`str`, defaults to `"default"`):
164
+ Time scale shift config for ResNet blocks (see `ResnetBlock2D`). Choose from `default` or `scale_shift`.
165
+ projection_class_embeddings_input_dim (`int`, *optional*, defaults to `None`):
166
+ The dimension of the `class_labels` input when `class_embed_type="projection"`. Required when
167
+ `class_embed_type="projection"`.
168
+ controlnet_conditioning_channel_order (`str`, defaults to `"rgb"`):
169
+ The channel order of conditional image. Will convert to `rgb` if it's `bgr`.
170
+ conditioning_embedding_out_channels (`tuple[int]`, *optional*, defaults to `(16, 32, 96, 256)`):
171
+ The tuple of output channel for each block in the `conditioning_embedding` layer.
172
+ global_pool_conditions (`bool`, defaults to `False`):
173
+ TODO(Patrick) - unused parameter.
174
+ addition_embed_type_num_heads (`int`, defaults to 64):
175
+ The number of heads to use for the `TextTimeEmbedding` layer.
176
+ """
177
+
178
+ _supports_gradient_checkpointing = True
179
+
180
+ @register_to_config
181
+ def __init__(
182
+ self,
183
+ in_channels: int = 4,
184
+ conditioning_channels: int = 3,
185
+ flip_sin_to_cos: bool = True,
186
+ freq_shift: int = 0,
187
+ down_block_types: Tuple[str, ...] = (
188
+ "CrossAttnDownBlock2D",
189
+ "CrossAttnDownBlock2D",
190
+ "CrossAttnDownBlock2D",
191
+ "DownBlock2D",
192
+ ),
193
+ mid_block_type: Optional[str] = "UNetMidBlock2DCrossAttn",
194
+ only_cross_attention: Union[bool, Tuple[bool]] = False,
195
+ block_out_channels: Tuple[int, ...] = (320, 640, 1280, 1280),
196
+ layers_per_block: int = 2,
197
+ downsample_padding: int = 1,
198
+ mid_block_scale_factor: float = 1,
199
+ act_fn: str = "silu",
200
+ norm_num_groups: Optional[int] = 32,
201
+ norm_eps: float = 1e-5,
202
+ cross_attention_dim: int = 1280,
203
+ transformer_layers_per_block: Union[int, Tuple[int, ...]] = 1,
204
+ encoder_hid_dim: Optional[int] = None,
205
+ encoder_hid_dim_type: Optional[str] = None,
206
+ attention_head_dim: Union[int, Tuple[int, ...]] = 8,
207
+ num_attention_heads: Optional[Union[int, Tuple[int, ...]]] = None,
208
+ use_linear_projection: bool = False,
209
+ class_embed_type: Optional[str] = None,
210
+ addition_embed_type: Optional[str] = None,
211
+ addition_time_embed_dim: Optional[int] = None,
212
+ num_class_embeds: Optional[int] = None,
213
+ upcast_attention: bool = False,
214
+ resnet_time_scale_shift: str = "default",
215
+ projection_class_embeddings_input_dim: Optional[int] = None,
216
+ controlnet_conditioning_channel_order: str = "rgb",
217
+ conditioning_embedding_out_channels: Optional[Tuple[int, ...]] = (16, 32, 96, 256),
218
+ global_pool_conditions: bool = False,
219
+ addition_embed_type_num_heads: int = 64,
220
+ ):
221
+ super().__init__()
222
+
223
+ # If `num_attention_heads` is not defined (which is the case for most models)
224
+ # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
225
+ # The reason for this behavior is to correct for incorrectly named variables that were introduced
226
+ # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
227
+ # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
228
+ # which is why we correct for the naming here.
229
+ num_attention_heads = num_attention_heads or attention_head_dim
230
+
231
+ # Check inputs
232
+ if len(block_out_channels) != len(down_block_types):
233
+ raise ValueError(
234
+ f"Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. `down_block_types`: {down_block_types}."
235
+ )
236
+
237
+ if not isinstance(only_cross_attention, bool) and len(only_cross_attention) != len(down_block_types):
238
+ raise ValueError(
239
+ f"Must provide the same number of `only_cross_attention` as `down_block_types`. `only_cross_attention`: {only_cross_attention}. `down_block_types`: {down_block_types}."
240
+ )
241
+
242
+ if not isinstance(num_attention_heads, int) and len(num_attention_heads) != len(down_block_types):
243
+ raise ValueError(
244
+ f"Must provide the same number of `num_attention_heads` as `down_block_types`. `num_attention_heads`: {num_attention_heads}. `down_block_types`: {down_block_types}."
245
+ )
246
+
247
+ if isinstance(transformer_layers_per_block, int):
248
+ transformer_layers_per_block = [transformer_layers_per_block] * len(down_block_types)
249
+
250
+ # input
251
+ conv_in_kernel = 3
252
+ conv_in_padding = (conv_in_kernel - 1) // 2
253
+ self.conv_in = nn.Conv2d(
254
+ in_channels, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding
255
+ )
256
+
257
+ # time
258
+ time_embed_dim = block_out_channels[0] * 4
259
+ self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift)
260
+ timestep_input_dim = block_out_channels[0]
261
+ self.time_embedding = TimestepEmbedding(
262
+ timestep_input_dim,
263
+ time_embed_dim,
264
+ act_fn=act_fn,
265
+ )
266
+
267
+ if encoder_hid_dim_type is None and encoder_hid_dim is not None:
268
+ encoder_hid_dim_type = "text_proj"
269
+ self.register_to_config(encoder_hid_dim_type=encoder_hid_dim_type)
270
+ logger.info("encoder_hid_dim_type defaults to 'text_proj' as `encoder_hid_dim` is defined.")
271
+
272
+ if encoder_hid_dim is None and encoder_hid_dim_type is not None:
273
+ raise ValueError(
274
+ f"`encoder_hid_dim` has to be defined when `encoder_hid_dim_type` is set to {encoder_hid_dim_type}."
275
+ )
276
+
277
+ if encoder_hid_dim_type == "text_proj":
278
+ self.encoder_hid_proj = nn.Linear(encoder_hid_dim, cross_attention_dim)
279
+ elif encoder_hid_dim_type == "text_image_proj":
280
+ # image_embed_dim DOESN'T have to be `cross_attention_dim`. To not clutter the __init__ too much
281
+ # they are set to `cross_attention_dim` here as this is exactly the required dimension for the currently only use
282
+ # case when `encoder_hid_dim_type == "text_image_proj"` (Kandinsky 2.1)
283
+ self.encoder_hid_proj = TextImageProjection(
284
+ text_embed_dim=encoder_hid_dim,
285
+ image_embed_dim=cross_attention_dim,
286
+ cross_attention_dim=cross_attention_dim,
287
+ )
288
+
289
+ elif encoder_hid_dim_type is not None:
290
+ raise ValueError(
291
+ f"encoder_hid_dim_type: {encoder_hid_dim_type} must be None, 'text_proj' or 'text_image_proj'."
292
+ )
293
+ else:
294
+ self.encoder_hid_proj = None
295
+
296
+ # class embedding
297
+ if class_embed_type is None and num_class_embeds is not None:
298
+ self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim)
299
+ elif class_embed_type == "timestep":
300
+ self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)
301
+ elif class_embed_type == "identity":
302
+ self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim)
303
+ elif class_embed_type == "projection":
304
+ if projection_class_embeddings_input_dim is None:
305
+ raise ValueError(
306
+ "`class_embed_type`: 'projection' requires `projection_class_embeddings_input_dim` be set"
307
+ )
308
+ # The projection `class_embed_type` is the same as the timestep `class_embed_type` except
309
+ # 1. the `class_labels` inputs are not first converted to sinusoidal embeddings
310
+ # 2. it projects from an arbitrary input dimension.
311
+ #
312
+ # Note that `TimestepEmbedding` is quite general, being mainly linear layers and activations.
313
+ # When used for embedding actual timesteps, the timesteps are first converted to sinusoidal embeddings.
314
+ # As a result, `TimestepEmbedding` can be passed arbitrary vectors.
315
+ self.class_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim)
316
+ else:
317
+ self.class_embedding = None
318
+
319
+ if addition_embed_type == "text":
320
+ if encoder_hid_dim is not None:
321
+ text_time_embedding_from_dim = encoder_hid_dim
322
+ else:
323
+ text_time_embedding_from_dim = cross_attention_dim
324
+
325
+ self.add_embedding = TextTimeEmbedding(
326
+ text_time_embedding_from_dim, time_embed_dim, num_heads=addition_embed_type_num_heads
327
+ )
328
+ elif addition_embed_type == "text_image":
329
+ # text_embed_dim and image_embed_dim DON'T have to be `cross_attention_dim`. To not clutter the __init__ too much
330
+ # they are set to `cross_attention_dim` here as this is exactly the required dimension for the currently only use
331
+ # case when `addition_embed_type == "text_image"` (Kandinsky 2.1)`
332
+ self.add_embedding = TextImageTimeEmbedding(
333
+ text_embed_dim=cross_attention_dim, image_embed_dim=cross_attention_dim, time_embed_dim=time_embed_dim
334
+ )
335
+ elif addition_embed_type == "text_time":
336
+ self.add_time_proj = Timesteps(addition_time_embed_dim, flip_sin_to_cos, freq_shift)
337
+ self.add_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim)
338
+
339
+ elif addition_embed_type is not None:
340
+ raise ValueError(f"addition_embed_type: {addition_embed_type} must be None, 'text' or 'text_image'.")
341
+
342
+ # control net conditioning embedding
343
+ self.controlnet_cond_embedding = ControlNetConditioningEmbedding(
344
+ conditioning_embedding_channels=block_out_channels[0],
345
+ block_out_channels=conditioning_embedding_out_channels,
346
+ conditioning_channels=conditioning_channels,
347
+ )
348
+
349
+ self.down_blocks = nn.ModuleList([])
350
+ self.controlnet_down_blocks = nn.ModuleList([])
351
+
352
+ if isinstance(only_cross_attention, bool):
353
+ only_cross_attention = [only_cross_attention] * len(down_block_types)
354
+
355
+ if isinstance(attention_head_dim, int):
356
+ attention_head_dim = (attention_head_dim,) * len(down_block_types)
357
+
358
+ if isinstance(num_attention_heads, int):
359
+ num_attention_heads = (num_attention_heads,) * len(down_block_types)
360
+
361
+ # down
362
+ output_channel = block_out_channels[0]
363
+
364
+ controlnet_block = nn.Conv2d(output_channel, output_channel, kernel_size=1)
365
+ controlnet_block = zero_module(controlnet_block)
366
+ self.controlnet_down_blocks.append(controlnet_block)
367
+
368
+ for i, down_block_type in enumerate(down_block_types):
369
+ input_channel = output_channel
370
+ output_channel = block_out_channels[i]
371
+ is_final_block = i == len(block_out_channels) - 1
372
+
373
+ down_block = get_down_block(
374
+ down_block_type,
375
+ num_layers=layers_per_block,
376
+ transformer_layers_per_block=transformer_layers_per_block[i],
377
+ in_channels=input_channel,
378
+ out_channels=output_channel,
379
+ temb_channels=time_embed_dim,
380
+ add_downsample=not is_final_block,
381
+ resnet_eps=norm_eps,
382
+ resnet_act_fn=act_fn,
383
+ resnet_groups=norm_num_groups,
384
+ cross_attention_dim=cross_attention_dim,
385
+ num_attention_heads=num_attention_heads[i],
386
+ attention_head_dim=attention_head_dim[i] if attention_head_dim[i] is not None else output_channel,
387
+ downsample_padding=downsample_padding,
388
+ use_linear_projection=use_linear_projection,
389
+ only_cross_attention=only_cross_attention[i],
390
+ upcast_attention=upcast_attention,
391
+ resnet_time_scale_shift=resnet_time_scale_shift,
392
+ )
393
+ self.down_blocks.append(down_block)
394
+
395
+ for _ in range(layers_per_block):
396
+ controlnet_block = nn.Conv2d(output_channel, output_channel, kernel_size=1)
397
+ controlnet_block = zero_module(controlnet_block)
398
+ self.controlnet_down_blocks.append(controlnet_block)
399
+
400
+ if not is_final_block:
401
+ controlnet_block = nn.Conv2d(output_channel, output_channel, kernel_size=1)
402
+ controlnet_block = zero_module(controlnet_block)
403
+ self.controlnet_down_blocks.append(controlnet_block)
404
+
405
+ # mid
406
+ mid_block_channel = block_out_channels[-1]
407
+
408
+ controlnet_block = nn.Conv2d(mid_block_channel, mid_block_channel, kernel_size=1)
409
+ controlnet_block = zero_module(controlnet_block)
410
+ self.controlnet_mid_block = controlnet_block
411
+
412
+ if mid_block_type == "UNetMidBlock2DCrossAttn":
413
+ self.mid_block = UNetMidBlock2DCrossAttn(
414
+ transformer_layers_per_block=transformer_layers_per_block[-1],
415
+ in_channels=mid_block_channel,
416
+ temb_channels=time_embed_dim,
417
+ resnet_eps=norm_eps,
418
+ resnet_act_fn=act_fn,
419
+ output_scale_factor=mid_block_scale_factor,
420
+ resnet_time_scale_shift=resnet_time_scale_shift,
421
+ cross_attention_dim=cross_attention_dim,
422
+ num_attention_heads=num_attention_heads[-1],
423
+ resnet_groups=norm_num_groups,
424
+ use_linear_projection=use_linear_projection,
425
+ upcast_attention=upcast_attention,
426
+ )
427
+ elif mid_block_type == "UNetMidBlock2D":
428
+ self.mid_block = UNetMidBlock2D(
429
+ in_channels=block_out_channels[-1],
430
+ temb_channels=time_embed_dim,
431
+ num_layers=0,
432
+ resnet_eps=norm_eps,
433
+ resnet_act_fn=act_fn,
434
+ output_scale_factor=mid_block_scale_factor,
435
+ resnet_groups=norm_num_groups,
436
+ resnet_time_scale_shift=resnet_time_scale_shift,
437
+ add_attention=False,
438
+ )
439
+ else:
440
+ raise ValueError(f"unknown mid_block_type : {mid_block_type}")
441
+
442
+ @classmethod
443
+ def from_unet(
444
+ cls,
445
+ unet: UNet2DConditionModel,
446
+ controlnet_conditioning_channel_order: str = "rgb",
447
+ conditioning_embedding_out_channels: Optional[Tuple[int, ...]] = (16, 32, 96, 256),
448
+ load_weights_from_unet: bool = True,
449
+ conditioning_channels: int = 3,
450
+ ):
451
+ r"""
452
+ Instantiate a [`ControlNetModel`] from [`UNet2DConditionModel`].
453
+
454
+ Parameters:
455
+ unet (`UNet2DConditionModel`):
456
+ The UNet model weights to copy to the [`ControlNetModel`]. All configuration options are also copied
457
+ where applicable.
458
+ """
459
+ transformer_layers_per_block = (
460
+ unet.config.transformer_layers_per_block if "transformer_layers_per_block" in unet.config else 1
461
+ )
462
+ encoder_hid_dim = unet.config.encoder_hid_dim if "encoder_hid_dim" in unet.config else None
463
+ encoder_hid_dim_type = unet.config.encoder_hid_dim_type if "encoder_hid_dim_type" in unet.config else None
464
+ addition_embed_type = unet.config.addition_embed_type if "addition_embed_type" in unet.config else None
465
+ addition_time_embed_dim = (
466
+ unet.config.addition_time_embed_dim if "addition_time_embed_dim" in unet.config else None
467
+ )
468
+
469
+ controlnet = cls(
470
+ encoder_hid_dim=encoder_hid_dim,
471
+ encoder_hid_dim_type=encoder_hid_dim_type,
472
+ addition_embed_type=addition_embed_type,
473
+ addition_time_embed_dim=addition_time_embed_dim,
474
+ transformer_layers_per_block=transformer_layers_per_block,
475
+ in_channels=unet.config.in_channels,
476
+ flip_sin_to_cos=unet.config.flip_sin_to_cos,
477
+ freq_shift=unet.config.freq_shift,
478
+ down_block_types=unet.config.down_block_types,
479
+ only_cross_attention=unet.config.only_cross_attention,
480
+ block_out_channels=unet.config.block_out_channels,
481
+ layers_per_block=unet.config.layers_per_block,
482
+ downsample_padding=unet.config.downsample_padding,
483
+ mid_block_scale_factor=unet.config.mid_block_scale_factor,
484
+ act_fn=unet.config.act_fn,
485
+ norm_num_groups=unet.config.norm_num_groups,
486
+ norm_eps=unet.config.norm_eps,
487
+ cross_attention_dim=unet.config.cross_attention_dim,
488
+ attention_head_dim=unet.config.attention_head_dim,
489
+ num_attention_heads=unet.config.num_attention_heads,
490
+ use_linear_projection=unet.config.use_linear_projection,
491
+ class_embed_type=unet.config.class_embed_type,
492
+ num_class_embeds=unet.config.num_class_embeds,
493
+ upcast_attention=unet.config.upcast_attention,
494
+ resnet_time_scale_shift=unet.config.resnet_time_scale_shift,
495
+ projection_class_embeddings_input_dim=unet.config.projection_class_embeddings_input_dim,
496
+ mid_block_type=unet.config.mid_block_type,
497
+ controlnet_conditioning_channel_order=controlnet_conditioning_channel_order,
498
+ conditioning_embedding_out_channels=conditioning_embedding_out_channels,
499
+ conditioning_channels=conditioning_channels,
500
+ )
501
+
502
+ if load_weights_from_unet:
503
+ controlnet.conv_in.load_state_dict(unet.conv_in.state_dict())
504
+ controlnet.time_proj.load_state_dict(unet.time_proj.state_dict())
505
+ controlnet.time_embedding.load_state_dict(unet.time_embedding.state_dict())
506
+
507
+ if controlnet.class_embedding:
508
+ controlnet.class_embedding.load_state_dict(unet.class_embedding.state_dict())
509
+
510
+ if hasattr(controlnet, "add_embedding"):
511
+ controlnet.add_embedding.load_state_dict(unet.add_embedding.state_dict())
512
+
513
+ controlnet.down_blocks.load_state_dict(unet.down_blocks.state_dict())
514
+ controlnet.mid_block.load_state_dict(unet.mid_block.state_dict())
515
+
516
+ return controlnet
517
+
518
+ @property
519
+ # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.attn_processors
520
+ def attn_processors(self) -> Dict[str, AttentionProcessor]:
521
+ r"""
522
+ Returns:
523
+ `dict` of attention processors: A dictionary containing all attention processors used in the model with
524
+ indexed by its weight name.
525
+ """
526
+ # set recursively
527
+ processors = {}
528
+
529
+ def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
530
+ if hasattr(module, "get_processor"):
531
+ processors[f"{name}.processor"] = module.get_processor()
532
+
533
+ for sub_name, child in module.named_children():
534
+ fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)
535
+
536
+ return processors
537
+
538
+ for name, module in self.named_children():
539
+ fn_recursive_add_processors(name, module, processors)
540
+
541
+ return processors
542
+
543
+ # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_attn_processor
544
+ def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
545
+ r"""
546
+ Sets the attention processor to use to compute attention.
547
+
548
+ Parameters:
549
+ processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`):
550
+ The instantiated processor class or a dictionary of processor classes that will be set as the processor
551
+ for **all** `Attention` layers.
552
+
553
+ If `processor` is a dict, the key needs to define the path to the corresponding cross attention
554
+ processor. This is strongly recommended when setting trainable attention processors.
555
+
556
+ """
557
+ count = len(self.attn_processors.keys())
558
+
559
+ if isinstance(processor, dict) and len(processor) != count:
560
+ raise ValueError(
561
+ f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
562
+ f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
563
+ )
564
+
565
+ def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
566
+ if hasattr(module, "set_processor"):
567
+ if not isinstance(processor, dict):
568
+ module.set_processor(processor)
569
+ else:
570
+ module.set_processor(processor.pop(f"{name}.processor"))
571
+
572
+ for sub_name, child in module.named_children():
573
+ fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)
574
+
575
+ for name, module in self.named_children():
576
+ fn_recursive_attn_processor(name, module, processor)
577
+
578
+ # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_default_attn_processor
579
+ def set_default_attn_processor(self):
580
+ """
581
+ Disables custom attention processors and sets the default attention implementation.
582
+ """
583
+ if all(proc.__class__ in ADDED_KV_ATTENTION_PROCESSORS for proc in self.attn_processors.values()):
584
+ processor = AttnAddedKVProcessor()
585
+ elif all(proc.__class__ in CROSS_ATTENTION_PROCESSORS for proc in self.attn_processors.values()):
586
+ processor = AttnProcessor()
587
+ else:
588
+ raise ValueError(
589
+ f"Cannot call `set_default_attn_processor` when attention processors are of type {next(iter(self.attn_processors.values()))}"
590
+ )
591
+
592
+ self.set_attn_processor(processor)
593
+
594
+ # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_attention_slice
595
+ def set_attention_slice(self, slice_size: Union[str, int, List[int]]) -> None:
596
+ r"""
597
+ Enable sliced attention computation.
598
+
599
+ When this option is enabled, the attention module splits the input tensor in slices to compute attention in
600
+ several steps. This is useful for saving some memory in exchange for a small decrease in speed.
601
+
602
+ Args:
603
+ slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `"auto"`):
604
+ When `"auto"`, input to the attention heads is halved, so attention is computed in two steps. If
605
+ `"max"`, maximum amount of memory is saved by running only one slice at a time. If a number is
606
+ provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim`
607
+ must be a multiple of `slice_size`.
608
+ """
609
+ sliceable_head_dims = []
610
+
611
+ def fn_recursive_retrieve_sliceable_dims(module: torch.nn.Module):
612
+ if hasattr(module, "set_attention_slice"):
613
+ sliceable_head_dims.append(module.sliceable_head_dim)
614
+
615
+ for child in module.children():
616
+ fn_recursive_retrieve_sliceable_dims(child)
617
+
618
+ # retrieve number of attention layers
619
+ for module in self.children():
620
+ fn_recursive_retrieve_sliceable_dims(module)
621
+
622
+ num_sliceable_layers = len(sliceable_head_dims)
623
+
624
+ if slice_size == "auto":
625
+ # half the attention head size is usually a good trade-off between
626
+ # speed and memory
627
+ slice_size = [dim // 2 for dim in sliceable_head_dims]
628
+ elif slice_size == "max":
629
+ # make smallest slice possible
630
+ slice_size = num_sliceable_layers * [1]
631
+
632
+ slice_size = num_sliceable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size
633
+
634
+ if len(slice_size) != len(sliceable_head_dims):
635
+ raise ValueError(
636
+ f"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different"
637
+ f" attention layers. Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}."
638
+ )
639
+
640
+ for i in range(len(slice_size)):
641
+ size = slice_size[i]
642
+ dim = sliceable_head_dims[i]
643
+ if size is not None and size > dim:
644
+ raise ValueError(f"size {size} has to be smaller or equal to {dim}.")
645
+
646
+ # Recursively walk through all the children.
647
+ # Any children which exposes the set_attention_slice method
648
+ # gets the message
649
+ def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]):
650
+ if hasattr(module, "set_attention_slice"):
651
+ module.set_attention_slice(slice_size.pop())
652
+
653
+ for child in module.children():
654
+ fn_recursive_set_attention_slice(child, slice_size)
655
+
656
+ reversed_slice_size = list(reversed(slice_size))
657
+ for module in self.children():
658
+ fn_recursive_set_attention_slice(module, reversed_slice_size)
659
+
660
+ def forward(
661
+ self,
662
+ sample: torch.Tensor,
663
+ timestep: Union[torch.Tensor, float, int],
664
+ encoder_hidden_states: torch.Tensor,
665
+ controlnet_cond: torch.Tensor,
666
+ conditioning_scale: float = 1.0,
667
+ class_labels: Optional[torch.Tensor] = None,
668
+ timestep_cond: Optional[torch.Tensor] = None,
669
+ attention_mask: Optional[torch.Tensor] = None,
670
+ added_cond_kwargs: Optional[Dict[str, torch.Tensor]] = None,
671
+ cross_attention_kwargs: Optional[Dict[str, Any]] = None,
672
+ guess_mode: bool = False,
673
+ return_dict: bool = True,
674
+ ) -> Union[ControlNetOutput, Tuple[Tuple[torch.Tensor, ...], torch.Tensor]]:
675
+ """
676
+ The [`ControlNetModel`] forward method.
677
+
678
+ Args:
679
+ sample (`torch.Tensor`):
680
+ The noisy input tensor.
681
+ timestep (`Union[torch.Tensor, float, int]`):
682
+ The number of timesteps to denoise an input.
683
+ encoder_hidden_states (`torch.Tensor`):
684
+ The encoder hidden states.
685
+ controlnet_cond (`torch.Tensor`):
686
+ The conditional input tensor, typically an image of shape `(batch_size, conditioning_channels, height, width)`.
687
+ conditioning_scale (`float`, defaults to `1.0`):
688
+ The scale factor for ControlNet outputs.
689
+ class_labels (`torch.Tensor`, *optional*, defaults to `None`):
690
+ Optional class labels for conditioning. Their embeddings will be summed with the timestep embeddings.
691
+ timestep_cond (`torch.Tensor`, *optional*, defaults to `None`):
692
+ Additional conditional embeddings for timestep. If provided, the embeddings will be summed with the
693
+ timestep_embedding passed through the `self.time_embedding` layer to obtain the final timestep
694
+ embeddings.
695
+ attention_mask (`torch.Tensor`, *optional*, defaults to `None`):
696
+ An attention mask of shape `(batch, key_tokens)` is applied to `encoder_hidden_states`. If `1` the mask
697
+ is kept, otherwise if `0` it is discarded. Mask will be converted into a bias, which adds large
698
+ negative values to the attention scores corresponding to "discard" tokens.
699
+ added_cond_kwargs (`dict`):
700
+ Additional conditions for the Stable Diffusion XL UNet.
701
+ cross_attention_kwargs (`dict[str]`, *optional*, defaults to `None`):
702
+ A kwargs dictionary that if specified is passed along to the `AttnProcessor`.
703
+ guess_mode (`bool`, defaults to `False`):
704
+ In this mode, the ControlNet encoder tries its best to recognize the input content of the input even if
705
+ you remove all prompts. A `guidance_scale` between 3.0 and 5.0 is recommended.
706
+ return_dict (`bool`, defaults to `True`):
707
+ Whether or not to return a [`~models.controlnets.controlnet.ControlNetOutput`] instead of a plain
708
+ tuple.
709
+
710
+ Returns:
711
+ [`~models.controlnets.controlnet.ControlNetOutput`] **or** `tuple`:
712
+ If `return_dict` is `True`, a [`~models.controlnets.controlnet.ControlNetOutput`] is returned,
713
+ otherwise a tuple is returned where the first element is the sample tensor.
714
+ """
715
+ # check channel order
716
+ channel_order = self.config.controlnet_conditioning_channel_order
717
+
718
+ if channel_order == "rgb":
719
+ # in rgb order by default
720
+ ...
721
+ elif channel_order == "bgr":
722
+ controlnet_cond = torch.flip(controlnet_cond, dims=[1])
723
+ else:
724
+ raise ValueError(f"unknown `controlnet_conditioning_channel_order`: {channel_order}")
725
+
726
+ # prepare attention_mask
727
+ if attention_mask is not None:
728
+ attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0
729
+ attention_mask = attention_mask.unsqueeze(1)
730
+
731
+ # 1. time
732
+ timesteps = timestep
733
+ if not torch.is_tensor(timesteps):
734
+ # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
735
+ # This would be a good case for the `match` statement (Python 3.10+)
736
+ is_mps = sample.device.type == "mps"
737
+ is_npu = sample.device.type == "npu"
738
+ if isinstance(timestep, float):
739
+ dtype = torch.float32 if (is_mps or is_npu) else torch.float64
740
+ else:
741
+ dtype = torch.int32 if (is_mps or is_npu) else torch.int64
742
+ timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device)
743
+ elif len(timesteps.shape) == 0:
744
+ timesteps = timesteps[None].to(sample.device)
745
+
746
+ # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
747
+ timesteps = timesteps.expand(sample.shape[0])
748
+
749
+ t_emb = self.time_proj(timesteps)
750
+
751
+ # timesteps does not contain any weights and will always return f32 tensors
752
+ # but time_embedding might actually be running in fp16. so we need to cast here.
753
+ # there might be better ways to encapsulate this.
754
+ t_emb = t_emb.to(dtype=sample.dtype)
755
+
756
+ emb = self.time_embedding(t_emb, timestep_cond)
757
+ aug_emb = None
758
+
759
+ if self.class_embedding is not None:
760
+ if class_labels is None:
761
+ raise ValueError("class_labels should be provided when num_class_embeds > 0")
762
+
763
+ if self.config.class_embed_type == "timestep":
764
+ class_labels = self.time_proj(class_labels)
765
+
766
+ class_emb = self.class_embedding(class_labels).to(dtype=self.dtype)
767
+ emb = emb + class_emb
768
+
769
+ if self.config.addition_embed_type is not None:
770
+ if self.config.addition_embed_type == "text":
771
+ aug_emb = self.add_embedding(encoder_hidden_states)
772
+
773
+ elif self.config.addition_embed_type == "text_time":
774
+ if "text_embeds" not in added_cond_kwargs:
775
+ raise ValueError(
776
+ f"{self.__class__} has the config param `addition_embed_type` set to 'text_time' which requires the keyword argument `text_embeds` to be passed in `added_cond_kwargs`"
777
+ )
778
+ text_embeds = added_cond_kwargs.get("text_embeds")
779
+ if "time_ids" not in added_cond_kwargs:
780
+ raise ValueError(
781
+ f"{self.__class__} has the config param `addition_embed_type` set to 'text_time' which requires the keyword argument `time_ids` to be passed in `added_cond_kwargs`"
782
+ )
783
+ time_ids = added_cond_kwargs.get("time_ids")
784
+ time_embeds = self.add_time_proj(time_ids.flatten())
785
+ time_embeds = time_embeds.reshape((text_embeds.shape[0], -1))
786
+
787
+ add_embeds = torch.concat([text_embeds, time_embeds], dim=-1)
788
+ add_embeds = add_embeds.to(emb.dtype)
789
+ aug_emb = self.add_embedding(add_embeds)
790
+
791
+ emb = emb + aug_emb if aug_emb is not None else emb
792
+
793
+ # 2. pre-process
794
+ sample = self.conv_in(sample)
795
+
796
+ controlnet_cond = self.controlnet_cond_embedding(controlnet_cond)
797
+ sample = sample + controlnet_cond
798
+
799
+ # 3. down
800
+ down_block_res_samples = (sample,)
801
+ for downsample_block in self.down_blocks:
802
+ if hasattr(downsample_block, "has_cross_attention") and downsample_block.has_cross_attention:
803
+ sample, res_samples = downsample_block(
804
+ hidden_states=sample,
805
+ temb=emb,
806
+ encoder_hidden_states=encoder_hidden_states,
807
+ attention_mask=attention_mask,
808
+ cross_attention_kwargs=cross_attention_kwargs,
809
+ )
810
+ else:
811
+ sample, res_samples = downsample_block(hidden_states=sample, temb=emb)
812
+
813
+ down_block_res_samples += res_samples
814
+
815
+ # 4. mid
816
+ if self.mid_block is not None:
817
+ if hasattr(self.mid_block, "has_cross_attention") and self.mid_block.has_cross_attention:
818
+ sample = self.mid_block(
819
+ sample,
820
+ emb,
821
+ encoder_hidden_states=encoder_hidden_states,
822
+ attention_mask=attention_mask,
823
+ cross_attention_kwargs=cross_attention_kwargs,
824
+ )
825
+ else:
826
+ sample = self.mid_block(sample, emb)
827
+
828
+ # 5. Control net blocks
829
+
830
+ controlnet_down_block_res_samples = ()
831
+
832
+ for down_block_res_sample, controlnet_block in zip(down_block_res_samples, self.controlnet_down_blocks):
833
+ down_block_res_sample = controlnet_block(down_block_res_sample)
834
+ controlnet_down_block_res_samples = controlnet_down_block_res_samples + (down_block_res_sample,)
835
+
836
+ down_block_res_samples = controlnet_down_block_res_samples
837
+
838
+ mid_block_res_sample = self.controlnet_mid_block(sample)
839
+
840
+ # 6. scaling
841
+ if guess_mode and not self.config.global_pool_conditions:
842
+ scales = torch.logspace(-1, 0, len(down_block_res_samples) + 1, device=sample.device) # 0.1 to 1.0
843
+ scales = scales * conditioning_scale
844
+ down_block_res_samples = [sample * scale for sample, scale in zip(down_block_res_samples, scales)]
845
+ mid_block_res_sample = mid_block_res_sample * scales[-1] # last one
846
+ else:
847
+ down_block_res_samples = [sample * conditioning_scale for sample in down_block_res_samples]
848
+ mid_block_res_sample = mid_block_res_sample * conditioning_scale
849
+
850
+ if self.config.global_pool_conditions:
851
+ down_block_res_samples = [
852
+ torch.mean(sample, dim=(2, 3), keepdim=True) for sample in down_block_res_samples
853
+ ]
854
+ mid_block_res_sample = torch.mean(mid_block_res_sample, dim=(2, 3), keepdim=True)
855
+
856
+ if not return_dict:
857
+ return (down_block_res_samples, mid_block_res_sample)
858
+
859
+ return ControlNetOutput(
860
+ down_block_res_samples=down_block_res_samples, mid_block_res_sample=mid_block_res_sample
861
+ )
862
+
863
+
864
+ def zero_module(module):
865
+ for p in module.parameters():
866
+ nn.init.zeros_(p)
867
+ return module
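To tie the pieces above together, a minimal sketch of creating a ControlNet from an existing UNet and running one conditioning pass. The tensor sizes are illustrative for a default, SD-1.x-like configuration and are not prescribed by this diff; the import paths follow this diff's layout.

import torch
from diffusers import UNet2DConditionModel
from diffusers.models.controlnets.controlnet import ControlNetModel

unet = UNet2DConditionModel()                 # default config, cross_attention_dim=1280
controlnet = ControlNetModel.from_unet(unet)  # copies matching UNet weights; control blocks are zero-initialized

sample = torch.randn(1, 4, 64, 64)            # noisy latents
cond_image = torch.randn(1, 3, 512, 512)      # conditioning image; embedded down to 64x64 by three stride-2 convs
text_states = torch.randn(1, 77, 1280)        # encoder hidden states

with torch.no_grad():
    out = controlnet(
        sample,
        timestep=10,
        encoder_hidden_states=text_states,
        controlnet_cond=cond_image,
        conditioning_scale=1.0,
    )
# out.down_block_res_samples feed the UNet's down blocks, out.mid_block_res_sample its mid block.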
exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/models/controlnets/controlnet_flax.py ADDED
@@ -0,0 +1,408 @@
1
+ # Copyright 2025 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import Optional, Tuple, Union
15
+
16
+ import flax
17
+ import flax.linen as nn
18
+ import jax
19
+ import jax.numpy as jnp
20
+ from flax.core.frozen_dict import FrozenDict
21
+
22
+ from ...configuration_utils import ConfigMixin, flax_register_to_config
23
+ from ...utils import BaseOutput, logging
24
+ from ..embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
25
+ from ..modeling_flax_utils import FlaxModelMixin
26
+ from ..unets.unet_2d_blocks_flax import (
27
+ FlaxCrossAttnDownBlock2D,
28
+ FlaxDownBlock2D,
29
+ FlaxUNetMidBlock2DCrossAttn,
30
+ )
31
+
32
+
33
+ logger = logging.get_logger(__name__)
34
+
35
+
36
+ @flax.struct.dataclass
37
+ class FlaxControlNetOutput(BaseOutput):
38
+ """
39
+ The output of [`FlaxControlNetModel`].
40
+
41
+ Args:
42
+ down_block_res_samples (`jnp.ndarray`):
43
+ mid_block_res_sample (`jnp.ndarray`):
44
+ """
45
+
46
+ down_block_res_samples: jnp.ndarray
47
+ mid_block_res_sample: jnp.ndarray
48
+
49
+
50
+ class FlaxControlNetConditioningEmbedding(nn.Module):
51
+ conditioning_embedding_channels: int
52
+ block_out_channels: Tuple[int, ...] = (16, 32, 96, 256)
53
+ dtype: jnp.dtype = jnp.float32
54
+
55
+ def setup(self) -> None:
56
+ logger.warning(
57
+ "Flax classes are deprecated and will be removed in Diffusers v1.0.0. We "
58
+ "recommend migrating to PyTorch classes or pinning your version of Diffusers."
59
+ )
60
+
61
+ self.conv_in = nn.Conv(
62
+ self.block_out_channels[0],
63
+ kernel_size=(3, 3),
64
+ padding=((1, 1), (1, 1)),
65
+ dtype=self.dtype,
66
+ )
67
+
68
+ blocks = []
69
+ for i in range(len(self.block_out_channels) - 1):
70
+ channel_in = self.block_out_channels[i]
71
+ channel_out = self.block_out_channels[i + 1]
72
+ conv1 = nn.Conv(
73
+ channel_in,
74
+ kernel_size=(3, 3),
75
+ padding=((1, 1), (1, 1)),
76
+ dtype=self.dtype,
77
+ )
78
+ blocks.append(conv1)
79
+ conv2 = nn.Conv(
80
+ channel_out,
81
+ kernel_size=(3, 3),
82
+ strides=(2, 2),
83
+ padding=((1, 1), (1, 1)),
84
+ dtype=self.dtype,
85
+ )
86
+ blocks.append(conv2)
87
+ self.blocks = blocks
88
+
89
+ self.conv_out = nn.Conv(
90
+ self.conditioning_embedding_channels,
91
+ kernel_size=(3, 3),
92
+ padding=((1, 1), (1, 1)),
93
+ kernel_init=nn.initializers.zeros_init(),
94
+ bias_init=nn.initializers.zeros_init(),
95
+ dtype=self.dtype,
96
+ )
97
+
98
+ def __call__(self, conditioning: jnp.ndarray) -> jnp.ndarray:
99
+ embedding = self.conv_in(conditioning)
100
+ embedding = nn.silu(embedding)
101
+
102
+ for block in self.blocks:
103
+ embedding = block(embedding)
104
+ embedding = nn.silu(embedding)
105
+
106
+ embedding = self.conv_out(embedding)
107
+
108
+ return embedding
109
+
110
+
111
+ @flax_register_to_config
112
+ class FlaxControlNetModel(nn.Module, FlaxModelMixin, ConfigMixin):
113
+ r"""
114
+ A ControlNet model.
115
+
116
+ This model inherits from [`FlaxModelMixin`]. Check the superclass documentation for its generic methods
117
+ implemented for all models (such as downloading or saving).
118
+
119
+ This model is also a Flax Linen [`flax.linen.Module`](https://flax.readthedocs.io/en/latest/flax.linen.html#module)
120
+ subclass. Use it as a regular Flax Linen module and refer to the Flax documentation for all matters related to its
121
+ general usage and behavior.
122
+
123
+ Inherent JAX features such as the following are supported:
124
+
125
+ - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit)
126
+ - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation)
127
+ - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap)
128
+ - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap)
129
+
130
+ Parameters:
131
+ sample_size (`int`, *optional*):
132
+ The size of the input sample.
133
+ in_channels (`int`, *optional*, defaults to 4):
134
+ The number of channels in the input sample.
135
+ down_block_types (`Tuple[str]`, *optional*, defaults to `("CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D")`):
136
+ The tuple of downsample blocks to use.
137
+ block_out_channels (`Tuple[int]`, *optional*, defaults to `(320, 640, 1280, 1280)`):
138
+ The tuple of output channels for each block.
139
+ layers_per_block (`int`, *optional*, defaults to 2):
140
+ The number of layers per block.
141
+ attention_head_dim (`int` or `Tuple[int]`, *optional*, defaults to 8):
142
+ The dimension of the attention heads.
143
+ num_attention_heads (`int` or `Tuple[int]`, *optional*):
144
+ The number of attention heads.
145
+ cross_attention_dim (`int`, *optional*, defaults to 1280):
146
+ The dimension of the cross attention features.
147
+ dropout (`float`, *optional*, defaults to 0):
148
+ Dropout probability for down, up and bottleneck blocks.
149
+ flip_sin_to_cos (`bool`, *optional*, defaults to `True`):
150
+ Whether to flip the sin to cos in the time embedding.
151
+ freq_shift (`int`, *optional*, defaults to 0): The frequency shift to apply to the time embedding.
152
+ controlnet_conditioning_channel_order (`str`, *optional*, defaults to `rgb`):
153
+ The channel order of the conditioning image. If `bgr`, it will be converted to `rgb`.
154
+ conditioning_embedding_out_channels (`tuple`, *optional*, defaults to `(16, 32, 96, 256)`):
155
+ The tuple of output channels for each block in the `conditioning_embedding` layer.
156
+ """
157
+
158
+ sample_size: int = 32
159
+ in_channels: int = 4
160
+ down_block_types: Tuple[str, ...] = (
161
+ "CrossAttnDownBlock2D",
162
+ "CrossAttnDownBlock2D",
163
+ "CrossAttnDownBlock2D",
164
+ "DownBlock2D",
165
+ )
166
+ only_cross_attention: Union[bool, Tuple[bool, ...]] = False
167
+ block_out_channels: Tuple[int, ...] = (320, 640, 1280, 1280)
168
+ layers_per_block: int = 2
169
+ attention_head_dim: Union[int, Tuple[int, ...]] = 8
170
+ num_attention_heads: Optional[Union[int, Tuple[int, ...]]] = None
171
+ cross_attention_dim: int = 1280
172
+ dropout: float = 0.0
173
+ use_linear_projection: bool = False
174
+ dtype: jnp.dtype = jnp.float32
175
+ flip_sin_to_cos: bool = True
176
+ freq_shift: int = 0
177
+ controlnet_conditioning_channel_order: str = "rgb"
178
+ conditioning_embedding_out_channels: Tuple[int, ...] = (16, 32, 96, 256)
179
+
180
+ def init_weights(self, rng: jax.Array) -> FrozenDict:
181
+ # init input tensors
182
+ sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
183
+ sample = jnp.zeros(sample_shape, dtype=jnp.float32)
184
+ timesteps = jnp.ones((1,), dtype=jnp.int32)
185
+ encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)
186
+ controlnet_cond_shape = (1, 3, self.sample_size * 8, self.sample_size * 8)
187
+ controlnet_cond = jnp.zeros(controlnet_cond_shape, dtype=jnp.float32)
188
+
189
+ params_rng, dropout_rng = jax.random.split(rng)
190
+ rngs = {"params": params_rng, "dropout": dropout_rng}
191
+
192
+ return self.init(rngs, sample, timesteps, encoder_hidden_states, controlnet_cond)["params"]
193
+
194
+ def setup(self) -> None:
195
+ logger.warning(
196
+ "Flax classes are deprecated and will be removed in Diffusers v1.0.0. We "
197
+ "recommend migrating to PyTorch classes or pinning your version of Diffusers."
198
+ )
199
+
200
+ block_out_channels = self.block_out_channels
201
+ time_embed_dim = block_out_channels[0] * 4
202
+
203
+ # If `num_attention_heads` is not defined (which is the case for most models)
204
+ # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
205
+ # The reason for this behavior is to correct for incorrectly named variables that were introduced
206
+ # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
207
+ # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
208
+ # which is why we correct for the naming here.
209
+ num_attention_heads = self.num_attention_heads or self.attention_head_dim
210
+
211
+ # input
212
+ self.conv_in = nn.Conv(
213
+ block_out_channels[0],
214
+ kernel_size=(3, 3),
215
+ strides=(1, 1),
216
+ padding=((1, 1), (1, 1)),
217
+ dtype=self.dtype,
218
+ )
219
+
220
+ # time
221
+ self.time_proj = FlaxTimesteps(
222
+ block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift
223
+ )
224
+ self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype)
225
+
226
+ self.controlnet_cond_embedding = FlaxControlNetConditioningEmbedding(
227
+ conditioning_embedding_channels=block_out_channels[0],
228
+ block_out_channels=self.conditioning_embedding_out_channels,
229
+ )
230
+
231
+ only_cross_attention = self.only_cross_attention
232
+ if isinstance(only_cross_attention, bool):
233
+ only_cross_attention = (only_cross_attention,) * len(self.down_block_types)
234
+
235
+ if isinstance(num_attention_heads, int):
236
+ num_attention_heads = (num_attention_heads,) * len(self.down_block_types)
237
+
238
+ # down
239
+ down_blocks = []
240
+ controlnet_down_blocks = []
241
+
242
+ output_channel = block_out_channels[0]
243
+
244
+ controlnet_block = nn.Conv(
245
+ output_channel,
246
+ kernel_size=(1, 1),
247
+ padding="VALID",
248
+ kernel_init=nn.initializers.zeros_init(),
249
+ bias_init=nn.initializers.zeros_init(),
250
+ dtype=self.dtype,
251
+ )
252
+ controlnet_down_blocks.append(controlnet_block)
253
+
254
+ for i, down_block_type in enumerate(self.down_block_types):
255
+ input_channel = output_channel
256
+ output_channel = block_out_channels[i]
257
+ is_final_block = i == len(block_out_channels) - 1
258
+
259
+ if down_block_type == "CrossAttnDownBlock2D":
260
+ down_block = FlaxCrossAttnDownBlock2D(
261
+ in_channels=input_channel,
262
+ out_channels=output_channel,
263
+ dropout=self.dropout,
264
+ num_layers=self.layers_per_block,
265
+ num_attention_heads=num_attention_heads[i],
266
+ add_downsample=not is_final_block,
267
+ use_linear_projection=self.use_linear_projection,
268
+ only_cross_attention=only_cross_attention[i],
269
+ dtype=self.dtype,
270
+ )
271
+ else:
272
+ down_block = FlaxDownBlock2D(
273
+ in_channels=input_channel,
274
+ out_channels=output_channel,
275
+ dropout=self.dropout,
276
+ num_layers=self.layers_per_block,
277
+ add_downsample=not is_final_block,
278
+ dtype=self.dtype,
279
+ )
280
+
281
+ down_blocks.append(down_block)
282
+
283
+ for _ in range(self.layers_per_block):
284
+ controlnet_block = nn.Conv(
285
+ output_channel,
286
+ kernel_size=(1, 1),
287
+ padding="VALID",
288
+ kernel_init=nn.initializers.zeros_init(),
289
+ bias_init=nn.initializers.zeros_init(),
290
+ dtype=self.dtype,
291
+ )
292
+ controlnet_down_blocks.append(controlnet_block)
293
+
294
+ if not is_final_block:
295
+ controlnet_block = nn.Conv(
296
+ output_channel,
297
+ kernel_size=(1, 1),
298
+ padding="VALID",
299
+ kernel_init=nn.initializers.zeros_init(),
300
+ bias_init=nn.initializers.zeros_init(),
301
+ dtype=self.dtype,
302
+ )
303
+ controlnet_down_blocks.append(controlnet_block)
304
+
305
+ self.down_blocks = down_blocks
306
+ self.controlnet_down_blocks = controlnet_down_blocks
307
+
308
+ # mid
309
+ mid_block_channel = block_out_channels[-1]
310
+ self.mid_block = FlaxUNetMidBlock2DCrossAttn(
311
+ in_channels=mid_block_channel,
312
+ dropout=self.dropout,
313
+ num_attention_heads=num_attention_heads[-1],
314
+ use_linear_projection=self.use_linear_projection,
315
+ dtype=self.dtype,
316
+ )
317
+
318
+ self.controlnet_mid_block = nn.Conv(
319
+ mid_block_channel,
320
+ kernel_size=(1, 1),
321
+ padding="VALID",
322
+ kernel_init=nn.initializers.zeros_init(),
323
+ bias_init=nn.initializers.zeros_init(),
324
+ dtype=self.dtype,
325
+ )
326
+
327
+ def __call__(
328
+ self,
329
+ sample: jnp.ndarray,
330
+ timesteps: Union[jnp.ndarray, float, int],
331
+ encoder_hidden_states: jnp.ndarray,
332
+ controlnet_cond: jnp.ndarray,
333
+ conditioning_scale: float = 1.0,
334
+ return_dict: bool = True,
335
+ train: bool = False,
336
+ ) -> Union[FlaxControlNetOutput, Tuple[Tuple[jnp.ndarray, ...], jnp.ndarray]]:
337
+ r"""
338
+ Args:
339
+ sample (`jnp.ndarray`): (batch, channel, height, width) noisy inputs tensor
340
+ timesteps (`jnp.ndarray` or `float` or `int`): the denoising timestep(s)
341
+ encoder_hidden_states (`jnp.ndarray`): (batch_size, sequence_length, hidden_size) encoder hidden states
342
+ controlnet_cond (`jnp.ndarray`): (batch, channel, height, width) the conditional input tensor
343
+ conditioning_scale (`float`, *optional*, defaults to `1.0`): the scale factor for controlnet outputs
344
+ return_dict (`bool`, *optional*, defaults to `True`):
345
+ Whether or not to return a [`models.controlnets.controlnet_flax.FlaxControlNetOutput`] instead of
346
+ a plain tuple.
347
+ train (`bool`, *optional*, defaults to `False`):
348
+ Use deterministic functions and disable dropout when not training.
349
+
350
+ Returns:
351
+ [`~models.controlnets.controlnet_flax.FlaxControlNetOutput`] or `tuple`:
352
+ [`~models.controlnets.controlnet_flax.FlaxControlNetOutput`] if `return_dict` is True, otherwise
353
+ a `tuple`. When returning a tuple, the first element is the down block residual samples and the second is the mid block residual sample.
354
+ """
355
+ channel_order = self.controlnet_conditioning_channel_order
356
+ if channel_order == "bgr":
357
+ controlnet_cond = jnp.flip(controlnet_cond, axis=1)
358
+
359
+ # 1. time
360
+ if not isinstance(timesteps, jnp.ndarray):
361
+ timesteps = jnp.array([timesteps], dtype=jnp.int32)
362
+ elif isinstance(timesteps, jnp.ndarray) and len(timesteps.shape) == 0:
363
+ timesteps = timesteps.astype(dtype=jnp.float32)
364
+ timesteps = jnp.expand_dims(timesteps, 0)
365
+
366
+ t_emb = self.time_proj(timesteps)
367
+ t_emb = self.time_embedding(t_emb)
368
+
369
+ # 2. pre-process
370
+ sample = jnp.transpose(sample, (0, 2, 3, 1))
371
+ sample = self.conv_in(sample)
372
+
373
+ controlnet_cond = jnp.transpose(controlnet_cond, (0, 2, 3, 1))
374
+ controlnet_cond = self.controlnet_cond_embedding(controlnet_cond)
375
+ sample += controlnet_cond
376
+
377
+ # 3. down
378
+ down_block_res_samples = (sample,)
379
+ for down_block in self.down_blocks:
380
+ if isinstance(down_block, FlaxCrossAttnDownBlock2D):
381
+ sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
382
+ else:
383
+ sample, res_samples = down_block(sample, t_emb, deterministic=not train)
384
+ down_block_res_samples += res_samples
385
+
386
+ # 4. mid
387
+ sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
388
+
389
+ # 5. controlnet blocks
390
+ controlnet_down_block_res_samples = ()
391
+ for down_block_res_sample, controlnet_block in zip(down_block_res_samples, self.controlnet_down_blocks):
392
+ down_block_res_sample = controlnet_block(down_block_res_sample)
393
+ controlnet_down_block_res_samples += (down_block_res_sample,)
394
+
395
+ down_block_res_samples = controlnet_down_block_res_samples
396
+
397
+ mid_block_res_sample = self.controlnet_mid_block(sample)
398
+
399
+ # 6. scaling
400
+ down_block_res_samples = [sample * conditioning_scale for sample in down_block_res_samples]
401
+ mid_block_res_sample *= conditioning_scale
402
+
403
+ if not return_dict:
404
+ return (down_block_res_samples, mid_block_res_sample)
405
+
406
+ return FlaxControlNetOutput(
407
+ down_block_res_samples=down_block_res_samples, mid_block_res_sample=mid_block_res_sample
408
+ )
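To make the `FlaxControlNetModel` API above concrete, here is a minimal usage sketch. It is a hedged example, not a verified recipe: the dummy shapes follow the `init_weights`/`__call__` code above (NCHW latents, a conditioning image at 8x the latent resolution), and `cross_attention_dim=768` is chosen only so the dummy text embeddings match.

import jax
import jax.numpy as jnp
from diffusers import FlaxControlNetModel

controlnet = FlaxControlNetModel(sample_size=32, cross_attention_dim=768)
params = controlnet.init_weights(jax.random.PRNGKey(0))

sample = jnp.zeros((1, 4, 32, 32))                    # noisy latents, NCHW
timesteps = jnp.array([10], dtype=jnp.int32)
encoder_hidden_states = jnp.zeros((1, 77, 768))       # text-encoder features
controlnet_cond = jnp.zeros((1, 3, 256, 256))         # conditioning image, 8x latent size

out = controlnet.apply(
    {"params": params},
    sample,
    timesteps,
    encoder_hidden_states,
    controlnet_cond,
    conditioning_scale=1.0,
)
print(len(out.down_block_res_samples), out.mid_block_res_sample.shape)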
exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/models/controlnets/controlnet_flux.py ADDED
@@ -0,0 +1,509 @@
1
+ # Copyright 2025 Black Forest Labs, The HuggingFace Team and The InstantX Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from dataclasses import dataclass
16
+ from typing import Any, Dict, List, Optional, Tuple, Union
17
+
18
+ import torch
19
+ import torch.nn as nn
20
+
21
+ from ...configuration_utils import ConfigMixin, register_to_config
22
+ from ...loaders import PeftAdapterMixin
23
+ from ...utils import USE_PEFT_BACKEND, BaseOutput, logging, scale_lora_layers, unscale_lora_layers
24
+ from ..attention_processor import AttentionProcessor
25
+ from ..controlnets.controlnet import ControlNetConditioningEmbedding, zero_module
26
+ from ..embeddings import CombinedTimestepGuidanceTextProjEmbeddings, CombinedTimestepTextProjEmbeddings, FluxPosEmbed
27
+ from ..modeling_outputs import Transformer2DModelOutput
28
+ from ..modeling_utils import ModelMixin
29
+ from ..transformers.transformer_flux import FluxSingleTransformerBlock, FluxTransformerBlock
30
+
31
+
32
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
33
+
34
+
35
+ @dataclass
36
+ class FluxControlNetOutput(BaseOutput):
37
+ controlnet_block_samples: Tuple[torch.Tensor]
38
+ controlnet_single_block_samples: Tuple[torch.Tensor]
39
+
40
+
41
+ class FluxControlNetModel(ModelMixin, ConfigMixin, PeftAdapterMixin):
42
+ _supports_gradient_checkpointing = True
43
+
44
+ @register_to_config
45
+ def __init__(
46
+ self,
47
+ patch_size: int = 1,
48
+ in_channels: int = 64,
49
+ num_layers: int = 19,
50
+ num_single_layers: int = 38,
51
+ attention_head_dim: int = 128,
52
+ num_attention_heads: int = 24,
53
+ joint_attention_dim: int = 4096,
54
+ pooled_projection_dim: int = 768,
55
+ guidance_embeds: bool = False,
56
+ axes_dims_rope: List[int] = [16, 56, 56],
57
+ num_mode: int = None,
58
+ conditioning_embedding_channels: int = None,
59
+ ):
60
+ super().__init__()
61
+ self.out_channels = in_channels
62
+ self.inner_dim = num_attention_heads * attention_head_dim
63
+
64
+ self.pos_embed = FluxPosEmbed(theta=10000, axes_dim=axes_dims_rope)
65
+ text_time_guidance_cls = (
66
+ CombinedTimestepGuidanceTextProjEmbeddings if guidance_embeds else CombinedTimestepTextProjEmbeddings
67
+ )
68
+ self.time_text_embed = text_time_guidance_cls(
69
+ embedding_dim=self.inner_dim, pooled_projection_dim=pooled_projection_dim
70
+ )
71
+
72
+ self.context_embedder = nn.Linear(joint_attention_dim, self.inner_dim)
73
+ self.x_embedder = torch.nn.Linear(in_channels, self.inner_dim)
74
+
75
+ self.transformer_blocks = nn.ModuleList(
76
+ [
77
+ FluxTransformerBlock(
78
+ dim=self.inner_dim,
79
+ num_attention_heads=num_attention_heads,
80
+ attention_head_dim=attention_head_dim,
81
+ )
82
+ for i in range(num_layers)
83
+ ]
84
+ )
85
+
86
+ self.single_transformer_blocks = nn.ModuleList(
87
+ [
88
+ FluxSingleTransformerBlock(
89
+ dim=self.inner_dim,
90
+ num_attention_heads=num_attention_heads,
91
+ attention_head_dim=attention_head_dim,
92
+ )
93
+ for i in range(num_single_layers)
94
+ ]
95
+ )
96
+
97
+ # controlnet_blocks
98
+ self.controlnet_blocks = nn.ModuleList([])
99
+ for _ in range(len(self.transformer_blocks)):
100
+ self.controlnet_blocks.append(zero_module(nn.Linear(self.inner_dim, self.inner_dim)))
101
+
102
+ self.controlnet_single_blocks = nn.ModuleList([])
103
+ for _ in range(len(self.single_transformer_blocks)):
104
+ self.controlnet_single_blocks.append(zero_module(nn.Linear(self.inner_dim, self.inner_dim)))
105
+
106
+ self.union = num_mode is not None
107
+ if self.union:
108
+ self.controlnet_mode_embedder = nn.Embedding(num_mode, self.inner_dim)
109
+
110
+ if conditioning_embedding_channels is not None:
111
+ self.input_hint_block = ControlNetConditioningEmbedding(
112
+ conditioning_embedding_channels=conditioning_embedding_channels, block_out_channels=(16, 16, 16, 16)
113
+ )
114
+ self.controlnet_x_embedder = torch.nn.Linear(in_channels, self.inner_dim)
115
+ else:
116
+ self.input_hint_block = None
117
+ self.controlnet_x_embedder = zero_module(torch.nn.Linear(in_channels, self.inner_dim))
118
+
119
+ self.gradient_checkpointing = False
120
+
121
+ @property
122
+ # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.attn_processors
123
+ def attn_processors(self):
124
+ r"""
125
+ Returns:
126
+ `dict` of attention processors: A dictionary containing all attention processors used in the model with
127
+ indexed by its weight name.
128
+ """
129
+ # set recursively
130
+ processors = {}
131
+
132
+ def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
133
+ if hasattr(module, "get_processor"):
134
+ processors[f"{name}.processor"] = module.get_processor()
135
+
136
+ for sub_name, child in module.named_children():
137
+ fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)
138
+
139
+ return processors
140
+
141
+ for name, module in self.named_children():
142
+ fn_recursive_add_processors(name, module, processors)
143
+
144
+ return processors
145
+
146
+ # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_attn_processor
147
+ def set_attn_processor(self, processor):
148
+ r"""
149
+ Sets the attention processor to use to compute attention.
150
+
151
+ Parameters:
152
+ processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`):
153
+ The instantiated processor class or a dictionary of processor classes that will be set as the processor
154
+ for **all** `Attention` layers.
155
+
156
+ If `processor` is a dict, the key needs to define the path to the corresponding cross attention
157
+ processor. This is strongly recommended when setting trainable attention processors.
158
+
159
+ """
160
+ count = len(self.attn_processors.keys())
161
+
162
+ if isinstance(processor, dict) and len(processor) != count:
163
+ raise ValueError(
164
+ f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
165
+ f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
166
+ )
167
+
168
+ def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
169
+ if hasattr(module, "set_processor"):
170
+ if not isinstance(processor, dict):
171
+ module.set_processor(processor)
172
+ else:
173
+ module.set_processor(processor.pop(f"{name}.processor"))
174
+
175
+ for sub_name, child in module.named_children():
176
+ fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)
177
+
178
+ for name, module in self.named_children():
179
+ fn_recursive_attn_processor(name, module, processor)
180
+
181
+ @classmethod
182
+ def from_transformer(
183
+ cls,
184
+ transformer,
185
+ num_layers: int = 4,
186
+ num_single_layers: int = 10,
187
+ attention_head_dim: int = 128,
188
+ num_attention_heads: int = 24,
189
+ load_weights_from_transformer=True,
190
+ ):
191
+ config = dict(transformer.config)
192
+ config["num_layers"] = num_layers
193
+ config["num_single_layers"] = num_single_layers
194
+ config["attention_head_dim"] = attention_head_dim
195
+ config["num_attention_heads"] = num_attention_heads
196
+
197
+ controlnet = cls.from_config(config)
198
+
199
+ if load_weights_from_transformer:
200
+ controlnet.pos_embed.load_state_dict(transformer.pos_embed.state_dict())
201
+ controlnet.time_text_embed.load_state_dict(transformer.time_text_embed.state_dict())
202
+ controlnet.context_embedder.load_state_dict(transformer.context_embedder.state_dict())
203
+ controlnet.x_embedder.load_state_dict(transformer.x_embedder.state_dict())
204
+ controlnet.transformer_blocks.load_state_dict(transformer.transformer_blocks.state_dict(), strict=False)
205
+ controlnet.single_transformer_blocks.load_state_dict(
206
+ transformer.single_transformer_blocks.state_dict(), strict=False
207
+ )
208
+
209
+ controlnet.controlnet_x_embedder = zero_module(controlnet.controlnet_x_embedder)
210
+
211
+ return controlnet
212
+
213
+ def forward(
214
+ self,
215
+ hidden_states: torch.Tensor,
216
+ controlnet_cond: torch.Tensor,
217
+ controlnet_mode: torch.Tensor = None,
218
+ conditioning_scale: float = 1.0,
219
+ encoder_hidden_states: torch.Tensor = None,
220
+ pooled_projections: torch.Tensor = None,
221
+ timestep: torch.LongTensor = None,
222
+ img_ids: torch.Tensor = None,
223
+ txt_ids: torch.Tensor = None,
224
+ guidance: torch.Tensor = None,
225
+ joint_attention_kwargs: Optional[Dict[str, Any]] = None,
226
+ return_dict: bool = True,
227
+ ) -> Union[torch.FloatTensor, Transformer2DModelOutput]:
228
+ """
229
+ The [`FluxControlNetModel`] forward method.
230
+
231
+ Args:
232
+ hidden_states (`torch.FloatTensor` of shape `(batch size, channel, height, width)`):
233
+ Input `hidden_states`.
234
+ controlnet_cond (`torch.Tensor`):
235
+ The conditional input tensor of shape `(batch_size, sequence_length, hidden_size)`.
236
+ controlnet_mode (`torch.Tensor`):
237
+ The mode tensor of shape `(batch_size, 1)`.
238
+ conditioning_scale (`float`, defaults to `1.0`):
239
+ The scale factor for ControlNet outputs.
240
+ encoder_hidden_states (`torch.FloatTensor` of shape `(batch size, sequence_len, embed_dims)`):
241
+ Conditional embeddings (embeddings computed from the input conditions such as prompts) to use.
242
+ pooled_projections (`torch.FloatTensor` of shape `(batch_size, projection_dim)`): Embeddings projected
243
+ from the embeddings of input conditions.
244
+ timestep ( `torch.LongTensor`):
245
+ Used to indicate denoising step.
246
+ block_controlnet_hidden_states: (`list` of `torch.Tensor`):
247
+ A list of tensors that if specified are added to the residuals of transformer blocks.
248
+ joint_attention_kwargs (`dict`, *optional*):
249
+ A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
250
+ `self.processor` in
251
+ [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
252
+ return_dict (`bool`, *optional*, defaults to `True`):
253
+ Whether or not to return a [`~models.transformer_2d.Transformer2DModelOutput`] instead of a plain
254
+ tuple.
255
+
256
+ Returns:
257
+ If `return_dict` is True, an [`~models.transformer_2d.Transformer2DModelOutput`] is returned, otherwise a
258
+ `tuple` where the first element is the sample tensor.
259
+ """
260
+ if joint_attention_kwargs is not None:
261
+ joint_attention_kwargs = joint_attention_kwargs.copy()
262
+ lora_scale = joint_attention_kwargs.pop("scale", 1.0)
263
+ else:
264
+ lora_scale = 1.0
265
+
266
+ if USE_PEFT_BACKEND:
267
+ # weight the lora layers by setting `lora_scale` for each PEFT layer
268
+ scale_lora_layers(self, lora_scale)
269
+ else:
270
+ if joint_attention_kwargs is not None and joint_attention_kwargs.get("scale", None) is not None:
271
+ logger.warning(
272
+ "Passing `scale` via `joint_attention_kwargs` when not using the PEFT backend is ineffective."
273
+ )
274
+ hidden_states = self.x_embedder(hidden_states)
275
+
276
+ if self.input_hint_block is not None:
277
+ controlnet_cond = self.input_hint_block(controlnet_cond)
278
+ batch_size, channels, height_pw, width_pw = controlnet_cond.shape
279
+ height = height_pw // self.config.patch_size
280
+ width = width_pw // self.config.patch_size
281
+ controlnet_cond = controlnet_cond.reshape(
282
+ batch_size, channels, height, self.config.patch_size, width, self.config.patch_size
283
+ )
284
+ controlnet_cond = controlnet_cond.permute(0, 2, 4, 1, 3, 5)
285
+ controlnet_cond = controlnet_cond.reshape(batch_size, height * width, -1)
286
+ # add
287
+ hidden_states = hidden_states + self.controlnet_x_embedder(controlnet_cond)
288
+
289
+ timestep = timestep.to(hidden_states.dtype) * 1000
290
+ if guidance is not None:
291
+ guidance = guidance.to(hidden_states.dtype) * 1000
292
+ else:
293
+ guidance = None
294
+ temb = (
295
+ self.time_text_embed(timestep, pooled_projections)
296
+ if guidance is None
297
+ else self.time_text_embed(timestep, guidance, pooled_projections)
298
+ )
299
+ encoder_hidden_states = self.context_embedder(encoder_hidden_states)
300
+
301
+ if txt_ids.ndim == 3:
302
+ logger.warning(
303
+ "Passing `txt_ids` 3d torch.Tensor is deprecated."
304
+ "Please remove the batch dimension and pass it as a 2d torch Tensor"
305
+ )
306
+ txt_ids = txt_ids[0]
307
+ if img_ids.ndim == 3:
308
+ logger.warning(
309
+ "Passing `img_ids` 3d torch.Tensor is deprecated."
310
+ "Please remove the batch dimension and pass it as a 2d torch Tensor"
311
+ )
312
+ img_ids = img_ids[0]
313
+
314
+ if self.union:
315
+ # union mode
316
+ if controlnet_mode is None:
317
+ raise ValueError("`controlnet_mode` cannot be `None` when applying ControlNet-Union")
318
+ # union mode emb
319
+ controlnet_mode_emb = self.controlnet_mode_embedder(controlnet_mode)
320
+ encoder_hidden_states = torch.cat([controlnet_mode_emb, encoder_hidden_states], dim=1)
321
+ txt_ids = torch.cat([txt_ids[:1], txt_ids], dim=0)
322
+
323
+ ids = torch.cat((txt_ids, img_ids), dim=0)
324
+ image_rotary_emb = self.pos_embed(ids)
325
+
326
+ block_samples = ()
327
+ for index_block, block in enumerate(self.transformer_blocks):
328
+ if torch.is_grad_enabled() and self.gradient_checkpointing:
329
+ encoder_hidden_states, hidden_states = self._gradient_checkpointing_func(
330
+ block,
331
+ hidden_states,
332
+ encoder_hidden_states,
333
+ temb,
334
+ image_rotary_emb,
335
+ )
336
+
337
+ else:
338
+ encoder_hidden_states, hidden_states = block(
339
+ hidden_states=hidden_states,
340
+ encoder_hidden_states=encoder_hidden_states,
341
+ temb=temb,
342
+ image_rotary_emb=image_rotary_emb,
343
+ )
344
+ block_samples = block_samples + (hidden_states,)
345
+
346
+ single_block_samples = ()
347
+ for index_block, block in enumerate(self.single_transformer_blocks):
348
+ if torch.is_grad_enabled() and self.gradient_checkpointing:
349
+ encoder_hidden_states, hidden_states = self._gradient_checkpointing_func(
350
+ block,
351
+ hidden_states,
352
+ encoder_hidden_states,
353
+ temb,
354
+ image_rotary_emb,
355
+ )
356
+
357
+ else:
358
+ encoder_hidden_states, hidden_states = block(
359
+ hidden_states=hidden_states,
360
+ encoder_hidden_states=encoder_hidden_states,
361
+ temb=temb,
362
+ image_rotary_emb=image_rotary_emb,
363
+ )
364
+ single_block_samples = single_block_samples + (hidden_states,)
365
+
366
+ # controlnet block
367
+ controlnet_block_samples = ()
368
+ for block_sample, controlnet_block in zip(block_samples, self.controlnet_blocks):
369
+ block_sample = controlnet_block(block_sample)
370
+ controlnet_block_samples = controlnet_block_samples + (block_sample,)
371
+
372
+ controlnet_single_block_samples = ()
373
+ for single_block_sample, controlnet_block in zip(single_block_samples, self.controlnet_single_blocks):
374
+ single_block_sample = controlnet_block(single_block_sample)
375
+ controlnet_single_block_samples = controlnet_single_block_samples + (single_block_sample,)
376
+
377
+ # scaling
378
+ controlnet_block_samples = [sample * conditioning_scale for sample in controlnet_block_samples]
379
+ controlnet_single_block_samples = [sample * conditioning_scale for sample in controlnet_single_block_samples]
380
+
381
+ controlnet_block_samples = None if len(controlnet_block_samples) == 0 else controlnet_block_samples
382
+ controlnet_single_block_samples = (
383
+ None if len(controlnet_single_block_samples) == 0 else controlnet_single_block_samples
384
+ )
385
+
386
+ if USE_PEFT_BACKEND:
387
+ # remove `lora_scale` from each PEFT layer
388
+ unscale_lora_layers(self, lora_scale)
389
+
390
+ if not return_dict:
391
+ return (controlnet_block_samples, controlnet_single_block_samples)
392
+
393
+ return FluxControlNetOutput(
394
+ controlnet_block_samples=controlnet_block_samples,
395
+ controlnet_single_block_samples=controlnet_single_block_samples,
396
+ )
397
+
398
+
399
+ class FluxMultiControlNetModel(ModelMixin):
400
+ r"""
401
+ `FluxMultiControlNetModel` wrapper class for Multi-FluxControlNetModel
402
+
403
+ This module is a wrapper for multiple instances of the `FluxControlNetModel`. The `forward()` API is designed to be
404
+ compatible with `FluxControlNetModel`.
405
+
406
+ Args:
407
+ controlnets (`List[FluxControlNetModel]`):
408
+ Provides additional conditioning to the transformer during the denoising process. You must set multiple
409
+ `FluxControlNetModel` as a list.
410
+ """
411
+
412
+ def __init__(self, controlnets):
413
+ super().__init__()
414
+ self.nets = nn.ModuleList(controlnets)
415
+
416
+ def forward(
417
+ self,
418
+ hidden_states: torch.FloatTensor,
419
+ controlnet_cond: List[torch.tensor],
420
+ controlnet_mode: List[torch.tensor],
421
+ conditioning_scale: List[float],
422
+ encoder_hidden_states: torch.Tensor = None,
423
+ pooled_projections: torch.Tensor = None,
424
+ timestep: torch.LongTensor = None,
425
+ img_ids: torch.Tensor = None,
426
+ txt_ids: torch.Tensor = None,
427
+ guidance: torch.Tensor = None,
428
+ joint_attention_kwargs: Optional[Dict[str, Any]] = None,
429
+ return_dict: bool = True,
430
+ ) -> Union[FluxControlNetOutput, Tuple]:
431
+ # ControlNet-Union with multiple conditions
432
+ # only load one ControlNet to save memory
433
+ if len(self.nets) == 1:
434
+ controlnet = self.nets[0]
435
+
436
+ for i, (image, mode, scale) in enumerate(zip(controlnet_cond, controlnet_mode, conditioning_scale)):
437
+ block_samples, single_block_samples = controlnet(
438
+ hidden_states=hidden_states,
439
+ controlnet_cond=image,
440
+ controlnet_mode=mode[:, None],
441
+ conditioning_scale=scale,
442
+ timestep=timestep,
443
+ guidance=guidance,
444
+ pooled_projections=pooled_projections,
445
+ encoder_hidden_states=encoder_hidden_states,
446
+ txt_ids=txt_ids,
447
+ img_ids=img_ids,
448
+ joint_attention_kwargs=joint_attention_kwargs,
449
+ return_dict=return_dict,
450
+ )
451
+
452
+ # merge samples
453
+ if i == 0:
454
+ control_block_samples = block_samples
455
+ control_single_block_samples = single_block_samples
456
+ else:
457
+ if block_samples is not None and control_block_samples is not None:
458
+ control_block_samples = [
459
+ control_block_sample + block_sample
460
+ for control_block_sample, block_sample in zip(control_block_samples, block_samples)
461
+ ]
462
+ if single_block_samples is not None and control_single_block_samples is not None:
463
+ control_single_block_samples = [
464
+ control_single_block_sample + block_sample
465
+ for control_single_block_sample, block_sample in zip(
466
+ control_single_block_samples, single_block_samples
467
+ )
468
+ ]
469
+
470
+ # Regular Multi-ControlNets
471
+ # load all ControlNets into memory
472
+ else:
473
+ for i, (image, mode, scale, controlnet) in enumerate(
474
+ zip(controlnet_cond, controlnet_mode, conditioning_scale, self.nets)
475
+ ):
476
+ block_samples, single_block_samples = controlnet(
477
+ hidden_states=hidden_states,
478
+ controlnet_cond=image,
479
+ controlnet_mode=mode[:, None],
480
+ conditioning_scale=scale,
481
+ timestep=timestep,
482
+ guidance=guidance,
483
+ pooled_projections=pooled_projections,
484
+ encoder_hidden_states=encoder_hidden_states,
485
+ txt_ids=txt_ids,
486
+ img_ids=img_ids,
487
+ joint_attention_kwargs=joint_attention_kwargs,
488
+ return_dict=return_dict,
489
+ )
490
+
491
+ # merge samples
492
+ if i == 0:
493
+ control_block_samples = block_samples
494
+ control_single_block_samples = single_block_samples
495
+ else:
496
+ if block_samples is not None and control_block_samples is not None:
497
+ control_block_samples = [
498
+ control_block_sample + block_sample
499
+ for control_block_sample, block_sample in zip(control_block_samples, block_samples)
500
+ ]
501
+ if single_block_samples is not None and control_single_block_samples is not None:
502
+ control_single_block_samples = [
503
+ control_single_block_sample + block_sample
504
+ for control_single_block_sample, block_sample in zip(
505
+ control_single_block_samples, single_block_samples
506
+ )
507
+ ]
508
+
509
+ return control_block_samples, control_single_block_samples
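A hedged sketch of how the `FluxControlNetModel.from_transformer` helper above is typically used to bootstrap a small ControlNet from a pretrained Flux transformer; the checkpoint id is an illustrative assumption (and gated on the Hub), not a verified recipe.

from diffusers import FluxControlNetModel, FluxTransformer2DModel

# Assumed checkpoint; any Flux transformer with a compatible config should work.
transformer = FluxTransformer2DModel.from_pretrained(
    "black-forest-labs/FLUX.1-dev", subfolder="transformer"
)

# Copies the embedders and the first blocks; the zeroed `controlnet_x_embedder` makes the
# ControlNet a no-op at initialization, which is the usual starting point for training.
controlnet = FluxControlNetModel.from_transformer(
    transformer, num_layers=4, num_single_layers=10, load_weights_from_transformer=True
)
print(f"{sum(p.numel() for p in controlnet.parameters()) / 1e9:.2f}B parameters")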
exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/models/controlnets/controlnet_hunyuan.py ADDED
@@ -0,0 +1,401 @@
1
+ # Copyright 2025 HunyuanDiT Authors, Qixun Wang and The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from dataclasses import dataclass
15
+ from typing import Dict, Optional, Union
16
+
17
+ import torch
18
+ from torch import nn
19
+
20
+ from ...configuration_utils import ConfigMixin, register_to_config
21
+ from ...utils import BaseOutput, logging
22
+ from ..attention_processor import AttentionProcessor
23
+ from ..embeddings import (
24
+ HunyuanCombinedTimestepTextSizeStyleEmbedding,
25
+ PatchEmbed,
26
+ PixArtAlphaTextProjection,
27
+ )
28
+ from ..modeling_utils import ModelMixin
29
+ from ..transformers.hunyuan_transformer_2d import HunyuanDiTBlock
30
+ from .controlnet import Tuple, zero_module
31
+
32
+
33
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
34
+
35
+
36
+ @dataclass
37
+ class HunyuanControlNetOutput(BaseOutput):
38
+ controlnet_block_samples: Tuple[torch.Tensor]
39
+
40
+
41
+ class HunyuanDiT2DControlNetModel(ModelMixin, ConfigMixin):
42
+ @register_to_config
43
+ def __init__(
44
+ self,
45
+ conditioning_channels: int = 3,
46
+ num_attention_heads: int = 16,
47
+ attention_head_dim: int = 88,
48
+ in_channels: Optional[int] = None,
49
+ patch_size: Optional[int] = None,
50
+ activation_fn: str = "gelu-approximate",
51
+ sample_size=32,
52
+ hidden_size=1152,
53
+ transformer_num_layers: int = 40,
54
+ mlp_ratio: float = 4.0,
55
+ cross_attention_dim: int = 1024,
56
+ cross_attention_dim_t5: int = 2048,
57
+ pooled_projection_dim: int = 1024,
58
+ text_len: int = 77,
59
+ text_len_t5: int = 256,
60
+ use_style_cond_and_image_meta_size: bool = True,
61
+ ):
62
+ super().__init__()
63
+ self.num_heads = num_attention_heads
64
+ self.inner_dim = num_attention_heads * attention_head_dim
65
+
66
+ self.text_embedder = PixArtAlphaTextProjection(
67
+ in_features=cross_attention_dim_t5,
68
+ hidden_size=cross_attention_dim_t5 * 4,
69
+ out_features=cross_attention_dim,
70
+ act_fn="silu_fp32",
71
+ )
72
+
73
+ self.text_embedding_padding = nn.Parameter(
74
+ torch.randn(text_len + text_len_t5, cross_attention_dim, dtype=torch.float32)
75
+ )
76
+
77
+ self.pos_embed = PatchEmbed(
78
+ height=sample_size,
79
+ width=sample_size,
80
+ in_channels=in_channels,
81
+ embed_dim=hidden_size,
82
+ patch_size=patch_size,
83
+ pos_embed_type=None,
84
+ )
85
+
86
+ self.time_extra_emb = HunyuanCombinedTimestepTextSizeStyleEmbedding(
87
+ hidden_size,
88
+ pooled_projection_dim=pooled_projection_dim,
89
+ seq_len=text_len_t5,
90
+ cross_attention_dim=cross_attention_dim_t5,
91
+ use_style_cond_and_image_meta_size=use_style_cond_and_image_meta_size,
92
+ )
93
+
94
+ # controlnet_blocks
95
+ self.controlnet_blocks = nn.ModuleList([])
96
+
97
+ # HunyuanDiT Blocks
98
+ self.blocks = nn.ModuleList(
99
+ [
100
+ HunyuanDiTBlock(
101
+ dim=self.inner_dim,
102
+ num_attention_heads=self.config.num_attention_heads,
103
+ activation_fn=activation_fn,
104
+ ff_inner_dim=int(self.inner_dim * mlp_ratio),
105
+ cross_attention_dim=cross_attention_dim,
106
+ qk_norm=True, # See https://huggingface.co/papers/2302.05442 for details.
107
+ skip=False, # always False as it is the first half of the model
108
+ )
109
+ for layer in range(transformer_num_layers // 2 - 1)
110
+ ]
111
+ )
112
+ self.input_block = zero_module(nn.Linear(hidden_size, hidden_size))
113
+ for _ in range(len(self.blocks)):
114
+ controlnet_block = nn.Linear(hidden_size, hidden_size)
115
+ controlnet_block = zero_module(controlnet_block)
116
+ self.controlnet_blocks.append(controlnet_block)
117
+
118
+ @property
119
+ def attn_processors(self) -> Dict[str, AttentionProcessor]:
120
+ r"""
121
+ Returns:
122
+ `dict` of attention processors: A dictionary containing all attention processors used in the model with
123
+ indexed by its weight name.
124
+ """
125
+ # set recursively
126
+ processors = {}
127
+
128
+ def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
129
+ if hasattr(module, "get_processor"):
130
+ processors[f"{name}.processor"] = module.get_processor(return_deprecated_lora=True)
131
+
132
+ for sub_name, child in module.named_children():
133
+ fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)
134
+
135
+ return processors
136
+
137
+ for name, module in self.named_children():
138
+ fn_recursive_add_processors(name, module, processors)
139
+
140
+ return processors
141
+
142
+ def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
143
+ r"""
144
+ Sets the attention processor to use to compute attention.
145
+
146
+ Parameters:
147
+ processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`):
148
+ The instantiated processor class or a dictionary of processor classes that will be set as the processor
149
+ for **all** `Attention` layers. If `processor` is a dict, the key needs to define the path to the
150
+ corresponding cross attention processor. This is strongly recommended when setting trainable attention
151
+ processors.
152
+ """
153
+ count = len(self.attn_processors.keys())
154
+
155
+ if isinstance(processor, dict) and len(processor) != count:
156
+ raise ValueError(
157
+ f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
158
+ f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
159
+ )
160
+
161
+ def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
162
+ if hasattr(module, "set_processor"):
163
+ if not isinstance(processor, dict):
164
+ module.set_processor(processor)
165
+ else:
166
+ module.set_processor(processor.pop(f"{name}.processor"))
167
+
168
+ for sub_name, child in module.named_children():
169
+ fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)
170
+
171
+ for name, module in self.named_children():
172
+ fn_recursive_attn_processor(name, module, processor)
173
+
174
+ @classmethod
175
+ def from_transformer(
176
+ cls, transformer, conditioning_channels=3, transformer_num_layers=None, load_weights_from_transformer=True
177
+ ):
178
+ config = transformer.config
179
+ activation_fn = config.activation_fn
180
+ attention_head_dim = config.attention_head_dim
181
+ cross_attention_dim = config.cross_attention_dim
182
+ cross_attention_dim_t5 = config.cross_attention_dim_t5
183
+ hidden_size = config.hidden_size
184
+ in_channels = config.in_channels
185
+ mlp_ratio = config.mlp_ratio
186
+ num_attention_heads = config.num_attention_heads
187
+ patch_size = config.patch_size
188
+ sample_size = config.sample_size
189
+ text_len = config.text_len
190
+ text_len_t5 = config.text_len_t5
191
+
192
+ conditioning_channels = conditioning_channels
193
+ transformer_num_layers = transformer_num_layers or config.transformer_num_layers
194
+
195
+ controlnet = cls(
196
+ conditioning_channels=conditioning_channels,
197
+ transformer_num_layers=transformer_num_layers,
198
+ activation_fn=activation_fn,
199
+ attention_head_dim=attention_head_dim,
200
+ cross_attention_dim=cross_attention_dim,
201
+ cross_attention_dim_t5=cross_attention_dim_t5,
202
+ hidden_size=hidden_size,
203
+ in_channels=in_channels,
204
+ mlp_ratio=mlp_ratio,
205
+ num_attention_heads=num_attention_heads,
206
+ patch_size=patch_size,
207
+ sample_size=sample_size,
208
+ text_len=text_len,
209
+ text_len_t5=text_len_t5,
210
+ )
211
+ if load_weights_from_transformer:
212
+ key = controlnet.load_state_dict(transformer.state_dict(), strict=False)
213
+ logger.warning(f"ControlNet loaded from Hunyuan-DiT; missing_keys: {key[0]}")
214
+ return controlnet
215
+
216
+ def forward(
217
+ self,
218
+ hidden_states,
219
+ timestep,
220
+ controlnet_cond: torch.Tensor,
221
+ conditioning_scale: float = 1.0,
222
+ encoder_hidden_states=None,
223
+ text_embedding_mask=None,
224
+ encoder_hidden_states_t5=None,
225
+ text_embedding_mask_t5=None,
226
+ image_meta_size=None,
227
+ style=None,
228
+ image_rotary_emb=None,
229
+ return_dict=True,
230
+ ):
231
+ """
232
+ The [`HunyuanDiT2DControlNetModel`] forward method.
233
+
234
+ Args:
235
+ hidden_states (`torch.Tensor` of shape `(batch size, dim, height, width)`):
236
+ The input tensor.
237
+ timestep ( `torch.LongTensor`, *optional*):
238
+ Used to indicate denoising step.
239
+ controlnet_cond ( `torch.Tensor` ):
240
+ The conditioning input to ControlNet.
241
+ conditioning_scale ( `float` ):
242
+ Indicate the conditioning scale.
243
+ encoder_hidden_states ( `torch.Tensor` of shape `(batch size, sequence len, embed dims)`, *optional*):
244
+ Conditional embeddings for cross attention layer. This is the output of `BertModel`.
245
+ text_embedding_mask: torch.Tensor
246
+ An attention mask of shape `(batch, key_tokens)` is applied to `encoder_hidden_states`. This is the output
247
+ of `BertModel`.
248
+ encoder_hidden_states_t5 ( `torch.Tensor` of shape `(batch size, sequence len, embed dims)`, *optional*):
249
+ Conditional embeddings for cross attention layer. This is the output of T5 Text Encoder.
250
+ text_embedding_mask_t5: torch.Tensor
251
+ An attention mask of shape `(batch, key_tokens)` is applied to `encoder_hidden_states`. This is the output
252
+ of T5 Text Encoder.
253
+ image_meta_size (torch.Tensor):
254
+ Conditional embedding indicate the image sizes
255
+ style: torch.Tensor:
256
+ Conditional embedding indicate the style
257
+ image_rotary_emb (`torch.Tensor`):
258
+ The image rotary embeddings to apply on query and key tensors during attention calculation.
259
+ return_dict: bool
260
+ Whether to return a dictionary.
261
+ """
262
+
263
+ height, width = hidden_states.shape[-2:]
264
+
265
+ hidden_states = self.pos_embed(hidden_states) # b,c,H,W -> b, N, C
266
+
267
+ # 2. pre-process
268
+ hidden_states = hidden_states + self.input_block(self.pos_embed(controlnet_cond))
269
+
270
+ temb = self.time_extra_emb(
271
+ timestep, encoder_hidden_states_t5, image_meta_size, style, hidden_dtype=timestep.dtype
272
+ ) # [B, D]
273
+
274
+ # text projection
275
+ batch_size, sequence_length, _ = encoder_hidden_states_t5.shape
276
+ encoder_hidden_states_t5 = self.text_embedder(
277
+ encoder_hidden_states_t5.view(-1, encoder_hidden_states_t5.shape[-1])
278
+ )
279
+ encoder_hidden_states_t5 = encoder_hidden_states_t5.view(batch_size, sequence_length, -1)
280
+
281
+ encoder_hidden_states = torch.cat([encoder_hidden_states, encoder_hidden_states_t5], dim=1)
282
+ text_embedding_mask = torch.cat([text_embedding_mask, text_embedding_mask_t5], dim=-1)
283
+ text_embedding_mask = text_embedding_mask.unsqueeze(2).bool()
284
+
285
+ encoder_hidden_states = torch.where(text_embedding_mask, encoder_hidden_states, self.text_embedding_padding)
286
+
287
+ block_res_samples = ()
288
+ for layer, block in enumerate(self.blocks):
289
+ hidden_states = block(
290
+ hidden_states,
291
+ temb=temb,
292
+ encoder_hidden_states=encoder_hidden_states,
293
+ image_rotary_emb=image_rotary_emb,
294
+ ) # (N, L, D)
295
+
296
+ block_res_samples = block_res_samples + (hidden_states,)
297
+
298
+ controlnet_block_res_samples = ()
299
+ for block_res_sample, controlnet_block in zip(block_res_samples, self.controlnet_blocks):
300
+ block_res_sample = controlnet_block(block_res_sample)
301
+ controlnet_block_res_samples = controlnet_block_res_samples + (block_res_sample,)
302
+
303
+ # 6. scaling
304
+ controlnet_block_res_samples = [sample * conditioning_scale for sample in controlnet_block_res_samples]
305
+
306
+ if not return_dict:
307
+ return (controlnet_block_res_samples,)
308
+
309
+ return HunyuanControlNetOutput(controlnet_block_samples=controlnet_block_res_samples)
310
+
311
+
312
+ class HunyuanDiT2DMultiControlNetModel(ModelMixin):
313
+ r"""
314
+ `HunyuanDiT2DMultiControlNetModel` wrapper class for Multi-HunyuanDiT2DControlNetModel
315
+
316
+ This module is a wrapper for multiple instances of the `HunyuanDiT2DControlNetModel`. The `forward()` API is
317
+ designed to be compatible with `HunyuanDiT2DControlNetModel`.
318
+
319
+ Args:
320
+ controlnets (`List[HunyuanDiT2DControlNetModel]`):
321
+ Provides additional conditioning to the transformer during the denoising process. You must set multiple
322
+ `HunyuanDiT2DControlNetModel` as a list.
323
+ """
324
+
325
+ def __init__(self, controlnets):
326
+ super().__init__()
327
+ self.nets = nn.ModuleList(controlnets)
328
+
329
+ def forward(
330
+ self,
331
+ hidden_states,
332
+ timestep,
333
+ controlnet_cond: torch.Tensor,
334
+ conditioning_scale: float = 1.0,
335
+ encoder_hidden_states=None,
336
+ text_embedding_mask=None,
337
+ encoder_hidden_states_t5=None,
338
+ text_embedding_mask_t5=None,
339
+ image_meta_size=None,
340
+ style=None,
341
+ image_rotary_emb=None,
342
+ return_dict=True,
343
+ ):
344
+ """
345
+ The [`HunyuanDiT2DMultiControlNetModel`] forward method.
346
+
347
+ Args:
348
+ hidden_states (`torch.Tensor` of shape `(batch size, dim, height, width)`):
349
+ The input tensor.
350
+ timestep ( `torch.LongTensor`, *optional*):
351
+ Used to indicate denoising step.
352
+ controlnet_cond ( `torch.Tensor` ):
353
+ The conditioning input to ControlNet.
354
+ conditioning_scale ( `float` ):
355
+ Indicate the conditioning scale.
356
+ encoder_hidden_states ( `torch.Tensor` of shape `(batch size, sequence len, embed dims)`, *optional*):
357
+ Conditional embeddings for cross attention layer. This is the output of `BertModel`.
358
+ text_embedding_mask: torch.Tensor
359
+ An attention mask of shape `(batch, key_tokens)` is applied to `encoder_hidden_states`. This is the output
360
+ of `BertModel`.
361
+ encoder_hidden_states_t5 ( `torch.Tensor` of shape `(batch size, sequence len, embed dims)`, *optional*):
362
+ Conditional embeddings for cross attention layer. This is the output of T5 Text Encoder.
363
+ text_embedding_mask_t5: torch.Tensor
364
+ An attention mask of shape `(batch, key_tokens)` is applied to `encoder_hidden_states`. This is the output
365
+ of T5 Text Encoder.
366
+ image_meta_size (torch.Tensor):
367
+ Conditional embedding indicate the image sizes
368
+ style: torch.Tensor:
369
+ Conditional embedding indicate the style
370
+ image_rotary_emb (`torch.Tensor`):
371
+ The image rotary embeddings to apply on query and key tensors during attention calculation.
372
+ return_dict: bool
373
+ Whether to return a dictionary.
374
+ """
375
+ for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond, conditioning_scale, self.nets)):
376
+ block_samples = controlnet(
377
+ hidden_states=hidden_states,
378
+ timestep=timestep,
379
+ controlnet_cond=image,
380
+ conditioning_scale=scale,
381
+ encoder_hidden_states=encoder_hidden_states,
382
+ text_embedding_mask=text_embedding_mask,
383
+ encoder_hidden_states_t5=encoder_hidden_states_t5,
384
+ text_embedding_mask_t5=text_embedding_mask_t5,
385
+ image_meta_size=image_meta_size,
386
+ style=style,
387
+ image_rotary_emb=image_rotary_emb,
388
+ return_dict=return_dict,
389
+ )
390
+
391
+ # merge samples
392
+ if i == 0:
393
+ control_block_samples = block_samples
394
+ else:
395
+ control_block_samples = [
396
+ control_block_sample + block_sample
397
+ for control_block_sample, block_sample in zip(control_block_samples[0], block_samples[0])
398
+ ]
399
+ control_block_samples = (control_block_samples,)
400
+
401
+ return control_block_samples
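For the Hunyuan variant above, a minimal sketch (assuming the classes are exported by `diffusers` and using an illustrative checkpoint id) of deriving ControlNets from the base transformer and combining them with the multi-ControlNet wrapper:

from diffusers import (
    HunyuanDiT2DControlNetModel,
    HunyuanDiT2DModel,
    HunyuanDiT2DMultiControlNetModel,
)

transformer = HunyuanDiT2DModel.from_pretrained(
    "Tencent-Hunyuan/HunyuanDiT-v1.2-Diffusers", subfolder="transformer"
)

# `from_transformer` copies matching weights and zero-initializes the controlnet blocks.
controlnet_a = HunyuanDiT2DControlNetModel.from_transformer(transformer)
controlnet_b = HunyuanDiT2DControlNetModel.from_transformer(transformer)

# The wrapper's forward() sums the per-block residuals of each ControlNet.
multi_controlnet = HunyuanDiT2DMultiControlNetModel([controlnet_a, controlnet_b])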
exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/models/controlnets/controlnet_qwenimage.py ADDED
@@ -0,0 +1,359 @@
1
+ # Copyright 2025 Black Forest Labs, The HuggingFace Team and The InstantX Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from dataclasses import dataclass
16
+ from typing import Any, Dict, List, Optional, Tuple, Union
17
+
18
+ import torch
19
+ import torch.nn as nn
20
+
21
+ from ...configuration_utils import ConfigMixin, register_to_config
22
+ from ...loaders import FromOriginalModelMixin, PeftAdapterMixin
23
+ from ...utils import USE_PEFT_BACKEND, BaseOutput, logging, scale_lora_layers, unscale_lora_layers
24
+ from ..attention_processor import AttentionProcessor
25
+ from ..cache_utils import CacheMixin
26
+ from ..controlnets.controlnet import zero_module
27
+ from ..modeling_outputs import Transformer2DModelOutput
28
+ from ..modeling_utils import ModelMixin
29
+ from ..transformers.transformer_qwenimage import (
30
+ QwenEmbedRope,
31
+ QwenImageTransformerBlock,
32
+ QwenTimestepProjEmbeddings,
33
+ RMSNorm,
34
+ )
35
+
36
+
37
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
38
+
39
+
40
+ @dataclass
41
+ class QwenImageControlNetOutput(BaseOutput):
42
+ controlnet_block_samples: Tuple[torch.Tensor]
43
+
44
+
45
+ class QwenImageControlNetModel(ModelMixin, ConfigMixin, PeftAdapterMixin, FromOriginalModelMixin, CacheMixin):
46
+ _supports_gradient_checkpointing = True
47
+
48
+ @register_to_config
49
+ def __init__(
50
+ self,
51
+ patch_size: int = 2,
52
+ in_channels: int = 64,
53
+ out_channels: Optional[int] = 16,
54
+ num_layers: int = 60,
55
+ attention_head_dim: int = 128,
56
+ num_attention_heads: int = 24,
57
+ joint_attention_dim: int = 3584,
58
+ axes_dims_rope: Tuple[int, int, int] = (16, 56, 56),
59
+ extra_condition_channels: int = 0, # for controlnet-inpainting
60
+ ):
61
+ super().__init__()
62
+ self.out_channels = out_channels or in_channels
63
+ self.inner_dim = num_attention_heads * attention_head_dim
64
+
65
+ self.pos_embed = QwenEmbedRope(theta=10000, axes_dim=list(axes_dims_rope), scale_rope=True)
66
+
67
+ self.time_text_embed = QwenTimestepProjEmbeddings(embedding_dim=self.inner_dim)
68
+
69
+ self.txt_norm = RMSNorm(joint_attention_dim, eps=1e-6)
70
+
71
+ self.img_in = nn.Linear(in_channels, self.inner_dim)
72
+ self.txt_in = nn.Linear(joint_attention_dim, self.inner_dim)
73
+
74
+ self.transformer_blocks = nn.ModuleList(
75
+ [
76
+ QwenImageTransformerBlock(
77
+ dim=self.inner_dim,
78
+ num_attention_heads=num_attention_heads,
79
+ attention_head_dim=attention_head_dim,
80
+ )
81
+ for _ in range(num_layers)
82
+ ]
83
+ )
84
+
85
+ # controlnet_blocks
86
+ self.controlnet_blocks = nn.ModuleList([])
87
+ for _ in range(len(self.transformer_blocks)):
88
+ self.controlnet_blocks.append(zero_module(nn.Linear(self.inner_dim, self.inner_dim)))
89
+ self.controlnet_x_embedder = zero_module(
90
+ torch.nn.Linear(in_channels + extra_condition_channels, self.inner_dim)
91
+ )
92
+
93
+ self.gradient_checkpointing = False
94
+
95
+ @property
96
+ # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.attn_processors
97
+ def attn_processors(self):
98
+ r"""
99
+ Returns:
100
+ `dict` of attention processors: A dictionary containing all attention processors used in the model with
101
+ indexed by its weight name.
102
+ """
103
+ # set recursively
104
+ processors = {}
105
+
106
+ def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
107
+ if hasattr(module, "get_processor"):
108
+ processors[f"{name}.processor"] = module.get_processor()
109
+
110
+ for sub_name, child in module.named_children():
111
+ fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)
112
+
113
+ return processors
114
+
115
+ for name, module in self.named_children():
116
+ fn_recursive_add_processors(name, module, processors)
117
+
118
+ return processors
119
+
120
+ # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_attn_processor
121
+ def set_attn_processor(self, processor):
122
+ r"""
123
+ Sets the attention processor to use to compute attention.
124
+
125
+ Parameters:
126
+ processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`):
127
+ The instantiated processor class or a dictionary of processor classes that will be set as the processor
128
+ for **all** `Attention` layers.
129
+
130
+ If `processor` is a dict, the key needs to define the path to the corresponding cross attention
131
+ processor. This is strongly recommended when setting trainable attention processors.
132
+
133
+ """
134
+ count = len(self.attn_processors.keys())
135
+
136
+ if isinstance(processor, dict) and len(processor) != count:
137
+ raise ValueError(
138
+ f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
139
+ f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
140
+ )
141
+
142
+ def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
143
+ if hasattr(module, "set_processor"):
144
+ if not isinstance(processor, dict):
145
+ module.set_processor(processor)
146
+ else:
147
+ module.set_processor(processor.pop(f"{name}.processor"))
148
+
149
+ for sub_name, child in module.named_children():
150
+ fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)
151
+
152
+ for name, module in self.named_children():
153
+ fn_recursive_attn_processor(name, module, processor)
154
+
155
+ @classmethod
156
+ def from_transformer(
157
+ cls,
158
+ transformer,
159
+ num_layers: int = 5,
160
+ attention_head_dim: int = 128,
161
+ num_attention_heads: int = 24,
162
+ load_weights_from_transformer=True,
163
+ extra_condition_channels: int = 0,
164
+ ):
165
+ config = dict(transformer.config)
166
+ config["num_layers"] = num_layers
167
+ config["attention_head_dim"] = attention_head_dim
168
+ config["num_attention_heads"] = num_attention_heads
169
+ config["extra_condition_channels"] = extra_condition_channels
170
+
171
+ controlnet = cls.from_config(config)
172
+
173
+ if load_weights_from_transformer:
174
+ controlnet.pos_embed.load_state_dict(transformer.pos_embed.state_dict())
175
+ controlnet.time_text_embed.load_state_dict(transformer.time_text_embed.state_dict())
176
+ controlnet.img_in.load_state_dict(transformer.img_in.state_dict())
177
+ controlnet.txt_in.load_state_dict(transformer.txt_in.state_dict())
178
+ controlnet.transformer_blocks.load_state_dict(transformer.transformer_blocks.state_dict(), strict=False)
179
+ controlnet.controlnet_x_embedder = zero_module(controlnet.controlnet_x_embedder)
180
+
181
+ return controlnet
182
+
183
+ def forward(
184
+ self,
185
+ hidden_states: torch.Tensor,
186
+ controlnet_cond: torch.Tensor,
187
+ conditioning_scale: float = 1.0,
188
+ encoder_hidden_states: torch.Tensor = None,
189
+ encoder_hidden_states_mask: torch.Tensor = None,
190
+ timestep: torch.LongTensor = None,
191
+ img_shapes: Optional[List[Tuple[int, int, int]]] = None,
192
+ txt_seq_lens: Optional[List[int]] = None,
193
+ joint_attention_kwargs: Optional[Dict[str, Any]] = None,
194
+ return_dict: bool = True,
195
+ ) -> Union[torch.FloatTensor, Transformer2DModelOutput]:
196
+ """
197
+ The [`QwenImageControlNetModel`] forward method.
198
+
199
+ Args:
200
+ hidden_states (`torch.FloatTensor` of shape `(batch_size, image_sequence_length, in_channels)`):
201
+ Input `hidden_states`.
202
+ controlnet_cond (`torch.Tensor`):
203
+ The conditional input tensor of shape `(batch_size, sequence_length, hidden_size)`.
204
+ conditioning_scale (`float`, defaults to `1.0`):
205
+ The scale factor for ControlNet outputs.
206
+ encoder_hidden_states (`torch.FloatTensor` of shape `(batch size, sequence_len, embed_dims)`):
207
+ Conditional embeddings (embeddings computed from the input conditions such as prompts) to use.
208
+ timestep (`torch.LongTensor`):
211
+ Used to indicate denoising step.
214
+ joint_attention_kwargs (`dict`, *optional*):
215
+ A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
216
+ `self.processor` in
217
+ [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
218
+ return_dict (`bool`, *optional*, defaults to `True`):
219
+ Whether or not to return a [`~models.transformer_2d.Transformer2DModelOutput`] instead of a plain
220
+ tuple.
221
+
222
+ Returns:
223
+ If `return_dict` is True, an [`~models.transformer_2d.Transformer2DModelOutput`] is returned, otherwise a
224
+ `tuple` where the first element is the sample tensor.
225
+ """
226
+ if joint_attention_kwargs is not None:
227
+ joint_attention_kwargs = joint_attention_kwargs.copy()
228
+ lora_scale = joint_attention_kwargs.pop("scale", 1.0)
229
+ else:
230
+ lora_scale = 1.0
231
+
232
+ if USE_PEFT_BACKEND:
233
+ # weight the lora layers by setting `lora_scale` for each PEFT layer
234
+ scale_lora_layers(self, lora_scale)
235
+ else:
236
+ if joint_attention_kwargs is not None and joint_attention_kwargs.get("scale", None) is not None:
237
+ logger.warning(
238
+ "Passing `scale` via `joint_attention_kwargs` when not using the PEFT backend is ineffective."
239
+ )
240
+ hidden_states = self.img_in(hidden_states)
241
+
242
+ # add
243
+ hidden_states = hidden_states + self.controlnet_x_embedder(controlnet_cond)
244
+
245
+ temb = self.time_text_embed(timestep, hidden_states)
246
+
247
+ image_rotary_emb = self.pos_embed(img_shapes, txt_seq_lens, device=hidden_states.device)
248
+
249
+ timestep = timestep.to(hidden_states.dtype)
250
+ encoder_hidden_states = self.txt_norm(encoder_hidden_states)
251
+ encoder_hidden_states = self.txt_in(encoder_hidden_states)
252
+
253
+ block_samples = ()
254
+ for index_block, block in enumerate(self.transformer_blocks):
255
+ if torch.is_grad_enabled() and self.gradient_checkpointing:
256
+ encoder_hidden_states, hidden_states = self._gradient_checkpointing_func(
257
+ block,
258
+ hidden_states,
259
+ encoder_hidden_states,
260
+ encoder_hidden_states_mask,
261
+ temb,
262
+ image_rotary_emb,
263
+ )
264
+
265
+ else:
266
+ encoder_hidden_states, hidden_states = block(
267
+ hidden_states=hidden_states,
268
+ encoder_hidden_states=encoder_hidden_states,
269
+ encoder_hidden_states_mask=encoder_hidden_states_mask,
270
+ temb=temb,
271
+ image_rotary_emb=image_rotary_emb,
272
+ joint_attention_kwargs=joint_attention_kwargs,
273
+ )
274
+ block_samples = block_samples + (hidden_states,)
275
+
276
+ # controlnet block
277
+ controlnet_block_samples = ()
278
+ for block_sample, controlnet_block in zip(block_samples, self.controlnet_blocks):
279
+ block_sample = controlnet_block(block_sample)
280
+ controlnet_block_samples = controlnet_block_samples + (block_sample,)
281
+
282
+ # scaling
283
+ controlnet_block_samples = [sample * conditioning_scale for sample in controlnet_block_samples]
284
+ controlnet_block_samples = None if len(controlnet_block_samples) == 0 else controlnet_block_samples
285
+
286
+ if USE_PEFT_BACKEND:
287
+ # remove `lora_scale` from each PEFT layer
288
+ unscale_lora_layers(self, lora_scale)
289
+
290
+ if not return_dict:
291
+ return controlnet_block_samples
292
+
293
+ return QwenImageControlNetOutput(
294
+ controlnet_block_samples=controlnet_block_samples,
295
+ )
296
+
297
+
298
+ class QwenImageMultiControlNetModel(ModelMixin, ConfigMixin, PeftAdapterMixin, FromOriginalModelMixin, CacheMixin):
299
+ r"""
300
+ `QwenImageMultiControlNetModel` wrapper class for Multi-QwenImageControlNetModel
301
+
302
+ This module is a wrapper for multiple instances of the `QwenImageControlNetModel`. The `forward()` API is designed
303
+ to be compatible with `QwenImageControlNetModel`.
304
+
305
+ Args:
306
+ controlnets (`List[QwenImageControlNetModel]`):
307
+ Provides additional conditioning to the unet during the denoising process. You must set multiple
308
+ `QwenImageControlNetModel` as a list.
309
+ """
310
+
311
+ def __init__(self, controlnets):
312
+ super().__init__()
313
+ self.nets = nn.ModuleList(controlnets)
314
+
315
+ def forward(
316
+ self,
317
+ hidden_states: torch.FloatTensor,
318
+ controlnet_cond: List[torch.Tensor],
319
+ conditioning_scale: List[float],
320
+ encoder_hidden_states: torch.Tensor = None,
321
+ encoder_hidden_states_mask: torch.Tensor = None,
322
+ timestep: torch.LongTensor = None,
323
+ img_shapes: Optional[List[Tuple[int, int, int]]] = None,
324
+ txt_seq_lens: Optional[List[int]] = None,
325
+ joint_attention_kwargs: Optional[Dict[str, Any]] = None,
326
+ return_dict: bool = True,
327
+ ) -> Union[QwenImageControlNetOutput, Tuple]:
328
+ # ControlNet-Union with multiple conditions
329
+ # only one ControlNet is loaded, to save memory
330
+ if len(self.nets) == 1:
331
+ controlnet = self.nets[0]
332
+
333
+ for i, (image, scale) in enumerate(zip(controlnet_cond, conditioning_scale)):
334
+ block_samples = controlnet(
335
+ hidden_states=hidden_states,
336
+ controlnet_cond=image,
337
+ conditioning_scale=scale,
338
+ encoder_hidden_states=encoder_hidden_states,
339
+ encoder_hidden_states_mask=encoder_hidden_states_mask,
340
+ timestep=timestep,
341
+ img_shapes=img_shapes,
342
+ txt_seq_lens=txt_seq_lens,
343
+ joint_attention_kwargs=joint_attention_kwargs,
344
+ return_dict=return_dict,
345
+ )
346
+
347
+ # merge samples
348
+ if i == 0:
349
+ control_block_samples = block_samples
350
+ else:
351
+ if block_samples is not None and control_block_samples is not None:
352
+ control_block_samples = [
353
+ control_block_sample + block_sample
354
+ for control_block_sample, block_sample in zip(control_block_samples, block_samples)
355
+ ]
356
+ else:
357
+ raise ValueError("QwenImageMultiControlNetModel only supports a single controlnet-union now.")
358
+
359
+ return control_block_samples
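For orientation, the snippet below is a minimal usage sketch of the `QwenImageControlNetModel` defined above: it derives a ControlNet from an existing transformer via `from_transformer` and runs one conditioning pass. The `transformer` object, tensor shapes, timestep value, and `img_shapes` entry are illustrative assumptions, not part of this commit.

import torch

# `transformer` is assumed to be an already-loaded QwenImage transformer whose config
# exposes `in_channels` and `joint_attention_dim`.
controlnet = QwenImageControlNetModel.from_transformer(transformer, num_layers=5)

batch, img_seq, txt_seq = 1, 1024, 77
hidden_states = torch.randn(batch, img_seq, transformer.config.in_channels)    # packed image latents
controlnet_cond = torch.randn(batch, img_seq, transformer.config.in_channels)  # packed control latents
encoder_hidden_states = torch.randn(batch, txt_seq, transformer.config.joint_attention_dim)

out = controlnet(
    hidden_states=hidden_states,
    controlnet_cond=controlnet_cond,
    conditioning_scale=1.0,
    encoder_hidden_states=encoder_hidden_states,
    timestep=torch.tensor([500]),
    img_shapes=[(1, 32, 32)],  # assumed (frames, packed_height, packed_width); 32 * 32 == img_seq
    txt_seq_lens=[txt_seq],
)
# out.controlnet_block_samples: one residual per transformer block (zero right after
# initialization, since the output projections are zero modules); the base transformer
# adds these to its own block outputs during denoising.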
exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/models/controlnets/controlnet_sana.py ADDED
@@ -0,0 +1,290 @@
1
+ # Copyright 2025 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from dataclasses import dataclass
16
+ from typing import Any, Dict, Optional, Tuple, Union
17
+
18
+ import torch
19
+ from torch import nn
20
+
21
+ from ...configuration_utils import ConfigMixin, register_to_config
22
+ from ...loaders import PeftAdapterMixin
23
+ from ...utils import USE_PEFT_BACKEND, BaseOutput, logging, scale_lora_layers, unscale_lora_layers
24
+ from ..attention_processor import AttentionProcessor
25
+ from ..embeddings import PatchEmbed, PixArtAlphaTextProjection
26
+ from ..modeling_outputs import Transformer2DModelOutput
27
+ from ..modeling_utils import ModelMixin
28
+ from ..normalization import AdaLayerNormSingle, RMSNorm
29
+ from ..transformers.sana_transformer import SanaTransformerBlock
30
+ from .controlnet import zero_module
31
+
32
+
33
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
34
+
35
+
36
+ @dataclass
37
+ class SanaControlNetOutput(BaseOutput):
38
+ controlnet_block_samples: Tuple[torch.Tensor]
39
+
40
+
41
+ class SanaControlNetModel(ModelMixin, ConfigMixin, PeftAdapterMixin):
42
+ _supports_gradient_checkpointing = True
43
+ _no_split_modules = ["SanaTransformerBlock", "PatchEmbed"]
44
+ _skip_layerwise_casting_patterns = ["patch_embed", "norm"]
45
+
46
+ @register_to_config
47
+ def __init__(
48
+ self,
49
+ in_channels: int = 32,
50
+ out_channels: Optional[int] = 32,
51
+ num_attention_heads: int = 70,
52
+ attention_head_dim: int = 32,
53
+ num_layers: int = 7,
54
+ num_cross_attention_heads: Optional[int] = 20,
55
+ cross_attention_head_dim: Optional[int] = 112,
56
+ cross_attention_dim: Optional[int] = 2240,
57
+ caption_channels: int = 2304,
58
+ mlp_ratio: float = 2.5,
59
+ dropout: float = 0.0,
60
+ attention_bias: bool = False,
61
+ sample_size: int = 32,
62
+ patch_size: int = 1,
63
+ norm_elementwise_affine: bool = False,
64
+ norm_eps: float = 1e-6,
65
+ interpolation_scale: Optional[int] = None,
66
+ ) -> None:
67
+ super().__init__()
68
+
69
+ out_channels = out_channels or in_channels
70
+ inner_dim = num_attention_heads * attention_head_dim
71
+
72
+ # 1. Patch Embedding
73
+ self.patch_embed = PatchEmbed(
74
+ height=sample_size,
75
+ width=sample_size,
76
+ patch_size=patch_size,
77
+ in_channels=in_channels,
78
+ embed_dim=inner_dim,
79
+ interpolation_scale=interpolation_scale,
80
+ pos_embed_type="sincos" if interpolation_scale is not None else None,
81
+ )
82
+
83
+ # 2. Additional condition embeddings
84
+ self.time_embed = AdaLayerNormSingle(inner_dim)
85
+
86
+ self.caption_projection = PixArtAlphaTextProjection(in_features=caption_channels, hidden_size=inner_dim)
87
+ self.caption_norm = RMSNorm(inner_dim, eps=1e-5, elementwise_affine=True)
88
+
89
+ # 3. Transformer blocks
90
+ self.transformer_blocks = nn.ModuleList(
91
+ [
92
+ SanaTransformerBlock(
93
+ inner_dim,
94
+ num_attention_heads,
95
+ attention_head_dim,
96
+ dropout=dropout,
97
+ num_cross_attention_heads=num_cross_attention_heads,
98
+ cross_attention_head_dim=cross_attention_head_dim,
99
+ cross_attention_dim=cross_attention_dim,
100
+ attention_bias=attention_bias,
101
+ norm_elementwise_affine=norm_elementwise_affine,
102
+ norm_eps=norm_eps,
103
+ mlp_ratio=mlp_ratio,
104
+ )
105
+ for _ in range(num_layers)
106
+ ]
107
+ )
108
+
109
+ # controlnet_blocks
110
+ self.controlnet_blocks = nn.ModuleList([])
111
+
112
+ self.input_block = zero_module(nn.Linear(inner_dim, inner_dim))
113
+ for _ in range(len(self.transformer_blocks)):
114
+ controlnet_block = nn.Linear(inner_dim, inner_dim)
115
+ controlnet_block = zero_module(controlnet_block)
116
+ self.controlnet_blocks.append(controlnet_block)
117
+
118
+ self.gradient_checkpointing = False
119
+
120
+ @property
121
+ # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.attn_processors
122
+ def attn_processors(self) -> Dict[str, AttentionProcessor]:
123
+ r"""
124
+ Returns:
125
+ `dict` of attention processors: A dictionary containing all attention processors used in the model with
126
+ indexed by its weight name.
127
+ """
128
+ # set recursively
129
+ processors = {}
130
+
131
+ def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
132
+ if hasattr(module, "get_processor"):
133
+ processors[f"{name}.processor"] = module.get_processor()
134
+
135
+ for sub_name, child in module.named_children():
136
+ fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)
137
+
138
+ return processors
139
+
140
+ for name, module in self.named_children():
141
+ fn_recursive_add_processors(name, module, processors)
142
+
143
+ return processors
144
+
145
+ # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_attn_processor
146
+ def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
147
+ r"""
148
+ Sets the attention processor to use to compute attention.
149
+
150
+ Parameters:
151
+ processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`):
152
+ The instantiated processor class or a dictionary of processor classes that will be set as the processor
153
+ for **all** `Attention` layers.
154
+
155
+ If `processor` is a dict, the key needs to define the path to the corresponding cross attention
156
+ processor. This is strongly recommended when setting trainable attention processors.
157
+
158
+ """
159
+ count = len(self.attn_processors.keys())
160
+
161
+ if isinstance(processor, dict) and len(processor) != count:
162
+ raise ValueError(
163
+ f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
164
+ f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
165
+ )
166
+
167
+ def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
168
+ if hasattr(module, "set_processor"):
169
+ if not isinstance(processor, dict):
170
+ module.set_processor(processor)
171
+ else:
172
+ module.set_processor(processor.pop(f"{name}.processor"))
173
+
174
+ for sub_name, child in module.named_children():
175
+ fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)
176
+
177
+ for name, module in self.named_children():
178
+ fn_recursive_attn_processor(name, module, processor)
179
+
180
+ def forward(
181
+ self,
182
+ hidden_states: torch.Tensor,
183
+ encoder_hidden_states: torch.Tensor,
184
+ timestep: torch.LongTensor,
185
+ controlnet_cond: torch.Tensor,
186
+ conditioning_scale: float = 1.0,
187
+ encoder_attention_mask: Optional[torch.Tensor] = None,
188
+ attention_mask: Optional[torch.Tensor] = None,
189
+ attention_kwargs: Optional[Dict[str, Any]] = None,
190
+ return_dict: bool = True,
191
+ ) -> Union[Tuple[torch.Tensor, ...], Transformer2DModelOutput]:
192
+ if attention_kwargs is not None:
193
+ attention_kwargs = attention_kwargs.copy()
194
+ lora_scale = attention_kwargs.pop("scale", 1.0)
195
+ else:
196
+ lora_scale = 1.0
197
+
198
+ if USE_PEFT_BACKEND:
199
+ # weight the lora layers by setting `lora_scale` for each PEFT layer
200
+ scale_lora_layers(self, lora_scale)
201
+ else:
202
+ if attention_kwargs is not None and attention_kwargs.get("scale", None) is not None:
203
+ logger.warning(
204
+ "Passing `scale` via `attention_kwargs` when not using the PEFT backend is ineffective."
205
+ )
206
+
207
+ # ensure attention_mask is a bias, and give it a singleton query_tokens dimension.
208
+ # we may have done this conversion already, e.g. if we came here via UNet2DConditionModel#forward.
209
+ # we can tell by counting dims; if ndim == 2: it's a mask rather than a bias.
210
+ # expects mask of shape:
211
+ # [batch, key_tokens]
212
+ # adds singleton query_tokens dimension:
213
+ # [batch, 1, key_tokens]
214
+ # this helps to broadcast it as a bias over attention scores, which will be in one of the following shapes:
215
+ # [batch, heads, query_tokens, key_tokens] (e.g. torch sdp attn)
216
+ # [batch * heads, query_tokens, key_tokens] (e.g. xformers or classic attn)
217
+ if attention_mask is not None and attention_mask.ndim == 2:
218
+ # assume that mask is expressed as:
219
+ # (1 = keep, 0 = discard)
220
+ # convert mask into a bias that can be added to attention scores:
221
+ # (keep = +0, discard = -10000.0)
222
+ attention_mask = (1 - attention_mask.to(hidden_states.dtype)) * -10000.0
223
+ attention_mask = attention_mask.unsqueeze(1)
224
+
225
+ # convert encoder_attention_mask to a bias the same way we do for attention_mask
226
+ if encoder_attention_mask is not None and encoder_attention_mask.ndim == 2:
227
+ encoder_attention_mask = (1 - encoder_attention_mask.to(hidden_states.dtype)) * -10000.0
228
+ encoder_attention_mask = encoder_attention_mask.unsqueeze(1)
229
+
230
+ # 1. Input
231
+ batch_size, num_channels, height, width = hidden_states.shape
232
+ p = self.config.patch_size
233
+ post_patch_height, post_patch_width = height // p, width // p
234
+
235
+ hidden_states = self.patch_embed(hidden_states)
236
+ hidden_states = hidden_states + self.input_block(self.patch_embed(controlnet_cond.to(hidden_states.dtype)))
237
+
238
+ timestep, embedded_timestep = self.time_embed(
239
+ timestep, batch_size=batch_size, hidden_dtype=hidden_states.dtype
240
+ )
241
+
242
+ encoder_hidden_states = self.caption_projection(encoder_hidden_states)
243
+ encoder_hidden_states = encoder_hidden_states.view(batch_size, -1, hidden_states.shape[-1])
244
+
245
+ encoder_hidden_states = self.caption_norm(encoder_hidden_states)
246
+
247
+ # 2. Transformer blocks
248
+ block_res_samples = ()
249
+ if torch.is_grad_enabled() and self.gradient_checkpointing:
250
+ for block in self.transformer_blocks:
251
+ hidden_states = self._gradient_checkpointing_func(
252
+ block,
253
+ hidden_states,
254
+ attention_mask,
255
+ encoder_hidden_states,
256
+ encoder_attention_mask,
257
+ timestep,
258
+ post_patch_height,
259
+ post_patch_width,
260
+ )
261
+ block_res_samples = block_res_samples + (hidden_states,)
262
+ else:
263
+ for block in self.transformer_blocks:
264
+ hidden_states = block(
265
+ hidden_states,
266
+ attention_mask,
267
+ encoder_hidden_states,
268
+ encoder_attention_mask,
269
+ timestep,
270
+ post_patch_height,
271
+ post_patch_width,
272
+ )
273
+ block_res_samples = block_res_samples + (hidden_states,)
274
+
275
+ # 3. ControlNet blocks
276
+ controlnet_block_res_samples = ()
277
+ for block_res_sample, controlnet_block in zip(block_res_samples, self.controlnet_blocks):
278
+ block_res_sample = controlnet_block(block_res_sample)
279
+ controlnet_block_res_samples = controlnet_block_res_samples + (block_res_sample,)
280
+
281
+ if USE_PEFT_BACKEND:
282
+ # remove `lora_scale` from each PEFT layer
283
+ unscale_lora_layers(self, lora_scale)
284
+
285
+ controlnet_block_res_samples = [sample * conditioning_scale for sample in controlnet_block_res_samples]
286
+
287
+ if not return_dict:
288
+ return (controlnet_block_res_samples,)
289
+
290
+ return SanaControlNetOutput(controlnet_block_samples=controlnet_block_res_samples)
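As a standalone illustration of the mask handling in `SanaControlNetModel.forward` above, the sketch below converts a `(1 = keep, 0 = discard)` key-token mask into the additive bias with a singleton query dimension; the mask values are an illustrative assumption.

import torch

attention_mask = torch.tensor([[1, 1, 0, 0]])   # [batch, key_tokens], 1 = keep, 0 = discard
bias = (1 - attention_mask.float()) * -10000.0  # keep -> 0.0, discard -> -10000.0
bias = bias.unsqueeze(1)                        # [batch, 1, key_tokens], broadcasts over query tokens
print(bias.shape)                               # torch.Size([1, 1, 4])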
exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/models/controlnets/controlnet_sd3.py ADDED
@@ -0,0 +1,513 @@
1
+ # Copyright 2025 Stability AI, The HuggingFace Team and The InstantX Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+
16
+ from dataclasses import dataclass
17
+ from typing import Any, Dict, List, Optional, Tuple, Union
18
+
19
+ import torch
20
+ import torch.nn as nn
21
+
22
+ from ...configuration_utils import ConfigMixin, register_to_config
23
+ from ...loaders import FromOriginalModelMixin, PeftAdapterMixin
24
+ from ...utils import USE_PEFT_BACKEND, logging, scale_lora_layers, unscale_lora_layers
25
+ from ..attention import JointTransformerBlock
26
+ from ..attention_processor import Attention, AttentionProcessor, FusedJointAttnProcessor2_0
27
+ from ..embeddings import CombinedTimestepTextProjEmbeddings, PatchEmbed
28
+ from ..modeling_outputs import Transformer2DModelOutput
29
+ from ..modeling_utils import ModelMixin
30
+ from ..transformers.transformer_sd3 import SD3SingleTransformerBlock
31
+ from .controlnet import BaseOutput, zero_module
32
+
33
+
34
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
35
+
36
+
37
+ @dataclass
38
+ class SD3ControlNetOutput(BaseOutput):
39
+ controlnet_block_samples: Tuple[torch.Tensor]
40
+
41
+
42
+ class SD3ControlNetModel(ModelMixin, ConfigMixin, PeftAdapterMixin, FromOriginalModelMixin):
43
+ r"""
44
+ ControlNet model for [Stable Diffusion 3](https://huggingface.co/papers/2403.03206).
45
+
46
+ Parameters:
47
+ sample_size (`int`, defaults to `128`):
48
+ The width/height of the latents. This is fixed during training since it is used to learn a number of
49
+ position embeddings.
50
+ patch_size (`int`, defaults to `2`):
51
+ Patch size to turn the input data into small patches.
52
+ in_channels (`int`, defaults to `16`):
53
+ The number of latent channels in the input.
54
+ num_layers (`int`, defaults to `18`):
55
+ The number of layers of transformer blocks to use.
56
+ attention_head_dim (`int`, defaults to `64`):
57
+ The number of channels in each head.
58
+ num_attention_heads (`int`, defaults to `18`):
59
+ The number of heads to use for multi-head attention.
60
+ joint_attention_dim (`int`, defaults to `4096`):
61
+ The embedding dimension to use for joint text-image attention.
62
+ caption_projection_dim (`int`, defaults to `1152`):
63
+ The embedding dimension of caption embeddings.
64
+ pooled_projection_dim (`int`, defaults to `2048`):
65
+ The embedding dimension of pooled text projections.
66
+ out_channels (`int`, defaults to `16`):
67
+ The number of latent channels in the output.
68
+ pos_embed_max_size (`int`, defaults to `96`):
69
+ The maximum latent height/width of positional embeddings.
70
+ extra_conditioning_channels (`int`, defaults to `0`):
71
+ The number of extra channels to use for conditioning for patch embedding.
72
+ dual_attention_layers (`Tuple[int, ...]`, defaults to `()`):
73
+ The number of dual-stream transformer blocks to use.
74
+ qk_norm (`str`, *optional*, defaults to `None`):
75
+ The normalization to use for query and key in the attention layer. If `None`, no normalization is used.
76
+ pos_embed_type (`str`, defaults to `"sincos"`):
77
+ The type of positional embedding to use. Choose between `"sincos"` and `None`.
78
+ use_pos_embed (`bool`, defaults to `True`):
79
+ Whether to use positional embeddings.
80
+ force_zeros_for_pooled_projection (`bool`, defaults to `True`):
81
+ Whether to force zeros for pooled projection embeddings. This is handled in the pipelines by reading the
82
+ config value of the ControlNet model.
83
+ """
84
+
85
+ _supports_gradient_checkpointing = True
86
+
87
+ @register_to_config
88
+ def __init__(
89
+ self,
90
+ sample_size: int = 128,
91
+ patch_size: int = 2,
92
+ in_channels: int = 16,
93
+ num_layers: int = 18,
94
+ attention_head_dim: int = 64,
95
+ num_attention_heads: int = 18,
96
+ joint_attention_dim: int = 4096,
97
+ caption_projection_dim: int = 1152,
98
+ pooled_projection_dim: int = 2048,
99
+ out_channels: int = 16,
100
+ pos_embed_max_size: int = 96,
101
+ extra_conditioning_channels: int = 0,
102
+ dual_attention_layers: Tuple[int, ...] = (),
103
+ qk_norm: Optional[str] = None,
104
+ pos_embed_type: Optional[str] = "sincos",
105
+ use_pos_embed: bool = True,
106
+ force_zeros_for_pooled_projection: bool = True,
107
+ ):
108
+ super().__init__()
109
+ default_out_channels = in_channels
110
+ self.out_channels = out_channels if out_channels is not None else default_out_channels
111
+ self.inner_dim = num_attention_heads * attention_head_dim
112
+
113
+ if use_pos_embed:
114
+ self.pos_embed = PatchEmbed(
115
+ height=sample_size,
116
+ width=sample_size,
117
+ patch_size=patch_size,
118
+ in_channels=in_channels,
119
+ embed_dim=self.inner_dim,
120
+ pos_embed_max_size=pos_embed_max_size,
121
+ pos_embed_type=pos_embed_type,
122
+ )
123
+ else:
124
+ self.pos_embed = None
125
+ self.time_text_embed = CombinedTimestepTextProjEmbeddings(
126
+ embedding_dim=self.inner_dim, pooled_projection_dim=pooled_projection_dim
127
+ )
128
+ if joint_attention_dim is not None:
129
+ self.context_embedder = nn.Linear(joint_attention_dim, caption_projection_dim)
130
+
131
+ # `attention_head_dim` is doubled to account for the mixing.
132
+ # It needs to crafted when we get the actual checkpoints.
133
+ self.transformer_blocks = nn.ModuleList(
134
+ [
135
+ JointTransformerBlock(
136
+ dim=self.inner_dim,
137
+ num_attention_heads=num_attention_heads,
138
+ attention_head_dim=attention_head_dim,
139
+ context_pre_only=False,
140
+ qk_norm=qk_norm,
141
+ use_dual_attention=True if i in dual_attention_layers else False,
142
+ )
143
+ for i in range(num_layers)
144
+ ]
145
+ )
146
+ else:
147
+ self.context_embedder = None
148
+ self.transformer_blocks = nn.ModuleList(
149
+ [
150
+ SD3SingleTransformerBlock(
151
+ dim=self.inner_dim,
152
+ num_attention_heads=num_attention_heads,
153
+ attention_head_dim=attention_head_dim,
154
+ )
155
+ for _ in range(num_layers)
156
+ ]
157
+ )
158
+
159
+ # controlnet_blocks
160
+ self.controlnet_blocks = nn.ModuleList([])
161
+ for _ in range(len(self.transformer_blocks)):
162
+ controlnet_block = nn.Linear(self.inner_dim, self.inner_dim)
163
+ controlnet_block = zero_module(controlnet_block)
164
+ self.controlnet_blocks.append(controlnet_block)
165
+ pos_embed_input = PatchEmbed(
166
+ height=sample_size,
167
+ width=sample_size,
168
+ patch_size=patch_size,
169
+ in_channels=in_channels + extra_conditioning_channels,
170
+ embed_dim=self.inner_dim,
171
+ pos_embed_type=None,
172
+ )
173
+ self.pos_embed_input = zero_module(pos_embed_input)
174
+
175
+ self.gradient_checkpointing = False
176
+
177
+ # Copied from diffusers.models.unets.unet_3d_condition.UNet3DConditionModel.enable_forward_chunking
178
+ def enable_forward_chunking(self, chunk_size: Optional[int] = None, dim: int = 0) -> None:
179
+ """
180
+ Sets the attention processor to use [feed forward
181
+ chunking](https://huggingface.co/blog/reformer#2-chunked-feed-forward-layers).
182
+
183
+ Parameters:
184
+ chunk_size (`int`, *optional*):
185
+ The chunk size of the feed-forward layers. If not specified, will run feed-forward layer individually
186
+ over each tensor of dim=`dim`.
187
+ dim (`int`, *optional*, defaults to `0`):
188
+ The dimension over which the feed-forward computation should be chunked. Choose between dim=0 (batch)
189
+ or dim=1 (sequence length).
190
+ """
191
+ if dim not in [0, 1]:
192
+ raise ValueError(f"Make sure to set `dim` to either 0 or 1, not {dim}")
193
+
194
+ # By default chunk size is 1
195
+ chunk_size = chunk_size or 1
196
+
197
+ def fn_recursive_feed_forward(module: torch.nn.Module, chunk_size: int, dim: int):
198
+ if hasattr(module, "set_chunk_feed_forward"):
199
+ module.set_chunk_feed_forward(chunk_size=chunk_size, dim=dim)
200
+
201
+ for child in module.children():
202
+ fn_recursive_feed_forward(child, chunk_size, dim)
203
+
204
+ for module in self.children():
205
+ fn_recursive_feed_forward(module, chunk_size, dim)
206
+
207
+ @property
208
+ # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.attn_processors
209
+ def attn_processors(self) -> Dict[str, AttentionProcessor]:
210
+ r"""
211
+ Returns:
212
+ `dict` of attention processors: A dictionary containing all attention processors used in the model with
213
+ indexed by its weight name.
214
+ """
215
+ # set recursively
216
+ processors = {}
217
+
218
+ def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
219
+ if hasattr(module, "get_processor"):
220
+ processors[f"{name}.processor"] = module.get_processor()
221
+
222
+ for sub_name, child in module.named_children():
223
+ fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)
224
+
225
+ return processors
226
+
227
+ for name, module in self.named_children():
228
+ fn_recursive_add_processors(name, module, processors)
229
+
230
+ return processors
231
+
232
+ # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_attn_processor
233
+ def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
234
+ r"""
235
+ Sets the attention processor to use to compute attention.
236
+
237
+ Parameters:
238
+ processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`):
239
+ The instantiated processor class or a dictionary of processor classes that will be set as the processor
240
+ for **all** `Attention` layers.
241
+
242
+ If `processor` is a dict, the key needs to define the path to the corresponding cross attention
243
+ processor. This is strongly recommended when setting trainable attention processors.
244
+
245
+ """
246
+ count = len(self.attn_processors.keys())
247
+
248
+ if isinstance(processor, dict) and len(processor) != count:
249
+ raise ValueError(
250
+ f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
251
+ f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
252
+ )
253
+
254
+ def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
255
+ if hasattr(module, "set_processor"):
256
+ if not isinstance(processor, dict):
257
+ module.set_processor(processor)
258
+ else:
259
+ module.set_processor(processor.pop(f"{name}.processor"))
260
+
261
+ for sub_name, child in module.named_children():
262
+ fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)
263
+
264
+ for name, module in self.named_children():
265
+ fn_recursive_attn_processor(name, module, processor)
266
+
267
+ # Copied from diffusers.models.transformers.transformer_sd3.SD3Transformer2DModel.fuse_qkv_projections
268
+ def fuse_qkv_projections(self):
269
+ """
270
+ Enables fused QKV projections. For self-attention modules, all projection matrices (i.e., query, key, value)
271
+ are fused. For cross-attention modules, key and value projection matrices are fused.
272
+
273
+ <Tip warning={true}>
274
+
275
+ This API is 🧪 experimental.
276
+
277
+ </Tip>
278
+ """
279
+ self.original_attn_processors = None
280
+
281
+ for _, attn_processor in self.attn_processors.items():
282
+ if "Added" in str(attn_processor.__class__.__name__):
283
+ raise ValueError("`fuse_qkv_projections()` is not supported for models having added KV projections.")
284
+
285
+ self.original_attn_processors = self.attn_processors
286
+
287
+ for module in self.modules():
288
+ if isinstance(module, Attention):
289
+ module.fuse_projections(fuse=True)
290
+
291
+ self.set_attn_processor(FusedJointAttnProcessor2_0())
292
+
293
+ # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.unfuse_qkv_projections
294
+ def unfuse_qkv_projections(self):
295
+ """Disables the fused QKV projection if enabled.
296
+
297
+ <Tip warning={true}>
298
+
299
+ This API is 🧪 experimental.
300
+
301
+ </Tip>
302
+
303
+ """
304
+ if self.original_attn_processors is not None:
305
+ self.set_attn_processor(self.original_attn_processors)
306
+
307
+ # Notes: This is for SD3.5 8b controlnet, which shares the pos_embed with the transformer
308
+ # we should have handled this in conversion script
309
+ def _get_pos_embed_from_transformer(self, transformer):
310
+ pos_embed = PatchEmbed(
311
+ height=transformer.config.sample_size,
312
+ width=transformer.config.sample_size,
313
+ patch_size=transformer.config.patch_size,
314
+ in_channels=transformer.config.in_channels,
315
+ embed_dim=transformer.inner_dim,
316
+ pos_embed_max_size=transformer.config.pos_embed_max_size,
317
+ )
318
+ pos_embed.load_state_dict(transformer.pos_embed.state_dict(), strict=True)
319
+ return pos_embed
320
+
321
+ @classmethod
322
+ def from_transformer(
323
+ cls, transformer, num_layers=12, num_extra_conditioning_channels=1, load_weights_from_transformer=True
324
+ ):
325
+ config = transformer.config
326
+ config["num_layers"] = num_layers or config.num_layers
327
+ config["extra_conditioning_channels"] = num_extra_conditioning_channels
328
+ controlnet = cls.from_config(config)
329
+
330
+ if load_weights_from_transformer:
331
+ controlnet.pos_embed.load_state_dict(transformer.pos_embed.state_dict())
332
+ controlnet.time_text_embed.load_state_dict(transformer.time_text_embed.state_dict())
333
+ controlnet.context_embedder.load_state_dict(transformer.context_embedder.state_dict())
334
+ controlnet.transformer_blocks.load_state_dict(transformer.transformer_blocks.state_dict(), strict=False)
335
+
336
+ controlnet.pos_embed_input = zero_module(controlnet.pos_embed_input)
337
+
338
+ return controlnet
339
+
340
+ def forward(
341
+ self,
342
+ hidden_states: torch.Tensor,
343
+ controlnet_cond: torch.Tensor,
344
+ conditioning_scale: float = 1.0,
345
+ encoder_hidden_states: torch.Tensor = None,
346
+ pooled_projections: torch.Tensor = None,
347
+ timestep: torch.LongTensor = None,
348
+ joint_attention_kwargs: Optional[Dict[str, Any]] = None,
349
+ return_dict: bool = True,
350
+ ) -> Union[torch.Tensor, Transformer2DModelOutput]:
351
+ """
352
+ The [`SD3Transformer2DModel`] forward method.
353
+
354
+ Args:
355
+ hidden_states (`torch.Tensor` of shape `(batch size, channel, height, width)`):
356
+ Input `hidden_states`.
357
+ controlnet_cond (`torch.Tensor`):
358
+ The conditional input tensor of shape `(batch_size, sequence_length, hidden_size)`.
359
+ conditioning_scale (`float`, defaults to `1.0`):
360
+ The scale factor for ControlNet outputs.
361
+ encoder_hidden_states (`torch.Tensor` of shape `(batch size, sequence_len, embed_dims)`):
362
+ Conditional embeddings (embeddings computed from the input conditions such as prompts) to use.
363
+ pooled_projections (`torch.Tensor` of shape `(batch_size, projection_dim)`): Embeddings projected
364
+ from the embeddings of input conditions.
365
+ timestep ( `torch.LongTensor`):
366
+ Used to indicate denoising step.
367
+ joint_attention_kwargs (`dict`, *optional*):
368
+ A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
369
+ `self.processor` in
370
+ [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
371
+ return_dict (`bool`, *optional*, defaults to `True`):
372
+ Whether or not to return a [`~models.transformer_2d.Transformer2DModelOutput`] instead of a plain
373
+ tuple.
374
+
375
+ Returns:
376
+ If `return_dict` is True, an [`~models.transformer_2d.Transformer2DModelOutput`] is returned, otherwise a
377
+ `tuple` where the first element is the sample tensor.
378
+ """
379
+ if joint_attention_kwargs is not None:
380
+ joint_attention_kwargs = joint_attention_kwargs.copy()
381
+ lora_scale = joint_attention_kwargs.pop("scale", 1.0)
382
+ else:
383
+ lora_scale = 1.0
384
+
385
+ if USE_PEFT_BACKEND:
386
+ # weight the lora layers by setting `lora_scale` for each PEFT layer
387
+ scale_lora_layers(self, lora_scale)
388
+ else:
389
+ if joint_attention_kwargs is not None and joint_attention_kwargs.get("scale", None) is not None:
390
+ logger.warning(
391
+ "Passing `scale` via `joint_attention_kwargs` when not using the PEFT backend is ineffective."
392
+ )
393
+
394
+ if self.pos_embed is not None and hidden_states.ndim != 4:
395
+ raise ValueError("hidden_states must be 4D when pos_embed is used")
396
+
397
+ # SD3.5 8b controlnet does not have a `pos_embed`,
398
+ # it use the `pos_embed` from the transformer to process input before passing to controlnet
399
+ elif self.pos_embed is None and hidden_states.ndim != 3:
400
+ raise ValueError("hidden_states must be 3D when pos_embed is not used")
401
+
402
+ if self.context_embedder is not None and encoder_hidden_states is None:
403
+ raise ValueError("encoder_hidden_states must be provided when context_embedder is used")
404
+ # SD3.5 8b controlnet does not have a `context_embedder`, it does not use `encoder_hidden_states`
405
+ elif self.context_embedder is None and encoder_hidden_states is not None:
406
+ raise ValueError("encoder_hidden_states should not be provided when context_embedder is not used")
407
+
408
+ if self.pos_embed is not None:
409
+ hidden_states = self.pos_embed(hidden_states) # takes care of adding positional embeddings too.
410
+
411
+ temb = self.time_text_embed(timestep, pooled_projections)
412
+
413
+ if self.context_embedder is not None:
414
+ encoder_hidden_states = self.context_embedder(encoder_hidden_states)
415
+
416
+ # add
417
+ hidden_states = hidden_states + self.pos_embed_input(controlnet_cond)
418
+
419
+ block_res_samples = ()
420
+
421
+ for block in self.transformer_blocks:
422
+ if torch.is_grad_enabled() and self.gradient_checkpointing:
423
+ if self.context_embedder is not None:
424
+ encoder_hidden_states, hidden_states = self._gradient_checkpointing_func(
425
+ block,
426
+ hidden_states,
427
+ encoder_hidden_states,
428
+ temb,
429
+ )
430
+ else:
431
+ # SD3.5 8b controlnet use single transformer block, which does not use `encoder_hidden_states`
432
+ hidden_states = self._gradient_checkpointing_func(block, hidden_states, temb)
433
+
434
+ else:
435
+ if self.context_embedder is not None:
436
+ encoder_hidden_states, hidden_states = block(
437
+ hidden_states=hidden_states, encoder_hidden_states=encoder_hidden_states, temb=temb
438
+ )
439
+ else:
440
+ # SD3.5 8b controlnet use single transformer block, which does not use `encoder_hidden_states`
441
+ hidden_states = block(hidden_states, temb)
442
+
443
+ block_res_samples = block_res_samples + (hidden_states,)
444
+
445
+ controlnet_block_res_samples = ()
446
+ for block_res_sample, controlnet_block in zip(block_res_samples, self.controlnet_blocks):
447
+ block_res_sample = controlnet_block(block_res_sample)
448
+ controlnet_block_res_samples = controlnet_block_res_samples + (block_res_sample,)
449
+
450
+ # 6. scaling
451
+ controlnet_block_res_samples = [sample * conditioning_scale for sample in controlnet_block_res_samples]
452
+
453
+ if USE_PEFT_BACKEND:
454
+ # remove `lora_scale` from each PEFT layer
455
+ unscale_lora_layers(self, lora_scale)
456
+
457
+ if not return_dict:
458
+ return (controlnet_block_res_samples,)
459
+
460
+ return SD3ControlNetOutput(controlnet_block_samples=controlnet_block_res_samples)
461
+
462
+
463
+ class SD3MultiControlNetModel(ModelMixin):
464
+ r"""
465
+ `SD3ControlNetModel` wrapper class for Multi-SD3ControlNet
466
+
467
+ This module is a wrapper for multiple instances of the `SD3ControlNetModel`. The `forward()` API is designed to be
468
+ compatible with `SD3ControlNetModel`.
469
+
470
+ Args:
471
+ controlnets (`List[SD3ControlNetModel]`):
472
+ Provides additional conditioning to the unet during the denoising process. You must set multiple
473
+ `SD3ControlNetModel` as a list.
474
+ """
475
+
476
+ def __init__(self, controlnets):
477
+ super().__init__()
478
+ self.nets = nn.ModuleList(controlnets)
479
+
480
+ def forward(
481
+ self,
482
+ hidden_states: torch.Tensor,
483
+ controlnet_cond: List[torch.tensor],
484
+ conditioning_scale: List[float],
485
+ pooled_projections: torch.Tensor,
486
+ encoder_hidden_states: torch.Tensor = None,
487
+ timestep: torch.LongTensor = None,
488
+ joint_attention_kwargs: Optional[Dict[str, Any]] = None,
489
+ return_dict: bool = True,
490
+ ) -> Union[SD3ControlNetOutput, Tuple]:
491
+ for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond, conditioning_scale, self.nets)):
492
+ block_samples = controlnet(
493
+ hidden_states=hidden_states,
494
+ timestep=timestep,
495
+ encoder_hidden_states=encoder_hidden_states,
496
+ pooled_projections=pooled_projections,
497
+ controlnet_cond=image,
498
+ conditioning_scale=scale,
499
+ joint_attention_kwargs=joint_attention_kwargs,
500
+ return_dict=return_dict,
501
+ )
502
+
503
+ # merge samples
504
+ if i == 0:
505
+ control_block_samples = block_samples
506
+ else:
507
+ control_block_samples = [
508
+ control_block_sample + block_sample
509
+ for control_block_sample, block_sample in zip(control_block_samples[0], block_samples[0])
510
+ ]
511
+ control_block_samples = (tuple(control_block_samples),)
512
+
513
+ return control_block_samples
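A minimal sketch of how the `SD3ControlNetModel` above is typically built and called, assuming an already-loaded SD3 `transformer` with the default config (16 latent channels, `joint_attention_dim=4096`, `pooled_projection_dim=2048`); all shapes and values are illustrative assumptions.

import torch

controlnet = SD3ControlNetModel.from_transformer(
    transformer, num_layers=12, num_extra_conditioning_channels=1
)

latents = torch.randn(1, 16, 128, 128)      # [B, C, H, W] noisy latents
cond = torch.randn(1, 17, 128, 128)         # latents plus 1 extra conditioning channel (e.g. a mask)
prompt_embeds = torch.randn(1, 154, 4096)   # joint text embeddings
pooled = torch.randn(1, 2048)               # pooled text projection

out = controlnet(
    hidden_states=latents,
    controlnet_cond=cond,
    conditioning_scale=0.7,
    encoder_hidden_states=prompt_embeds,
    pooled_projections=pooled,
    timestep=torch.tensor([999]),
)
# out.controlnet_block_samples: one per-block residual, already multiplied by
# conditioning_scale, to be added to the base transformer's block outputs.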
exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/models/controlnets/controlnet_sparsectrl.py ADDED
@@ -0,0 +1,785 @@
1
+ # Copyright 2025 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from dataclasses import dataclass
16
+ from typing import Any, Dict, List, Optional, Tuple, Union
17
+
18
+ import torch
19
+ from torch import nn
20
+ from torch.nn import functional as F
21
+
22
+ from ...configuration_utils import ConfigMixin, register_to_config
23
+ from ...loaders import FromOriginalModelMixin
24
+ from ...utils import BaseOutput, logging
25
+ from ..attention_processor import (
26
+ ADDED_KV_ATTENTION_PROCESSORS,
27
+ CROSS_ATTENTION_PROCESSORS,
28
+ AttentionProcessor,
29
+ AttnAddedKVProcessor,
30
+ AttnProcessor,
31
+ )
32
+ from ..embeddings import TimestepEmbedding, Timesteps
33
+ from ..modeling_utils import ModelMixin
34
+ from ..unets.unet_2d_blocks import UNetMidBlock2DCrossAttn
35
+ from ..unets.unet_2d_condition import UNet2DConditionModel
36
+ from ..unets.unet_motion_model import CrossAttnDownBlockMotion, DownBlockMotion
37
+
38
+
39
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
40
+
41
+
42
+ @dataclass
43
+ class SparseControlNetOutput(BaseOutput):
44
+ """
45
+ The output of [`SparseControlNetModel`].
46
+
47
+ Args:
48
+ down_block_res_samples (`tuple[torch.Tensor]`):
49
+ A tuple of downsample activations at different resolutions for each downsampling block. Each tensor should
50
+ be of shape `(batch_size, channel * resolution, height //resolution, width // resolution)`. Output can be
51
+ used to condition the original UNet's downsampling activations.
52
+ mid_block_res_sample (`torch.Tensor`):
53
+ The activation of the middle block (the lowest sample resolution). Each tensor should be of shape
54
+ `(batch_size, channel * lowest_resolution, height // lowest_resolution, width // lowest_resolution)`.
55
+ Output can be used to condition the original UNet's middle block activation.
56
+ """
57
+
58
+ down_block_res_samples: Tuple[torch.Tensor]
59
+ mid_block_res_sample: torch.Tensor
60
+
61
+
62
+ class SparseControlNetConditioningEmbedding(nn.Module):
63
+ def __init__(
64
+ self,
65
+ conditioning_embedding_channels: int,
66
+ conditioning_channels: int = 3,
67
+ block_out_channels: Tuple[int, ...] = (16, 32, 96, 256),
68
+ ):
69
+ super().__init__()
70
+
71
+ self.conv_in = nn.Conv2d(conditioning_channels, block_out_channels[0], kernel_size=3, padding=1)
72
+ self.blocks = nn.ModuleList([])
73
+
74
+ for i in range(len(block_out_channels) - 1):
75
+ channel_in = block_out_channels[i]
76
+ channel_out = block_out_channels[i + 1]
77
+ self.blocks.append(nn.Conv2d(channel_in, channel_in, kernel_size=3, padding=1))
78
+ self.blocks.append(nn.Conv2d(channel_in, channel_out, kernel_size=3, padding=1, stride=2))
79
+
80
+ self.conv_out = zero_module(
81
+ nn.Conv2d(block_out_channels[-1], conditioning_embedding_channels, kernel_size=3, padding=1)
82
+ )
83
+
84
+ def forward(self, conditioning: torch.Tensor) -> torch.Tensor:
85
+ embedding = self.conv_in(conditioning)
86
+ embedding = F.silu(embedding)
87
+
88
+ for block in self.blocks:
89
+ embedding = block(embedding)
90
+ embedding = F.silu(embedding)
91
+
92
+ embedding = self.conv_out(embedding)
93
+ return embedding
94
+
95
+
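A quick sketch (input size is an illustrative assumption) of the shape behaviour of `SparseControlNetConditioningEmbedding` above: each stride-2 convolution halves the spatial resolution, so the default four `block_out_channels` map a 512x512 hint to 64x64 features, and the zero-initialized `conv_out` makes the output zero right after initialization.

import torch

embed = SparseControlNetConditioningEmbedding(
    conditioning_embedding_channels=320,
    conditioning_channels=4,
    block_out_channels=(16, 32, 96, 256),
)
hint = torch.randn(1, 4, 512, 512)
print(embed(hint).shape)   # torch.Size([1, 320, 64, 64]) -- three stride-2 stages: 512 -> 256 -> 128 -> 64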
96
+ class SparseControlNetModel(ModelMixin, ConfigMixin, FromOriginalModelMixin):
97
+ """
98
+ A SparseControlNet model as described in [SparseCtrl: Adding Sparse Controls to Text-to-Video Diffusion
99
+ Models](https://huggingface.co/papers/2311.16933).
100
+
101
+ Args:
102
+ in_channels (`int`, defaults to 4):
103
+ The number of channels in the input sample.
104
+ conditioning_channels (`int`, defaults to 4):
105
+ The number of input channels in the controlnet conditional embedding module. If
106
+ `concat_condition_embedding` is True, the value provided here is incremented by 1.
107
+ flip_sin_to_cos (`bool`, defaults to `True`):
108
+ Whether to flip the sin to cos in the time embedding.
109
+ freq_shift (`int`, defaults to 0):
110
+ The frequency shift to apply to the time embedding.
111
+ down_block_types (`tuple[str]`, defaults to `("CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D")`):
112
+ The tuple of downsample blocks to use.
113
+ only_cross_attention (`Union[bool, Tuple[bool]]`, defaults to `False`):
114
+ block_out_channels (`tuple[int]`, defaults to `(320, 640, 1280, 1280)`):
115
+ The tuple of output channels for each block.
116
+ layers_per_block (`int`, defaults to 2):
117
+ The number of layers per block.
118
+ downsample_padding (`int`, defaults to 1):
119
+ The padding to use for the downsampling convolution.
120
+ mid_block_scale_factor (`float`, defaults to 1):
121
+ The scale factor to use for the mid block.
122
+ act_fn (`str`, defaults to "silu"):
123
+ The activation function to use.
124
+ norm_num_groups (`int`, *optional*, defaults to 32):
125
+ The number of groups to use for the normalization. If None, normalization and activation layers is skipped
126
+ in post-processing.
127
+ norm_eps (`float`, defaults to 1e-5):
128
+ The epsilon to use for the normalization.
129
+ cross_attention_dim (`int`, defaults to 1280):
130
+ The dimension of the cross attention features.
131
+ transformer_layers_per_block (`int` or `Tuple[int]`, *optional*, defaults to 1):
132
+ The number of transformer blocks of type [`~models.attention.BasicTransformerBlock`]. Only relevant for
133
+ [`~models.unet_2d_blocks.CrossAttnDownBlock2D`], [`~models.unet_2d_blocks.CrossAttnUpBlock2D`],
134
+ [`~models.unet_2d_blocks.UNetMidBlock2DCrossAttn`].
135
+ transformer_layers_per_mid_block (`int` or `Tuple[int]`, *optional*, defaults to 1):
136
+ The number of transformer layers to use in each layer in the middle block.
137
+ attention_head_dim (`int` or `Tuple[int]`, defaults to 8):
138
+ The dimension of the attention heads.
139
+ num_attention_heads (`int` or `Tuple[int]`, *optional*):
140
+ The number of heads to use for multi-head attention.
141
+ use_linear_projection (`bool`, defaults to `False`):
142
+ upcast_attention (`bool`, defaults to `False`):
143
+ resnet_time_scale_shift (`str`, defaults to `"default"`):
144
+ Time scale shift config for ResNet blocks (see `ResnetBlock2D`). Choose from `default` or `scale_shift`.
145
+ conditioning_embedding_out_channels (`Tuple[int]`, defaults to `(16, 32, 96, 256)`):
146
+ The tuple of output channel for each block in the `conditioning_embedding` layer.
147
+ global_pool_conditions (`bool`, defaults to `False`):
148
+ TODO(Patrick) - unused parameter
149
+ controlnet_conditioning_channel_order (`str`, defaults to `rgb`):
150
+ motion_max_seq_length (`int`, defaults to `32`):
151
+ The maximum sequence length to use in the motion module.
152
+ motion_num_attention_heads (`int` or `Tuple[int]`, defaults to `8`):
153
+ The number of heads to use in each attention layer of the motion module.
154
+ concat_conditioning_mask (`bool`, defaults to `True`):
155
+ use_simplified_condition_embedding (`bool`, defaults to `True`):
156
+ """
157
+
158
+ _supports_gradient_checkpointing = True
159
+
160
+ @register_to_config
161
+ def __init__(
162
+ self,
163
+ in_channels: int = 4,
164
+ conditioning_channels: int = 4,
165
+ flip_sin_to_cos: bool = True,
166
+ freq_shift: int = 0,
167
+ down_block_types: Tuple[str, ...] = (
168
+ "CrossAttnDownBlockMotion",
169
+ "CrossAttnDownBlockMotion",
170
+ "CrossAttnDownBlockMotion",
171
+ "DownBlockMotion",
172
+ ),
173
+ only_cross_attention: Union[bool, Tuple[bool]] = False,
174
+ block_out_channels: Tuple[int, ...] = (320, 640, 1280, 1280),
175
+ layers_per_block: int = 2,
176
+ downsample_padding: int = 1,
177
+ mid_block_scale_factor: float = 1,
178
+ act_fn: str = "silu",
179
+ norm_num_groups: Optional[int] = 32,
180
+ norm_eps: float = 1e-5,
181
+ cross_attention_dim: int = 768,
182
+ transformer_layers_per_block: Union[int, Tuple[int, ...]] = 1,
183
+ transformer_layers_per_mid_block: Optional[Union[int, Tuple[int]]] = None,
184
+ temporal_transformer_layers_per_block: Union[int, Tuple[int, ...]] = 1,
185
+ attention_head_dim: Union[int, Tuple[int, ...]] = 8,
186
+ num_attention_heads: Optional[Union[int, Tuple[int, ...]]] = None,
187
+ use_linear_projection: bool = False,
188
+ upcast_attention: bool = False,
189
+ resnet_time_scale_shift: str = "default",
190
+ conditioning_embedding_out_channels: Optional[Tuple[int, ...]] = (16, 32, 96, 256),
191
+ global_pool_conditions: bool = False,
192
+ controlnet_conditioning_channel_order: str = "rgb",
193
+ motion_max_seq_length: int = 32,
194
+ motion_num_attention_heads: int = 8,
195
+ concat_conditioning_mask: bool = True,
196
+ use_simplified_condition_embedding: bool = True,
197
+ ):
198
+ super().__init__()
199
+ self.use_simplified_condition_embedding = use_simplified_condition_embedding
200
+
201
+ # If `num_attention_heads` is not defined (which is the case for most models)
202
+ # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
203
+ # The reason for this behavior is to correct for incorrectly named variables that were introduced
204
+ # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
205
+ # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
206
+ # which is why we correct for the naming here.
207
+ num_attention_heads = num_attention_heads or attention_head_dim
208
+
209
+ # Check inputs
210
+ if len(block_out_channels) != len(down_block_types):
211
+ raise ValueError(
212
+ f"Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. `down_block_types`: {down_block_types}."
213
+ )
214
+
215
+ if not isinstance(only_cross_attention, bool) and len(only_cross_attention) != len(down_block_types):
216
+ raise ValueError(
217
+ f"Must provide the same number of `only_cross_attention` as `down_block_types`. `only_cross_attention`: {only_cross_attention}. `down_block_types`: {down_block_types}."
218
+ )
219
+
220
+ if not isinstance(num_attention_heads, int) and len(num_attention_heads) != len(down_block_types):
221
+ raise ValueError(
222
+ f"Must provide the same number of `num_attention_heads` as `down_block_types`. `num_attention_heads`: {num_attention_heads}. `down_block_types`: {down_block_types}."
223
+ )
224
+
225
+ if isinstance(transformer_layers_per_block, int):
226
+ transformer_layers_per_block = [transformer_layers_per_block] * len(down_block_types)
227
+ if isinstance(temporal_transformer_layers_per_block, int):
228
+ temporal_transformer_layers_per_block = [temporal_transformer_layers_per_block] * len(down_block_types)
229
+
230
+ # input
231
+ conv_in_kernel = 3
232
+ conv_in_padding = (conv_in_kernel - 1) // 2
233
+ self.conv_in = nn.Conv2d(
234
+ in_channels, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding
235
+ )
236
+
237
+ if concat_conditioning_mask:
238
+ conditioning_channels = conditioning_channels + 1
239
+
240
+ self.concat_conditioning_mask = concat_conditioning_mask
241
+
242
+ # control net conditioning embedding
243
+ if use_simplified_condition_embedding:
244
+ self.controlnet_cond_embedding = zero_module(
245
+ nn.Conv2d(conditioning_channels, block_out_channels[0], kernel_size=3, padding=1)
246
+ )
247
+ else:
248
+ self.controlnet_cond_embedding = SparseControlNetConditioningEmbedding(
249
+ conditioning_embedding_channels=block_out_channels[0],
250
+ block_out_channels=conditioning_embedding_out_channels,
251
+ conditioning_channels=conditioning_channels,
252
+ )
253
+
254
+ # time
255
+ time_embed_dim = block_out_channels[0] * 4
256
+ self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift)
257
+ timestep_input_dim = block_out_channels[0]
258
+
259
+ self.time_embedding = TimestepEmbedding(
260
+ timestep_input_dim,
261
+ time_embed_dim,
262
+ act_fn=act_fn,
263
+ )
264
+
265
+ self.down_blocks = nn.ModuleList([])
266
+ self.controlnet_down_blocks = nn.ModuleList([])
267
+
268
+ if isinstance(cross_attention_dim, int):
269
+ cross_attention_dim = (cross_attention_dim,) * len(down_block_types)
270
+
271
+ if isinstance(only_cross_attention, bool):
272
+ only_cross_attention = [only_cross_attention] * len(down_block_types)
273
+
274
+ if isinstance(attention_head_dim, int):
275
+ attention_head_dim = (attention_head_dim,) * len(down_block_types)
276
+
277
+ if isinstance(num_attention_heads, int):
278
+ num_attention_heads = (num_attention_heads,) * len(down_block_types)
279
+
280
+ if isinstance(motion_num_attention_heads, int):
281
+ motion_num_attention_heads = (motion_num_attention_heads,) * len(down_block_types)
282
+
283
+ # down
284
+ output_channel = block_out_channels[0]
285
+
286
+ controlnet_block = nn.Conv2d(output_channel, output_channel, kernel_size=1)
287
+ controlnet_block = zero_module(controlnet_block)
288
+ self.controlnet_down_blocks.append(controlnet_block)
289
+
290
+ for i, down_block_type in enumerate(down_block_types):
291
+ input_channel = output_channel
292
+ output_channel = block_out_channels[i]
293
+ is_final_block = i == len(block_out_channels) - 1
294
+
295
+ if down_block_type == "CrossAttnDownBlockMotion":
296
+ down_block = CrossAttnDownBlockMotion(
297
+ in_channels=input_channel,
298
+ out_channels=output_channel,
299
+ temb_channels=time_embed_dim,
300
+ dropout=0,
301
+ num_layers=layers_per_block,
302
+ transformer_layers_per_block=transformer_layers_per_block[i],
303
+ resnet_eps=norm_eps,
304
+ resnet_time_scale_shift=resnet_time_scale_shift,
305
+ resnet_act_fn=act_fn,
306
+ resnet_groups=norm_num_groups,
307
+ resnet_pre_norm=True,
308
+ num_attention_heads=num_attention_heads[i],
309
+ cross_attention_dim=cross_attention_dim[i],
310
+ add_downsample=not is_final_block,
311
+ dual_cross_attention=False,
312
+ use_linear_projection=use_linear_projection,
313
+ only_cross_attention=only_cross_attention[i],
314
+ upcast_attention=upcast_attention,
315
+ temporal_num_attention_heads=motion_num_attention_heads[i],
316
+ temporal_max_seq_length=motion_max_seq_length,
317
+ temporal_transformer_layers_per_block=temporal_transformer_layers_per_block[i],
318
+ temporal_double_self_attention=False,
319
+ )
320
+ elif down_block_type == "DownBlockMotion":
321
+ down_block = DownBlockMotion(
322
+ in_channels=input_channel,
323
+ out_channels=output_channel,
324
+ temb_channels=time_embed_dim,
325
+ dropout=0,
326
+ num_layers=layers_per_block,
327
+ resnet_eps=norm_eps,
328
+ resnet_time_scale_shift=resnet_time_scale_shift,
329
+ resnet_act_fn=act_fn,
330
+ resnet_groups=norm_num_groups,
331
+ resnet_pre_norm=True,
332
+ add_downsample=not is_final_block,
333
+ temporal_num_attention_heads=motion_num_attention_heads[i],
334
+ temporal_max_seq_length=motion_max_seq_length,
335
+ temporal_transformer_layers_per_block=temporal_transformer_layers_per_block[i],
336
+ temporal_double_self_attention=False,
337
+ )
338
+ else:
339
+ raise ValueError(
340
+ "Invalid `block_type` encountered. Must be one of `CrossAttnDownBlockMotion` or `DownBlockMotion`"
341
+ )
342
+
343
+ self.down_blocks.append(down_block)
344
+
345
+ for _ in range(layers_per_block):
346
+ controlnet_block = nn.Conv2d(output_channel, output_channel, kernel_size=1)
347
+ controlnet_block = zero_module(controlnet_block)
348
+ self.controlnet_down_blocks.append(controlnet_block)
349
+
350
+ if not is_final_block:
351
+ controlnet_block = nn.Conv2d(output_channel, output_channel, kernel_size=1)
352
+ controlnet_block = zero_module(controlnet_block)
353
+ self.controlnet_down_blocks.append(controlnet_block)
354
+
355
+ # mid
356
+ mid_block_channels = block_out_channels[-1]
357
+
358
+ controlnet_block = nn.Conv2d(mid_block_channels, mid_block_channels, kernel_size=1)
359
+ controlnet_block = zero_module(controlnet_block)
360
+ self.controlnet_mid_block = controlnet_block
361
+
362
+ if transformer_layers_per_mid_block is None:
363
+ transformer_layers_per_mid_block = (
364
+ transformer_layers_per_block[-1] if isinstance(transformer_layers_per_block[-1], int) else 1
365
+ )
366
+
367
+ self.mid_block = UNetMidBlock2DCrossAttn(
368
+ in_channels=mid_block_channels,
369
+ temb_channels=time_embed_dim,
370
+ dropout=0,
371
+ num_layers=1,
372
+ transformer_layers_per_block=transformer_layers_per_mid_block,
373
+ resnet_eps=norm_eps,
374
+ resnet_time_scale_shift=resnet_time_scale_shift,
375
+ resnet_act_fn=act_fn,
376
+ resnet_groups=norm_num_groups,
377
+ resnet_pre_norm=True,
378
+ num_attention_heads=num_attention_heads[-1],
379
+ output_scale_factor=mid_block_scale_factor,
380
+ cross_attention_dim=cross_attention_dim[-1],
381
+ dual_cross_attention=False,
382
+ use_linear_projection=use_linear_projection,
383
+ upcast_attention=upcast_attention,
384
+ attention_type="default",
385
+ )
386
+
387
+ @classmethod
388
+ def from_unet(
389
+ cls,
390
+ unet: UNet2DConditionModel,
391
+ controlnet_conditioning_channel_order: str = "rgb",
392
+ conditioning_embedding_out_channels: Optional[Tuple[int, ...]] = (16, 32, 96, 256),
393
+ load_weights_from_unet: bool = True,
394
+ conditioning_channels: int = 3,
395
+ ) -> "SparseControlNetModel":
396
+ r"""
397
+ Instantiate a [`SparseControlNetModel`] from [`UNet2DConditionModel`].
398
+
399
+ Parameters:
400
+ unet (`UNet2DConditionModel`):
401
+ The UNet model weights to copy to the [`SparseControlNetModel`]. All configuration options are also
402
+ copied where applicable.
403
+ """
404
+ transformer_layers_per_block = (
405
+ unet.config.transformer_layers_per_block if "transformer_layers_per_block" in unet.config else 1
406
+ )
407
+ down_block_types = unet.config.down_block_types
408
+
409
+ for i in range(len(down_block_types)):
410
+ if "CrossAttn" in down_block_types[i]:
411
+ down_block_types[i] = "CrossAttnDownBlockMotion"
412
+ elif "Down" in down_block_types[i]:
413
+ down_block_types[i] = "DownBlockMotion"
414
+ else:
415
+ raise ValueError("Invalid `block_type` encountered. Must be a cross-attention or down block")
416
+
417
+ controlnet = cls(
418
+ in_channels=unet.config.in_channels,
419
+ conditioning_channels=conditioning_channels,
420
+ flip_sin_to_cos=unet.config.flip_sin_to_cos,
421
+ freq_shift=unet.config.freq_shift,
422
+ down_block_types=unet.config.down_block_types,
423
+ only_cross_attention=unet.config.only_cross_attention,
424
+ block_out_channels=unet.config.block_out_channels,
425
+ layers_per_block=unet.config.layers_per_block,
426
+ downsample_padding=unet.config.downsample_padding,
427
+ mid_block_scale_factor=unet.config.mid_block_scale_factor,
428
+ act_fn=unet.config.act_fn,
429
+ norm_num_groups=unet.config.norm_num_groups,
430
+ norm_eps=unet.config.norm_eps,
431
+ cross_attention_dim=unet.config.cross_attention_dim,
432
+ transformer_layers_per_block=transformer_layers_per_block,
433
+ attention_head_dim=unet.config.attention_head_dim,
434
+ num_attention_heads=unet.config.num_attention_heads,
435
+ use_linear_projection=unet.config.use_linear_projection,
436
+ upcast_attention=unet.config.upcast_attention,
437
+ resnet_time_scale_shift=unet.config.resnet_time_scale_shift,
438
+ conditioning_embedding_out_channels=conditioning_embedding_out_channels,
439
+ controlnet_conditioning_channel_order=controlnet_conditioning_channel_order,
440
+ )
441
+
442
+ if load_weights_from_unet:
443
+ controlnet.conv_in.load_state_dict(unet.conv_in.state_dict(), strict=False)
444
+ controlnet.time_proj.load_state_dict(unet.time_proj.state_dict(), strict=False)
445
+ controlnet.time_embedding.load_state_dict(unet.time_embedding.state_dict(), strict=False)
446
+ controlnet.down_blocks.load_state_dict(unet.down_blocks.state_dict(), strict=False)
447
+ controlnet.mid_block.load_state_dict(unet.mid_block.state_dict(), strict=False)
448
+
449
+ return controlnet
450
+
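+ # Illustrative usage sketch (the checkpoint id is an assumption): build a SparseControlNetModel
+ # from an existing Stable Diffusion UNet and reuse its weights.
+ #
+ #     from diffusers import UNet2DConditionModel
+ #     from diffusers.models.controlnets.controlnet_sparsectrl import SparseControlNetModel
+ #
+ #     unet = UNet2DConditionModel.from_pretrained(
+ #         "runwayml/stable-diffusion-v1-5", subfolder="unet"
+ #     )
+ #     controlnet = SparseControlNetModel.from_unet(unet, conditioning_channels=3)
+ #
+ # With `load_weights_from_unet=True` (the default), conv_in, the time embeddings and the down/mid
+ # blocks are copied with `strict=False`, so the motion-specific parameters keep their fresh
+ # initialization.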
451
+ @property
452
+ # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.attn_processors
453
+ def attn_processors(self) -> Dict[str, AttentionProcessor]:
454
+ r"""
455
+ Returns:
456
+ `dict` of attention processors: A dictionary containing all attention processors used in the model,
457
+ indexed by its weight name.
458
+ """
459
+ # set recursively
460
+ processors = {}
461
+
462
+ def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
463
+ if hasattr(module, "get_processor"):
464
+ processors[f"{name}.processor"] = module.get_processor()
465
+
466
+ for sub_name, child in module.named_children():
467
+ fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)
468
+
469
+ return processors
470
+
471
+ for name, module in self.named_children():
472
+ fn_recursive_add_processors(name, module, processors)
473
+
474
+ return processors
475
+
476
+ # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_attn_processor
477
+ def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
478
+ r"""
479
+ Sets the attention processor to use to compute attention.
480
+
481
+ Parameters:
482
+ processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`):
483
+ The instantiated processor class or a dictionary of processor classes that will be set as the processor
484
+ for **all** `Attention` layers.
485
+
486
+ If `processor` is a dict, the key needs to define the path to the corresponding cross attention
487
+ processor. This is strongly recommended when setting trainable attention processors.
488
+
489
+ """
490
+ count = len(self.attn_processors.keys())
491
+
492
+ if isinstance(processor, dict) and len(processor) != count:
493
+ raise ValueError(
494
+ f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
495
+ f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
496
+ )
497
+
498
+ def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
499
+ if hasattr(module, "set_processor"):
500
+ if not isinstance(processor, dict):
501
+ module.set_processor(processor)
502
+ else:
503
+ module.set_processor(processor.pop(f"{name}.processor"))
504
+
505
+ for sub_name, child in module.named_children():
506
+ fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)
507
+
508
+ for name, module in self.named_children():
509
+ fn_recursive_attn_processor(name, module, processor)
510
+
511
+ # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_default_attn_processor
512
+ def set_default_attn_processor(self):
513
+ """
514
+ Disables custom attention processors and sets the default attention implementation.
515
+ """
516
+ if all(proc.__class__ in ADDED_KV_ATTENTION_PROCESSORS for proc in self.attn_processors.values()):
517
+ processor = AttnAddedKVProcessor()
518
+ elif all(proc.__class__ in CROSS_ATTENTION_PROCESSORS for proc in self.attn_processors.values()):
519
+ processor = AttnProcessor()
520
+ else:
521
+ raise ValueError(
522
+ f"Cannot call `set_default_attn_processor` when attention processors are of type {next(iter(self.attn_processors.values()))}"
523
+ )
524
+
525
+ self.set_attn_processor(processor)
526
+
527
+ # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_attention_slice
528
+ def set_attention_slice(self, slice_size: Union[str, int, List[int]]) -> None:
529
+ r"""
530
+ Enable sliced attention computation.
531
+
532
+ When this option is enabled, the attention module splits the input tensor in slices to compute attention in
533
+ several steps. This is useful for saving some memory in exchange for a small decrease in speed.
534
+
535
+ Args:
536
+ slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `"auto"`):
537
+ When `"auto"`, input to the attention heads is halved, so attention is computed in two steps. If
538
+ `"max"`, maximum amount of memory is saved by running only one slice at a time. If a number is
539
+ provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim`
540
+ must be a multiple of `slice_size`.
541
+ """
542
+ sliceable_head_dims = []
543
+
544
+ def fn_recursive_retrieve_sliceable_dims(module: torch.nn.Module):
545
+ if hasattr(module, "set_attention_slice"):
546
+ sliceable_head_dims.append(module.sliceable_head_dim)
547
+
548
+ for child in module.children():
549
+ fn_recursive_retrieve_sliceable_dims(child)
550
+
551
+ # retrieve number of attention layers
552
+ for module in self.children():
553
+ fn_recursive_retrieve_sliceable_dims(module)
554
+
555
+ num_sliceable_layers = len(sliceable_head_dims)
556
+
557
+ if slice_size == "auto":
558
+ # half the attention head size is usually a good trade-off between
559
+ # speed and memory
560
+ slice_size = [dim // 2 for dim in sliceable_head_dims]
561
+ elif slice_size == "max":
562
+ # make smallest slice possible
563
+ slice_size = num_sliceable_layers * [1]
564
+
565
+ slice_size = num_sliceable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size
566
+
567
+ if len(slice_size) != len(sliceable_head_dims):
568
+ raise ValueError(
569
+ f"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different"
570
+ f" attention layers. Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}."
571
+ )
572
+
573
+ for i in range(len(slice_size)):
574
+ size = slice_size[i]
575
+ dim = sliceable_head_dims[i]
576
+ if size is not None and size > dim:
577
+ raise ValueError(f"size {size} has to be smaller or equal to {dim}.")
578
+
579
+ # Recursively walk through all the children.
580
+ # Any children which exposes the set_attention_slice method
581
+ # gets the message
582
+ def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]):
583
+ if hasattr(module, "set_attention_slice"):
584
+ module.set_attention_slice(slice_size.pop())
585
+
586
+ for child in module.children():
587
+ fn_recursive_set_attention_slice(child, slice_size)
588
+
589
+ reversed_slice_size = list(reversed(slice_size))
590
+ for module in self.children():
591
+ fn_recursive_set_attention_slice(module, reversed_slice_size)
592
+
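+ # Illustrative usage sketch: sliced attention trades a little speed for memory. "auto" halves each
+ # sliceable head dimension (attention runs in two steps), while an integer uses
+ # `attention_head_dim // slice_size` slices per layer.
+ #
+ #     controlnet.set_attention_slice("auto")  # two-step attention everywhere
+ #     controlnet.set_attention_slice(2)       # fixed slice size for every sliceable layer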
593
+ def forward(
594
+ self,
595
+ sample: torch.Tensor,
596
+ timestep: Union[torch.Tensor, float, int],
597
+ encoder_hidden_states: torch.Tensor,
598
+ controlnet_cond: torch.Tensor,
599
+ conditioning_scale: float = 1.0,
600
+ timestep_cond: Optional[torch.Tensor] = None,
601
+ attention_mask: Optional[torch.Tensor] = None,
602
+ cross_attention_kwargs: Optional[Dict[str, Any]] = None,
603
+ conditioning_mask: Optional[torch.Tensor] = None,
604
+ guess_mode: bool = False,
605
+ return_dict: bool = True,
606
+ ) -> Union[SparseControlNetOutput, Tuple[Tuple[torch.Tensor, ...], torch.Tensor]]:
607
+ """
608
+ The [`SparseControlNetModel`] forward method.
609
+
610
+ Args:
611
+ sample (`torch.Tensor`):
612
+ The noisy input tensor.
613
+ timestep (`Union[torch.Tensor, float, int]`):
614
+ The number of timesteps to denoise an input.
615
+ encoder_hidden_states (`torch.Tensor`):
616
+ The encoder hidden states.
617
+ controlnet_cond (`torch.Tensor`):
618
+ The conditional input tensor of shape `(batch_size, channels, num_frames, height, width)`.
619
+ conditioning_scale (`float`, defaults to `1.0`):
620
+ The scale factor for ControlNet outputs.
623
+ timestep_cond (`torch.Tensor`, *optional*, defaults to `None`):
624
+ Additional conditional embeddings for timestep. If provided, the embeddings will be summed with the
625
+ timestep_embedding passed through the `self.time_embedding` layer to obtain the final timestep
626
+ embeddings.
627
+ attention_mask (`torch.Tensor`, *optional*, defaults to `None`):
628
+ An attention mask of shape `(batch, key_tokens)` is applied to `encoder_hidden_states`. If `1` the mask
629
+ is kept, otherwise if `0` it is discarded. Mask will be converted into a bias, which adds large
630
+ negative values to the attention scores corresponding to "discard" tokens.
631
+ conditioning_mask (`torch.Tensor`, *optional*, defaults to `None`):
632
+ A conditioning mask that is concatenated to `controlnet_cond` along the channel dimension when
+ `concat_conditioning_mask` is enabled.
633
+ cross_attention_kwargs (`dict[str]`, *optional*, defaults to `None`):
634
+ A kwargs dictionary that if specified is passed along to the `AttnProcessor`.
635
+ guess_mode (`bool`, defaults to `False`):
636
+ In this mode, the ControlNet encoder tries its best to recognize the input content of the input even if
637
+ you remove all prompts. A `guidance_scale` between 3.0 and 5.0 is recommended.
638
+ return_dict (`bool`, defaults to `True`):
639
+ Whether or not to return a [`~models.controlnets.controlnet_sparsectrl.SparseControlNetOutput`] instead of a plain tuple.
640
+ Returns:
641
+ [`~models.controlnets.controlnet_sparsectrl.SparseControlNetOutput`] **or** `tuple`:
642
+ If `return_dict` is `True`, a [`~models.controlnets.controlnet_sparsectrl.SparseControlNetOutput`] is returned, otherwise a tuple is
643
+ returned where the first element is the sample tensor.
644
+ """
645
+ sample_batch_size, sample_channels, sample_num_frames, sample_height, sample_width = sample.shape
646
+ sample = torch.zeros_like(sample)
647
+
648
+ # check channel order
649
+ channel_order = self.config.controlnet_conditioning_channel_order
650
+
651
+ if channel_order == "rgb":
652
+ # in rgb order by default
653
+ ...
654
+ elif channel_order == "bgr":
655
+ controlnet_cond = torch.flip(controlnet_cond, dims=[1])
656
+ else:
657
+ raise ValueError(f"unknown `controlnet_conditioning_channel_order`: {channel_order}")
658
+
659
+ # prepare attention_mask
660
+ if attention_mask is not None:
661
+ attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0
662
+ attention_mask = attention_mask.unsqueeze(1)
663
+
664
+ # 1. time
665
+ timesteps = timestep
666
+ if not torch.is_tensor(timesteps):
667
+ # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
668
+ # This would be a good case for the `match` statement (Python 3.10+)
669
+ is_mps = sample.device.type == "mps"
670
+ is_npu = sample.device.type == "npu"
671
+ if isinstance(timestep, float):
672
+ dtype = torch.float32 if (is_mps or is_npu) else torch.float64
673
+ else:
674
+ dtype = torch.int32 if (is_mps or is_npu) else torch.int64
675
+ timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device)
676
+ elif len(timesteps.shape) == 0:
677
+ timesteps = timesteps[None].to(sample.device)
678
+
679
+ # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
680
+ timesteps = timesteps.expand(sample.shape[0])
681
+
682
+ t_emb = self.time_proj(timesteps)
683
+
684
+ # timesteps does not contain any weights and will always return f32 tensors
685
+ # but time_embedding might actually be running in fp16. so we need to cast here.
686
+ # there might be better ways to encapsulate this.
687
+ t_emb = t_emb.to(dtype=sample.dtype)
688
+
689
+ emb = self.time_embedding(t_emb, timestep_cond)
690
+ emb = emb.repeat_interleave(sample_num_frames, dim=0, output_size=emb.shape[0] * sample_num_frames)
691
+
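+ # The timestep embedding is computed once per clip and then repeated per frame. For example, with
+ # the default `block_out_channels[0] = 320` (so `time_embed_dim = 1280`), a batch of 2 clips with
+ # 16 frames turns `emb` from shape (2, 1280) into (32, 1280), so the frame-folded 2D blocks below
+ # each receive their clip's embedding.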
692
+ # 2. pre-process
693
+ batch_size, channels, num_frames, height, width = sample.shape
694
+
695
+ sample = sample.permute(0, 2, 1, 3, 4).reshape(batch_size * num_frames, channels, height, width)
696
+ sample = self.conv_in(sample)
697
+
698
+ batch_frames, channels, height, width = sample.shape
699
+ sample = sample[:, None].reshape(sample_batch_size, sample_num_frames, channels, height, width)
700
+
701
+ if self.concat_conditioning_mask:
702
+ controlnet_cond = torch.cat([controlnet_cond, conditioning_mask], dim=1)
703
+
704
+ batch_size, channels, num_frames, height, width = controlnet_cond.shape
705
+ controlnet_cond = controlnet_cond.permute(0, 2, 1, 3, 4).reshape(
706
+ batch_size * num_frames, channels, height, width
707
+ )
708
+ controlnet_cond = self.controlnet_cond_embedding(controlnet_cond)
709
+ batch_frames, channels, height, width = controlnet_cond.shape
710
+ controlnet_cond = controlnet_cond[:, None].reshape(batch_size, num_frames, channels, height, width)
711
+
712
+ sample = sample + controlnet_cond
713
+
714
+ batch_size, num_frames, channels, height, width = sample.shape
715
+ sample = sample.reshape(sample_batch_size * sample_num_frames, channels, height, width)
716
+
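+ # From this point on, frames are folded into the batch dimension: the 2D down/mid blocks operate
+ # on tensors of shape (batch * num_frames, channels, height, width), while the motion modules
+ # unfold the frame axis internally via the `num_frames` argument passed below.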
717
+ # 3. down
718
+ down_block_res_samples = (sample,)
719
+ for downsample_block in self.down_blocks:
720
+ if hasattr(downsample_block, "has_cross_attention") and downsample_block.has_cross_attention:
721
+ sample, res_samples = downsample_block(
722
+ hidden_states=sample,
723
+ temb=emb,
724
+ encoder_hidden_states=encoder_hidden_states,
725
+ attention_mask=attention_mask,
726
+ num_frames=num_frames,
727
+ cross_attention_kwargs=cross_attention_kwargs,
728
+ )
729
+ else:
730
+ sample, res_samples = downsample_block(hidden_states=sample, temb=emb, num_frames=num_frames)
731
+
732
+ down_block_res_samples += res_samples
733
+
734
+ # 4. mid
735
+ if self.mid_block is not None:
736
+ if hasattr(self.mid_block, "has_cross_attention") and self.mid_block.has_cross_attention:
737
+ sample = self.mid_block(
738
+ sample,
739
+ emb,
740
+ encoder_hidden_states=encoder_hidden_states,
741
+ attention_mask=attention_mask,
742
+ cross_attention_kwargs=cross_attention_kwargs,
743
+ )
744
+ else:
745
+ sample = self.mid_block(sample, emb)
746
+
747
+ # 5. Control net blocks
748
+ controlnet_down_block_res_samples = ()
749
+
750
+ for down_block_res_sample, controlnet_block in zip(down_block_res_samples, self.controlnet_down_blocks):
751
+ down_block_res_sample = controlnet_block(down_block_res_sample)
752
+ controlnet_down_block_res_samples = controlnet_down_block_res_samples + (down_block_res_sample,)
753
+
754
+ down_block_res_samples = controlnet_down_block_res_samples
755
+ mid_block_res_sample = self.controlnet_mid_block(sample)
756
+
757
+ # 6. scaling
758
+ if guess_mode and not self.config.global_pool_conditions:
759
+ scales = torch.logspace(-1, 0, len(down_block_res_samples) + 1, device=sample.device) # 0.1 to 1.0
760
+ scales = scales * conditioning_scale
761
+ down_block_res_samples = [sample * scale for sample, scale in zip(down_block_res_samples, scales)]
762
+ mid_block_res_sample = mid_block_res_sample * scales[-1] # last one
763
+ else:
764
+ down_block_res_samples = [sample * conditioning_scale for sample in down_block_res_samples]
765
+ mid_block_res_sample = mid_block_res_sample * conditioning_scale
766
+
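+ # In guess mode the residuals are re-weighted geometrically: `torch.logspace(-1, 0, n)` runs from
+ # 0.1 up to 1.0, so shallow down-block features are attenuated roughly 10x more than the mid
+ # block. With the default 4-block, 2-layers-per-block config there are 12 down-block residuals
+ # plus the mid block, hence 13 scale values.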
767
+ if self.config.global_pool_conditions:
768
+ down_block_res_samples = [
769
+ torch.mean(sample, dim=(2, 3), keepdim=True) for sample in down_block_res_samples
770
+ ]
771
+ mid_block_res_sample = torch.mean(mid_block_res_sample, dim=(2, 3), keepdim=True)
772
+
773
+ if not return_dict:
774
+ return (down_block_res_samples, mid_block_res_sample)
775
+
776
+ return SparseControlNetOutput(
777
+ down_block_res_samples=down_block_res_samples, mid_block_res_sample=mid_block_res_sample
778
+ )
779
+
780
+
781
+ # Copied from diffusers.models.controlnets.controlnet.zero_module
782
+ def zero_module(module: nn.Module) -> nn.Module:
783
+ for p in module.parameters():
784
+ nn.init.zeros_(p)
785
+ return module
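+ # Zero-initialising the 1x1 projection convolutions ("zero convs") makes the control branch a
+ # no-op at the start of training, so adding the ControlNet initially leaves the base UNet's
+ # behaviour unchanged. A minimal sketch of the effect:
+ #
+ #     conv = zero_module(nn.Conv2d(4, 4, kernel_size=1))
+ #     assert conv(torch.randn(1, 4, 8, 8)).abs().sum() == 0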
exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/models/controlnets/controlnet_union.py ADDED
@@ -0,0 +1,841 @@
1
+ # Copyright 2025 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import Any, Dict, List, Optional, Tuple, Union
15
+
16
+ import torch
17
+ from torch import nn
18
+
19
+ from ...configuration_utils import ConfigMixin, register_to_config
20
+ from ...loaders.single_file_model import FromOriginalModelMixin
21
+ from ...utils import logging
22
+ from ..attention_processor import (
23
+ ADDED_KV_ATTENTION_PROCESSORS,
24
+ CROSS_ATTENTION_PROCESSORS,
25
+ AttentionProcessor,
26
+ AttnAddedKVProcessor,
27
+ AttnProcessor,
28
+ )
29
+ from ..embeddings import TextImageTimeEmbedding, TextTimeEmbedding, TimestepEmbedding, Timesteps
30
+ from ..modeling_utils import ModelMixin
31
+ from ..unets.unet_2d_blocks import (
32
+ UNetMidBlock2DCrossAttn,
33
+ get_down_block,
34
+ )
35
+ from ..unets.unet_2d_condition import UNet2DConditionModel
36
+ from .controlnet import ControlNetConditioningEmbedding, ControlNetOutput, zero_module
37
+
38
+
39
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
40
+
41
+
42
+ class QuickGELU(nn.Module):
43
+ """
44
+ Applies GELU approximation that is fast but somewhat inaccurate. See: https://github.com/hendrycks/GELUs
45
+ """
46
+
47
+ def forward(self, input: torch.Tensor) -> torch.Tensor:
48
+ return input * torch.sigmoid(1.702 * input)
49
+
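+ # Sanity-check sketch of the approximation: x * sigmoid(1.702 * x) stays close to the exact GELU
+ # (the constant 1.702 comes from fitting a sigmoid to the Gaussian CDF).
+ #
+ #     x = torch.linspace(-3, 3, 7)
+ #     approx = QuickGELU()(x)
+ #     exact = torch.nn.functional.gelu(x)
+ #     # the maximum absolute deviation is on the order of 1e-2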
50
+
51
+ class ResidualAttentionMlp(nn.Module):
52
+ def __init__(self, d_model: int):
53
+ super().__init__()
54
+ self.c_fc = nn.Linear(d_model, d_model * 4)
55
+ self.gelu = QuickGELU()
56
+ self.c_proj = nn.Linear(d_model * 4, d_model)
57
+
58
+ def forward(self, x: torch.Tensor):
59
+ x = self.c_fc(x)
60
+ x = self.gelu(x)
61
+ x = self.c_proj(x)
62
+ return x
63
+
64
+
65
+ class ResidualAttentionBlock(nn.Module):
66
+ def __init__(self, d_model: int, n_head: int, attn_mask: torch.Tensor = None):
67
+ super().__init__()
68
+ self.attn = nn.MultiheadAttention(d_model, n_head)
69
+ self.ln_1 = nn.LayerNorm(d_model)
70
+ self.mlp = ResidualAttentionMlp(d_model)
71
+ self.ln_2 = nn.LayerNorm(d_model)
72
+ self.attn_mask = attn_mask
73
+
74
+ def attention(self, x: torch.Tensor):
75
+ self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None
76
+ return self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask)[0]
77
+
78
+ def forward(self, x: torch.Tensor):
79
+ x = x + self.attention(self.ln_1(x))
80
+ x = x + self.mlp(self.ln_2(x))
81
+ return x
82
+
83
+
84
+ class ControlNetUnionModel(ModelMixin, ConfigMixin, FromOriginalModelMixin):
85
+ """
86
+ A ControlNetUnion model.
87
+
88
+ Args:
89
+ in_channels (`int`, defaults to 4):
90
+ The number of channels in the input sample.
91
+ flip_sin_to_cos (`bool`, defaults to `True`):
92
+ Whether to flip the sin to cos in the time embedding.
93
+ freq_shift (`int`, defaults to 0):
94
+ The frequency shift to apply to the time embedding.
95
+ down_block_types (`tuple[str]`, defaults to `("CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D")`):
96
+ The tuple of downsample blocks to use.
97
+ only_cross_attention (`Union[bool, Tuple[bool]]`, defaults to `False`):
98
+ block_out_channels (`tuple[int]`, defaults to `(320, 640, 1280, 1280)`):
99
+ The tuple of output channels for each block.
100
+ layers_per_block (`int`, defaults to 2):
101
+ The number of layers per block.
102
+ downsample_padding (`int`, defaults to 1):
103
+ The padding to use for the downsampling convolution.
104
+ mid_block_scale_factor (`float`, defaults to 1):
105
+ The scale factor to use for the mid block.
106
+ act_fn (`str`, defaults to "silu"):
107
+ The activation function to use.
108
+ norm_num_groups (`int`, *optional*, defaults to 32):
109
+ The number of groups to use for the normalization. If None, normalization and activation layers is skipped
110
+ in post-processing.
111
+ norm_eps (`float`, defaults to 1e-5):
112
+ The epsilon to use for the normalization.
113
+ cross_attention_dim (`int`, defaults to 1280):
114
+ The dimension of the cross attention features.
115
+ transformer_layers_per_block (`int` or `Tuple[int]`, *optional*, defaults to 1):
116
+ The number of transformer blocks of type [`~models.attention.BasicTransformerBlock`]. Only relevant for
117
+ [`~models.unet_2d_blocks.CrossAttnDownBlock2D`], [`~models.unet_2d_blocks.CrossAttnUpBlock2D`],
118
+ [`~models.unet_2d_blocks.UNetMidBlock2DCrossAttn`].
119
+ encoder_hid_dim (`int`, *optional*, defaults to None):
120
+ If `encoder_hid_dim_type` is defined, `encoder_hidden_states` will be projected from `encoder_hid_dim`
121
+ dimension to `cross_attention_dim`.
122
+ encoder_hid_dim_type (`str`, *optional*, defaults to `None`):
123
+ If given, the `encoder_hidden_states` and potentially other embeddings are down-projected to text
124
+ embeddings of dimension `cross_attention_dim` according to `encoder_hid_dim_type`.
125
+ attention_head_dim (`Union[int, Tuple[int]]`, defaults to 8):
126
+ The dimension of the attention heads.
127
+ use_linear_projection (`bool`, defaults to `False`):
128
+ class_embed_type (`str`, *optional*, defaults to `None`):
129
+ The type of class embedding to use which is ultimately summed with the time embeddings. Choose from None,
130
+ `"timestep"`, `"identity"`, `"projection"`, or `"simple_projection"`.
131
+ addition_embed_type (`str`, *optional*, defaults to `None`):
132
+ Configures an optional embedding which will be summed with the time embeddings. Choose from `None` or
133
+ "text". "text" will use the `TextTimeEmbedding` layer.
134
+ num_class_embeds (`int`, *optional*, defaults to `None`):
135
+ Input dimension of the learnable embedding matrix to be projected to `time_embed_dim`, when performing
136
+ class conditioning with `class_embed_type` equal to `None`.
137
+ upcast_attention (`bool`, defaults to `False`):
138
+ resnet_time_scale_shift (`str`, defaults to `"default"`):
139
+ Time scale shift config for ResNet blocks (see `ResnetBlock2D`). Choose from `default` or `scale_shift`.
140
+ projection_class_embeddings_input_dim (`int`, *optional*, defaults to `None`):
141
+ The dimension of the `class_labels` input when `class_embed_type="projection"`. Required when
142
+ `class_embed_type="projection"`.
143
+ controlnet_conditioning_channel_order (`str`, defaults to `"rgb"`):
144
+ The channel order of conditional image. Will convert to `rgb` if it's `bgr`.
145
+ conditioning_embedding_out_channels (`tuple[int]`, *optional*, defaults to `(48, 96, 192, 384)`):
146
+ The tuple of output channel for each block in the `conditioning_embedding` layer.
147
+ global_pool_conditions (`bool`, defaults to `False`):
148
+ """
149
+
150
+ _supports_gradient_checkpointing = True
151
+
152
+ @register_to_config
153
+ def __init__(
154
+ self,
155
+ in_channels: int = 4,
156
+ conditioning_channels: int = 3,
157
+ flip_sin_to_cos: bool = True,
158
+ freq_shift: int = 0,
159
+ down_block_types: Tuple[str, ...] = (
160
+ "CrossAttnDownBlock2D",
161
+ "CrossAttnDownBlock2D",
162
+ "CrossAttnDownBlock2D",
163
+ "DownBlock2D",
164
+ ),
165
+ only_cross_attention: Union[bool, Tuple[bool]] = False,
166
+ block_out_channels: Tuple[int, ...] = (320, 640, 1280, 1280),
167
+ layers_per_block: int = 2,
168
+ downsample_padding: int = 1,
169
+ mid_block_scale_factor: float = 1,
170
+ act_fn: str = "silu",
171
+ norm_num_groups: Optional[int] = 32,
172
+ norm_eps: float = 1e-5,
173
+ cross_attention_dim: int = 1280,
174
+ transformer_layers_per_block: Union[int, Tuple[int, ...]] = 1,
175
+ encoder_hid_dim: Optional[int] = None,
176
+ encoder_hid_dim_type: Optional[str] = None,
177
+ attention_head_dim: Union[int, Tuple[int, ...]] = 8,
178
+ num_attention_heads: Optional[Union[int, Tuple[int, ...]]] = None,
179
+ use_linear_projection: bool = False,
180
+ class_embed_type: Optional[str] = None,
181
+ addition_embed_type: Optional[str] = None,
182
+ addition_time_embed_dim: Optional[int] = None,
183
+ num_class_embeds: Optional[int] = None,
184
+ upcast_attention: bool = False,
185
+ resnet_time_scale_shift: str = "default",
186
+ projection_class_embeddings_input_dim: Optional[int] = None,
187
+ controlnet_conditioning_channel_order: str = "rgb",
188
+ conditioning_embedding_out_channels: Optional[Tuple[int, ...]] = (48, 96, 192, 384),
189
+ global_pool_conditions: bool = False,
190
+ addition_embed_type_num_heads: int = 64,
191
+ num_control_type: int = 6,
192
+ num_trans_channel: int = 320,
193
+ num_trans_head: int = 8,
194
+ num_trans_layer: int = 1,
195
+ num_proj_channel: int = 320,
196
+ ):
197
+ super().__init__()
198
+
199
+ # If `num_attention_heads` is not defined (which is the case for most models)
200
+ # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
201
+ # The reason for this behavior is to correct for incorrectly named variables that were introduced
202
+ # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
203
+ # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
204
+ # which is why we correct for the naming here.
205
+ num_attention_heads = num_attention_heads or attention_head_dim
206
+
207
+ # Check inputs
208
+ if len(block_out_channels) != len(down_block_types):
209
+ raise ValueError(
210
+ f"Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. `down_block_types`: {down_block_types}."
211
+ )
212
+
213
+ if not isinstance(only_cross_attention, bool) and len(only_cross_attention) != len(down_block_types):
214
+ raise ValueError(
215
+ f"Must provide the same number of `only_cross_attention` as `down_block_types`. `only_cross_attention`: {only_cross_attention}. `down_block_types`: {down_block_types}."
216
+ )
217
+
218
+ if not isinstance(num_attention_heads, int) and len(num_attention_heads) != len(down_block_types):
219
+ raise ValueError(
220
+ f"Must provide the same number of `num_attention_heads` as `down_block_types`. `num_attention_heads`: {num_attention_heads}. `down_block_types`: {down_block_types}."
221
+ )
222
+
223
+ if isinstance(transformer_layers_per_block, int):
224
+ transformer_layers_per_block = [transformer_layers_per_block] * len(down_block_types)
225
+
226
+ # input
227
+ conv_in_kernel = 3
228
+ conv_in_padding = (conv_in_kernel - 1) // 2
229
+ self.conv_in = nn.Conv2d(
230
+ in_channels, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding
231
+ )
232
+
233
+ # time
234
+ time_embed_dim = block_out_channels[0] * 4
235
+ self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift)
236
+ timestep_input_dim = block_out_channels[0]
237
+ self.time_embedding = TimestepEmbedding(
238
+ timestep_input_dim,
239
+ time_embed_dim,
240
+ act_fn=act_fn,
241
+ )
242
+
243
+ if encoder_hid_dim_type is not None:
244
+ raise ValueError(f"encoder_hid_dim_type: {encoder_hid_dim_type} must be None.")
245
+ else:
246
+ self.encoder_hid_proj = None
247
+
248
+ # class embedding
249
+ if class_embed_type is None and num_class_embeds is not None:
250
+ self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim)
251
+ elif class_embed_type == "timestep":
252
+ self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)
253
+ elif class_embed_type == "identity":
254
+ self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim)
255
+ elif class_embed_type == "projection":
256
+ if projection_class_embeddings_input_dim is None:
257
+ raise ValueError(
258
+ "`class_embed_type`: 'projection' requires `projection_class_embeddings_input_dim` be set"
259
+ )
260
+ # The projection `class_embed_type` is the same as the timestep `class_embed_type` except
261
+ # 1. the `class_labels` inputs are not first converted to sinusoidal embeddings
262
+ # 2. it projects from an arbitrary input dimension.
263
+ #
264
+ # Note that `TimestepEmbedding` is quite general, being mainly linear layers and activations.
265
+ # When used for embedding actual timesteps, the timesteps are first converted to sinusoidal embeddings.
266
+ # As a result, `TimestepEmbedding` can be passed arbitrary vectors.
267
+ self.class_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim)
268
+ else:
269
+ self.class_embedding = None
270
+
271
+ if addition_embed_type == "text":
272
+ if encoder_hid_dim is not None:
273
+ text_time_embedding_from_dim = encoder_hid_dim
274
+ else:
275
+ text_time_embedding_from_dim = cross_attention_dim
276
+
277
+ self.add_embedding = TextTimeEmbedding(
278
+ text_time_embedding_from_dim, time_embed_dim, num_heads=addition_embed_type_num_heads
279
+ )
280
+ elif addition_embed_type == "text_image":
281
+ # text_embed_dim and image_embed_dim DON'T have to be `cross_attention_dim`. To not clutter the __init__ too much
282
+ # they are set to `cross_attention_dim` here as this is exactly the required dimension for the currently only use
283
+ # case when `addition_embed_type == "text_image"` (Kandinsky 2.1)`
284
+ self.add_embedding = TextImageTimeEmbedding(
285
+ text_embed_dim=cross_attention_dim, image_embed_dim=cross_attention_dim, time_embed_dim=time_embed_dim
286
+ )
287
+ elif addition_embed_type == "text_time":
288
+ self.add_time_proj = Timesteps(addition_time_embed_dim, flip_sin_to_cos, freq_shift)
289
+ self.add_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim)
290
+
291
+ elif addition_embed_type is not None:
292
+ raise ValueError(f"addition_embed_type: {addition_embed_type} must be None, 'text' or 'text_image'.")
293
+
294
+ # control net conditioning embedding
295
+ self.controlnet_cond_embedding = ControlNetConditioningEmbedding(
296
+ conditioning_embedding_channels=block_out_channels[0],
297
+ block_out_channels=conditioning_embedding_out_channels,
298
+ conditioning_channels=conditioning_channels,
299
+ )
300
+
301
+ task_scale_factor = num_trans_channel**0.5
302
+ self.task_embedding = nn.Parameter(task_scale_factor * torch.randn(num_control_type, num_trans_channel))
303
+ self.transformer_layes = nn.ModuleList(
304
+ [ResidualAttentionBlock(num_trans_channel, num_trans_head) for _ in range(num_trans_layer)]
305
+ )
306
+ self.spatial_ch_projs = zero_module(nn.Linear(num_trans_channel, num_proj_channel))
307
+ self.control_type_proj = Timesteps(addition_time_embed_dim, flip_sin_to_cos, freq_shift)
308
+ self.control_add_embedding = TimestepEmbedding(addition_time_embed_dim * num_control_type, time_embed_dim)
309
+
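+ # Shape note (inferred from the layer dimensions above): `control_type` is a length-
+ # `num_control_type` 0/1 vector per sample. Each entry is sinusoidally embedded by
+ # `control_type_proj` (`addition_time_embed_dim` features), the result is flattened to
+ # `addition_time_embed_dim * num_control_type` and projected to `time_embed_dim` by
+ # `control_add_embedding`, so it can be summed with the time embedding in `forward`.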
310
+ self.down_blocks = nn.ModuleList([])
311
+ self.controlnet_down_blocks = nn.ModuleList([])
312
+
313
+ if isinstance(only_cross_attention, bool):
314
+ only_cross_attention = [only_cross_attention] * len(down_block_types)
315
+
316
+ if isinstance(attention_head_dim, int):
317
+ attention_head_dim = (attention_head_dim,) * len(down_block_types)
318
+
319
+ if isinstance(num_attention_heads, int):
320
+ num_attention_heads = (num_attention_heads,) * len(down_block_types)
321
+
322
+ # down
323
+ output_channel = block_out_channels[0]
324
+
325
+ controlnet_block = nn.Conv2d(output_channel, output_channel, kernel_size=1)
326
+ controlnet_block = zero_module(controlnet_block)
327
+ self.controlnet_down_blocks.append(controlnet_block)
328
+
329
+ for i, down_block_type in enumerate(down_block_types):
330
+ input_channel = output_channel
331
+ output_channel = block_out_channels[i]
332
+ is_final_block = i == len(block_out_channels) - 1
333
+
334
+ down_block = get_down_block(
335
+ down_block_type,
336
+ num_layers=layers_per_block,
337
+ transformer_layers_per_block=transformer_layers_per_block[i],
338
+ in_channels=input_channel,
339
+ out_channels=output_channel,
340
+ temb_channels=time_embed_dim,
341
+ add_downsample=not is_final_block,
342
+ resnet_eps=norm_eps,
343
+ resnet_act_fn=act_fn,
344
+ resnet_groups=norm_num_groups,
345
+ cross_attention_dim=cross_attention_dim,
346
+ num_attention_heads=num_attention_heads[i],
347
+ attention_head_dim=attention_head_dim[i] if attention_head_dim[i] is not None else output_channel,
348
+ downsample_padding=downsample_padding,
349
+ use_linear_projection=use_linear_projection,
350
+ only_cross_attention=only_cross_attention[i],
351
+ upcast_attention=upcast_attention,
352
+ resnet_time_scale_shift=resnet_time_scale_shift,
353
+ )
354
+ self.down_blocks.append(down_block)
355
+
356
+ for _ in range(layers_per_block):
357
+ controlnet_block = nn.Conv2d(output_channel, output_channel, kernel_size=1)
358
+ controlnet_block = zero_module(controlnet_block)
359
+ self.controlnet_down_blocks.append(controlnet_block)
360
+
361
+ if not is_final_block:
362
+ controlnet_block = nn.Conv2d(output_channel, output_channel, kernel_size=1)
363
+ controlnet_block = zero_module(controlnet_block)
364
+ self.controlnet_down_blocks.append(controlnet_block)
365
+
366
+ # mid
367
+ mid_block_channel = block_out_channels[-1]
368
+
369
+ controlnet_block = nn.Conv2d(mid_block_channel, mid_block_channel, kernel_size=1)
370
+ controlnet_block = zero_module(controlnet_block)
371
+ self.controlnet_mid_block = controlnet_block
372
+
373
+ self.mid_block = UNetMidBlock2DCrossAttn(
374
+ transformer_layers_per_block=transformer_layers_per_block[-1],
375
+ in_channels=mid_block_channel,
376
+ temb_channels=time_embed_dim,
377
+ resnet_eps=norm_eps,
378
+ resnet_act_fn=act_fn,
379
+ output_scale_factor=mid_block_scale_factor,
380
+ resnet_time_scale_shift=resnet_time_scale_shift,
381
+ cross_attention_dim=cross_attention_dim,
382
+ num_attention_heads=num_attention_heads[-1],
383
+ resnet_groups=norm_num_groups,
384
+ use_linear_projection=use_linear_projection,
385
+ upcast_attention=upcast_attention,
386
+ )
387
+
388
+ @classmethod
389
+ def from_unet(
390
+ cls,
391
+ unet: UNet2DConditionModel,
392
+ controlnet_conditioning_channel_order: str = "rgb",
393
+ conditioning_embedding_out_channels: Optional[Tuple[int, ...]] = (16, 32, 96, 256),
394
+ load_weights_from_unet: bool = True,
395
+ ):
396
+ r"""
397
+ Instantiate a [`ControlNetUnionModel`] from [`UNet2DConditionModel`].
398
+
399
+ Parameters:
400
+ unet (`UNet2DConditionModel`):
401
+ The UNet model weights to copy to the [`ControlNetUnionModel`]. All configuration options are also
402
+ copied where applicable.
403
+ """
404
+ transformer_layers_per_block = (
405
+ unet.config.transformer_layers_per_block if "transformer_layers_per_block" in unet.config else 1
406
+ )
407
+ encoder_hid_dim = unet.config.encoder_hid_dim if "encoder_hid_dim" in unet.config else None
408
+ encoder_hid_dim_type = unet.config.encoder_hid_dim_type if "encoder_hid_dim_type" in unet.config else None
409
+ addition_embed_type = unet.config.addition_embed_type if "addition_embed_type" in unet.config else None
410
+ addition_time_embed_dim = (
411
+ unet.config.addition_time_embed_dim if "addition_time_embed_dim" in unet.config else None
412
+ )
413
+
414
+ controlnet = cls(
415
+ encoder_hid_dim=encoder_hid_dim,
416
+ encoder_hid_dim_type=encoder_hid_dim_type,
417
+ addition_embed_type=addition_embed_type,
418
+ addition_time_embed_dim=addition_time_embed_dim,
419
+ transformer_layers_per_block=transformer_layers_per_block,
420
+ in_channels=unet.config.in_channels,
421
+ flip_sin_to_cos=unet.config.flip_sin_to_cos,
422
+ freq_shift=unet.config.freq_shift,
423
+ down_block_types=unet.config.down_block_types,
424
+ only_cross_attention=unet.config.only_cross_attention,
425
+ block_out_channels=unet.config.block_out_channels,
426
+ layers_per_block=unet.config.layers_per_block,
427
+ downsample_padding=unet.config.downsample_padding,
428
+ mid_block_scale_factor=unet.config.mid_block_scale_factor,
429
+ act_fn=unet.config.act_fn,
430
+ norm_num_groups=unet.config.norm_num_groups,
431
+ norm_eps=unet.config.norm_eps,
432
+ cross_attention_dim=unet.config.cross_attention_dim,
433
+ attention_head_dim=unet.config.attention_head_dim,
434
+ num_attention_heads=unet.config.num_attention_heads,
435
+ use_linear_projection=unet.config.use_linear_projection,
436
+ class_embed_type=unet.config.class_embed_type,
437
+ num_class_embeds=unet.config.num_class_embeds,
438
+ upcast_attention=unet.config.upcast_attention,
439
+ resnet_time_scale_shift=unet.config.resnet_time_scale_shift,
440
+ projection_class_embeddings_input_dim=unet.config.projection_class_embeddings_input_dim,
441
+ controlnet_conditioning_channel_order=controlnet_conditioning_channel_order,
442
+ conditioning_embedding_out_channels=conditioning_embedding_out_channels,
443
+ )
444
+
445
+ if load_weights_from_unet:
446
+ controlnet.conv_in.load_state_dict(unet.conv_in.state_dict())
447
+ controlnet.time_proj.load_state_dict(unet.time_proj.state_dict())
448
+ controlnet.time_embedding.load_state_dict(unet.time_embedding.state_dict())
449
+
450
+ if controlnet.class_embedding:
451
+ controlnet.class_embedding.load_state_dict(unet.class_embedding.state_dict())
452
+
453
+ controlnet.down_blocks.load_state_dict(unet.down_blocks.state_dict(), strict=False)
454
+ controlnet.mid_block.load_state_dict(unet.mid_block.state_dict(), strict=False)
455
+
456
+ return controlnet
457
+
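+ # Illustrative usage sketch (the checkpoint id is an assumption): ControlNetUnion is typically
+ # built from an SDXL UNet, whose `addition_embed_type="text_time"` config is carried over by
+ # `from_unet`; the time embeddings are copied exactly and the down/mid blocks with `strict=False`.
+ #
+ #     from diffusers import UNet2DConditionModel
+ #     from diffusers.models.controlnets.controlnet_union import ControlNetUnionModel
+ #
+ #     unet = UNet2DConditionModel.from_pretrained(
+ #         "stabilityai/stable-diffusion-xl-base-1.0", subfolder="unet"
+ #     )
+ #     controlnet = ControlNetUnionModel.from_unet(unet, load_weights_from_unet=True)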
458
+ @property
459
+ # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.attn_processors
460
+ def attn_processors(self) -> Dict[str, AttentionProcessor]:
461
+ r"""
462
+ Returns:
463
+ `dict` of attention processors: A dictionary containing all attention processors used in the model,
464
+ indexed by its weight name.
465
+ """
466
+ # set recursively
467
+ processors = {}
468
+
469
+ def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
470
+ if hasattr(module, "get_processor"):
471
+ processors[f"{name}.processor"] = module.get_processor()
472
+
473
+ for sub_name, child in module.named_children():
474
+ fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)
475
+
476
+ return processors
477
+
478
+ for name, module in self.named_children():
479
+ fn_recursive_add_processors(name, module, processors)
480
+
481
+ return processors
482
+
483
+ # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_attn_processor
484
+ def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
485
+ r"""
486
+ Sets the attention processor to use to compute attention.
487
+
488
+ Parameters:
489
+ processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`):
490
+ The instantiated processor class or a dictionary of processor classes that will be set as the processor
491
+ for **all** `Attention` layers.
492
+
493
+ If `processor` is a dict, the key needs to define the path to the corresponding cross attention
494
+ processor. This is strongly recommended when setting trainable attention processors.
495
+
496
+ """
497
+ count = len(self.attn_processors.keys())
498
+
499
+ if isinstance(processor, dict) and len(processor) != count:
500
+ raise ValueError(
501
+ f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
502
+ f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
503
+ )
504
+
505
+ def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
506
+ if hasattr(module, "set_processor"):
507
+ if not isinstance(processor, dict):
508
+ module.set_processor(processor)
509
+ else:
510
+ module.set_processor(processor.pop(f"{name}.processor"))
511
+
512
+ for sub_name, child in module.named_children():
513
+ fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)
514
+
515
+ for name, module in self.named_children():
516
+ fn_recursive_attn_processor(name, module, processor)
517
+
518
+ # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_default_attn_processor
519
+ def set_default_attn_processor(self):
520
+ """
521
+ Disables custom attention processors and sets the default attention implementation.
522
+ """
523
+ if all(proc.__class__ in ADDED_KV_ATTENTION_PROCESSORS for proc in self.attn_processors.values()):
524
+ processor = AttnAddedKVProcessor()
525
+ elif all(proc.__class__ in CROSS_ATTENTION_PROCESSORS for proc in self.attn_processors.values()):
526
+ processor = AttnProcessor()
527
+ else:
528
+ raise ValueError(
529
+ f"Cannot call `set_default_attn_processor` when attention processors are of type {next(iter(self.attn_processors.values()))}"
530
+ )
531
+
532
+ self.set_attn_processor(processor)
533
+
534
+ # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_attention_slice
535
+ def set_attention_slice(self, slice_size: Union[str, int, List[int]]) -> None:
536
+ r"""
537
+ Enable sliced attention computation.
538
+
539
+ When this option is enabled, the attention module splits the input tensor in slices to compute attention in
540
+ several steps. This is useful for saving some memory in exchange for a small decrease in speed.
541
+
542
+ Args:
543
+ slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `"auto"`):
544
+ When `"auto"`, input to the attention heads is halved, so attention is computed in two steps. If
545
+ `"max"`, maximum amount of memory is saved by running only one slice at a time. If a number is
546
+ provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim`
547
+ must be a multiple of `slice_size`.
548
+ """
549
+ sliceable_head_dims = []
550
+
551
+ def fn_recursive_retrieve_sliceable_dims(module: torch.nn.Module):
552
+ if hasattr(module, "set_attention_slice"):
553
+ sliceable_head_dims.append(module.sliceable_head_dim)
554
+
555
+ for child in module.children():
556
+ fn_recursive_retrieve_sliceable_dims(child)
557
+
558
+ # retrieve number of attention layers
559
+ for module in self.children():
560
+ fn_recursive_retrieve_sliceable_dims(module)
561
+
562
+ num_sliceable_layers = len(sliceable_head_dims)
563
+
564
+ if slice_size == "auto":
565
+ # half the attention head size is usually a good trade-off between
566
+ # speed and memory
567
+ slice_size = [dim // 2 for dim in sliceable_head_dims]
568
+ elif slice_size == "max":
569
+ # make smallest slice possible
570
+ slice_size = num_sliceable_layers * [1]
571
+
572
+ slice_size = num_sliceable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size
573
+
574
+ if len(slice_size) != len(sliceable_head_dims):
575
+ raise ValueError(
576
+ f"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different"
577
+ f" attention layers. Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}."
578
+ )
579
+
580
+ for i in range(len(slice_size)):
581
+ size = slice_size[i]
582
+ dim = sliceable_head_dims[i]
583
+ if size is not None and size > dim:
584
+ raise ValueError(f"size {size} has to be smaller or equal to {dim}.")
585
+
586
+ # Recursively walk through all the children.
587
+ # Any children which exposes the set_attention_slice method
588
+ # gets the message
589
+ def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]):
590
+ if hasattr(module, "set_attention_slice"):
591
+ module.set_attention_slice(slice_size.pop())
592
+
593
+ for child in module.children():
594
+ fn_recursive_set_attention_slice(child, slice_size)
595
+
596
+ reversed_slice_size = list(reversed(slice_size))
597
+ for module in self.children():
598
+ fn_recursive_set_attention_slice(module, reversed_slice_size)
599
+
600
+ def forward(
601
+ self,
602
+ sample: torch.Tensor,
603
+ timestep: Union[torch.Tensor, float, int],
604
+ encoder_hidden_states: torch.Tensor,
605
+ controlnet_cond: List[torch.Tensor],
606
+ control_type: torch.Tensor,
607
+ control_type_idx: List[int],
608
+ conditioning_scale: Union[float, List[float]] = 1.0,
609
+ class_labels: Optional[torch.Tensor] = None,
610
+ timestep_cond: Optional[torch.Tensor] = None,
611
+ attention_mask: Optional[torch.Tensor] = None,
612
+ added_cond_kwargs: Optional[Dict[str, torch.Tensor]] = None,
613
+ cross_attention_kwargs: Optional[Dict[str, Any]] = None,
614
+ from_multi: bool = False,
615
+ guess_mode: bool = False,
616
+ return_dict: bool = True,
617
+ ) -> Union[ControlNetOutput, Tuple[Tuple[torch.Tensor, ...], torch.Tensor]]:
618
+ """
619
+ The [`ControlNetUnionModel`] forward method.
620
+
621
+ Args:
622
+ sample (`torch.Tensor`):
623
+ The noisy input tensor.
624
+ timestep (`Union[torch.Tensor, float, int]`):
625
+ The number of timesteps to denoise an input.
626
+ encoder_hidden_states (`torch.Tensor`):
627
+ The encoder hidden states.
628
+ controlnet_cond (`List[torch.Tensor]`):
629
+ The conditional input tensors.
630
+ control_type (`torch.Tensor`):
631
+ A tensor of shape `(batch, num_control_type)` with values `0` or `1` depending on whether the control
632
+ type is used.
633
+ control_type_idx (`List[int]`):
634
+ The indices of `control_type`.
635
+ conditioning_scale (`float`, defaults to `1.0`):
636
+ The scale factor for ControlNet outputs.
637
+ class_labels (`torch.Tensor`, *optional*, defaults to `None`):
638
+ Optional class labels for conditioning. Their embeddings will be summed with the timestep embeddings.
639
+ timestep_cond (`torch.Tensor`, *optional*, defaults to `None`):
640
+ Additional conditional embeddings for timestep. If provided, the embeddings will be summed with the
641
+ timestep_embedding passed through the `self.time_embedding` layer to obtain the final timestep
642
+ embeddings.
643
+ attention_mask (`torch.Tensor`, *optional*, defaults to `None`):
644
+ An attention mask of shape `(batch, key_tokens)` is applied to `encoder_hidden_states`. If `1` the mask
645
+ is kept, otherwise if `0` it is discarded. Mask will be converted into a bias, which adds large
646
+ negative values to the attention scores corresponding to "discard" tokens.
647
+ added_cond_kwargs (`dict`):
648
+ Additional conditions for the Stable Diffusion XL UNet.
649
+ cross_attention_kwargs (`dict[str]`, *optional*, defaults to `None`):
650
+ A kwargs dictionary that if specified is passed along to the `AttnProcessor`.
651
+ from_multi (`bool`, defaults to `False`):
652
+ Use standard scaling when called from `MultiControlNetUnionModel`.
653
+ guess_mode (`bool`, defaults to `False`):
654
+ In this mode, the ControlNet encoder tries its best to recognize the content of the input image even if
655
+ you remove all prompts. A `guidance_scale` between 3.0 and 5.0 is recommended.
656
+ return_dict (`bool`, defaults to `True`):
657
+ Whether or not to return a [`~models.controlnet.ControlNetOutput`] instead of a plain tuple.
658
+
659
+ Returns:
660
+ [`~models.controlnet.ControlNetOutput`] **or** `tuple`:
661
+ If `return_dict` is `True`, a [`~models.controlnet.ControlNetOutput`] is returned, otherwise a tuple is
662
+ returned where the first element is the tuple of down block residual samples.
663
+ """
664
+ if isinstance(conditioning_scale, float):
665
+ conditioning_scale = [conditioning_scale] * len(controlnet_cond)
666
+
667
+ # check channel order
668
+ channel_order = self.config.controlnet_conditioning_channel_order
669
+
670
+ if channel_order != "rgb":
671
+ raise ValueError(f"unknown `controlnet_conditioning_channel_order`: {channel_order}")
672
+
673
+ # prepare attention_mask
674
+ if attention_mask is not None:
675
+ attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0
676
+ attention_mask = attention_mask.unsqueeze(1)
677
+
678
+ # 1. time
679
+ timesteps = timestep
680
+ if not torch.is_tensor(timesteps):
681
+ # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
682
+ # This would be a good case for the `match` statement (Python 3.10+)
683
+ is_mps = sample.device.type == "mps"
684
+ is_npu = sample.device.type == "npu"
685
+ if isinstance(timestep, float):
686
+ dtype = torch.float32 if (is_mps or is_npu) else torch.float64
687
+ else:
688
+ dtype = torch.int32 if (is_mps or is_npu) else torch.int64
689
+ timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device)
690
+ elif len(timesteps.shape) == 0:
691
+ timesteps = timesteps[None].to(sample.device)
692
+
693
+ # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
694
+ timesteps = timesteps.expand(sample.shape[0])
695
+
696
+ t_emb = self.time_proj(timesteps)
697
+
698
+ # timesteps does not contain any weights and will always return f32 tensors
699
+ # but time_embedding might actually be running in fp16. so we need to cast here.
700
+ # there might be better ways to encapsulate this.
701
+ t_emb = t_emb.to(dtype=sample.dtype)
702
+
703
+ emb = self.time_embedding(t_emb, timestep_cond)
704
+ aug_emb = None
705
+
706
+ if self.class_embedding is not None:
707
+ if class_labels is None:
708
+ raise ValueError("class_labels should be provided when num_class_embeds > 0")
709
+
710
+ if self.config.class_embed_type == "timestep":
711
+ class_labels = self.time_proj(class_labels)
712
+
713
+ class_emb = self.class_embedding(class_labels).to(dtype=self.dtype)
714
+ emb = emb + class_emb
715
+
716
+ if self.config.addition_embed_type is not None:
717
+ if self.config.addition_embed_type == "text":
718
+ aug_emb = self.add_embedding(encoder_hidden_states)
719
+
720
+ elif self.config.addition_embed_type == "text_time":
721
+ if "text_embeds" not in added_cond_kwargs:
722
+ raise ValueError(
723
+ f"{self.__class__} has the config param `addition_embed_type` set to 'text_time' which requires the keyword argument `text_embeds` to be passed in `added_cond_kwargs`"
724
+ )
725
+ text_embeds = added_cond_kwargs.get("text_embeds")
726
+ if "time_ids" not in added_cond_kwargs:
727
+ raise ValueError(
728
+ f"{self.__class__} has the config param `addition_embed_type` set to 'text_time' which requires the keyword argument `time_ids` to be passed in `added_cond_kwargs`"
729
+ )
730
+ time_ids = added_cond_kwargs.get("time_ids")
731
+ time_embeds = self.add_time_proj(time_ids.flatten())
732
+ time_embeds = time_embeds.reshape((text_embeds.shape[0], -1))
733
+
734
+ add_embeds = torch.concat([text_embeds, time_embeds], dim=-1)
735
+ add_embeds = add_embeds.to(emb.dtype)
736
+ aug_emb = self.add_embedding(add_embeds)
737
+
738
+ control_embeds = self.control_type_proj(control_type.flatten())
739
+ control_embeds = control_embeds.reshape((t_emb.shape[0], -1))
740
+ control_embeds = control_embeds.to(emb.dtype)
741
+ control_emb = self.control_add_embedding(control_embeds)
742
+ emb = emb + control_emb
743
+ emb = emb + aug_emb if aug_emb is not None else emb
744
+
745
+ # 2. pre-process
746
+ sample = self.conv_in(sample)
747
+
748
+ inputs = []
749
+ condition_list = []
750
+
751
+ for cond, control_idx, scale in zip(controlnet_cond, control_type_idx, conditioning_scale):
752
+ condition = self.controlnet_cond_embedding(cond)
753
+ feat_seq = torch.mean(condition, dim=(2, 3))
754
+ feat_seq = feat_seq + self.task_embedding[control_idx]
755
+ if from_multi or len(control_type_idx) == 1:
756
+ inputs.append(feat_seq.unsqueeze(1))
757
+ condition_list.append(condition)
758
+ else:
759
+ inputs.append(feat_seq.unsqueeze(1) * scale)
760
+ condition_list.append(condition * scale)
761
+
762
+ condition = sample
763
+ feat_seq = torch.mean(condition, dim=(2, 3))
764
+ inputs.append(feat_seq.unsqueeze(1))
765
+ condition_list.append(condition)
766
+
767
+ x = torch.cat(inputs, dim=1)
768
+ for layer in self.transformer_layes:
769
+ x = layer(x)
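+ # x now holds one fused token per control input (plus a final token for the noisy sample);
+ # the per-index projections below turn each token into a channel-wise offset for its condition map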
770
+
771
+ controlnet_cond_fuser = sample * 0.0
772
+ for (idx, condition), scale in zip(enumerate(condition_list[:-1]), conditioning_scale):
773
+ alpha = self.spatial_ch_projs(x[:, idx])
774
+ alpha = alpha.unsqueeze(-1).unsqueeze(-1)
775
+ if from_multi or len(control_type_idx) == 1:
776
+ controlnet_cond_fuser += condition + alpha
777
+ else:
778
+ controlnet_cond_fuser += condition + alpha * scale
779
+
780
+ sample = sample + controlnet_cond_fuser
781
+
782
+ # 3. down
783
+ down_block_res_samples = (sample,)
784
+ for downsample_block in self.down_blocks:
785
+ if hasattr(downsample_block, "has_cross_attention") and downsample_block.has_cross_attention:
786
+ sample, res_samples = downsample_block(
787
+ hidden_states=sample,
788
+ temb=emb,
789
+ encoder_hidden_states=encoder_hidden_states,
790
+ attention_mask=attention_mask,
791
+ cross_attention_kwargs=cross_attention_kwargs,
792
+ )
793
+ else:
794
+ sample, res_samples = downsample_block(hidden_states=sample, temb=emb)
795
+
796
+ down_block_res_samples += res_samples
797
+
798
+ # 4. mid
799
+ if self.mid_block is not None:
800
+ sample = self.mid_block(
801
+ sample,
802
+ emb,
803
+ encoder_hidden_states=encoder_hidden_states,
804
+ attention_mask=attention_mask,
805
+ cross_attention_kwargs=cross_attention_kwargs,
806
+ )
807
+
808
+ # 5. Control net blocks
809
+ controlnet_down_block_res_samples = ()
810
+
811
+ for down_block_res_sample, controlnet_block in zip(down_block_res_samples, self.controlnet_down_blocks):
812
+ down_block_res_sample = controlnet_block(down_block_res_sample)
813
+ controlnet_down_block_res_samples = controlnet_down_block_res_samples + (down_block_res_sample,)
814
+
815
+ down_block_res_samples = controlnet_down_block_res_samples
816
+
817
+ mid_block_res_sample = self.controlnet_mid_block(sample)
818
+
819
+ # 6. scaling
820
+ if guess_mode and not self.config.global_pool_conditions:
821
+ scales = torch.logspace(-1, 0, len(down_block_res_samples) + 1, device=sample.device) # 0.1 to 1.0
822
+ if from_multi or len(control_type_idx) == 1:
823
+ scales = scales * conditioning_scale[0]
824
+ down_block_res_samples = [sample * scale for sample, scale in zip(down_block_res_samples, scales)]
825
+ mid_block_res_sample = mid_block_res_sample * scales[-1] # last one
826
+ elif from_multi or len(control_type_idx) == 1:
827
+ down_block_res_samples = [sample * conditioning_scale[0] for sample in down_block_res_samples]
828
+ mid_block_res_sample = mid_block_res_sample * conditioning_scale[0]
829
+
830
+ if self.config.global_pool_conditions:
831
+ down_block_res_samples = [
832
+ torch.mean(sample, dim=(2, 3), keepdim=True) for sample in down_block_res_samples
833
+ ]
834
+ mid_block_res_sample = torch.mean(mid_block_res_sample, dim=(2, 3), keepdim=True)
835
+
836
+ if not return_dict:
837
+ return (down_block_res_samples, mid_block_res_sample)
838
+
839
+ return ControlNetOutput(
840
+ down_block_res_samples=down_block_res_samples, mid_block_res_sample=mid_block_res_sample
841
+ )
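As a reference for the scaling step in the forward pass above: in guess mode the residuals are damped geometrically from 0.1 to 1.0, so coarse down-block features contribute least and the mid-block features most. A standalone sketch of the schedule (plain `torch`; the residual count is hypothetical and depends on the UNet's down blocks):

    import torch

    num_down_residuals = 9  # hypothetical count of down-block residuals
    scales = torch.logspace(-1, 0, num_down_residuals + 1)  # 0.1 ... 1.0
    down_scales, mid_scale = scales[:-1], scales[-1]
    # each down-block residual is multiplied by its scale; the mid-block residual by mid_scale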
exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/models/controlnets/controlnet_xs.py ADDED
@@ -0,0 +1,1907 @@
1
+ # Copyright 2025 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from dataclasses import dataclass
15
+ from math import gcd
16
+ from typing import Any, Dict, List, Optional, Tuple, Union
17
+
18
+ import torch
19
+ import torch.utils.checkpoint
20
+ from torch import Tensor, nn
21
+
22
+ from ...configuration_utils import ConfigMixin, register_to_config
23
+ from ...utils import BaseOutput, logging
24
+ from ...utils.torch_utils import apply_freeu
25
+ from ..attention_processor import (
26
+ ADDED_KV_ATTENTION_PROCESSORS,
27
+ CROSS_ATTENTION_PROCESSORS,
28
+ Attention,
29
+ AttentionProcessor,
30
+ AttnAddedKVProcessor,
31
+ AttnProcessor,
32
+ FusedAttnProcessor2_0,
33
+ )
34
+ from ..embeddings import TimestepEmbedding, Timesteps
35
+ from ..modeling_utils import ModelMixin
36
+ from ..unets.unet_2d_blocks import (
37
+ CrossAttnDownBlock2D,
38
+ CrossAttnUpBlock2D,
39
+ Downsample2D,
40
+ ResnetBlock2D,
41
+ Transformer2DModel,
42
+ UNetMidBlock2DCrossAttn,
43
+ Upsample2D,
44
+ )
45
+ from ..unets.unet_2d_condition import UNet2DConditionModel
46
+ from .controlnet import ControlNetConditioningEmbedding
47
+
48
+
49
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
50
+
51
+
52
+ @dataclass
53
+ class ControlNetXSOutput(BaseOutput):
54
+ """
55
+ The output of [`UNetControlNetXSModel`].
56
+
57
+ Args:
58
+ sample (`Tensor` of shape `(batch_size, num_channels, height, width)`):
59
+ The output of the `UNetControlNetXSModel`. Unlike `ControlNetOutput` this is NOT to be added to the base
60
+ model output, but is already the final output.
61
+ """
62
+
63
+ sample: Tensor = None
64
+
65
+
66
+ class DownBlockControlNetXSAdapter(nn.Module):
67
+ """Components that together with corresponding components from the base model will form a
68
+ `ControlNetXSCrossAttnDownBlock2D`"""
69
+
70
+ def __init__(
71
+ self,
72
+ resnets: nn.ModuleList,
73
+ base_to_ctrl: nn.ModuleList,
74
+ ctrl_to_base: nn.ModuleList,
75
+ attentions: Optional[nn.ModuleList] = None,
76
+ downsampler: Optional[nn.Conv2d] = None,
77
+ ):
78
+ super().__init__()
79
+ self.resnets = resnets
80
+ self.base_to_ctrl = base_to_ctrl
81
+ self.ctrl_to_base = ctrl_to_base
82
+ self.attentions = attentions
83
+ self.downsamplers = downsampler
84
+
85
+
86
+ class MidBlockControlNetXSAdapter(nn.Module):
87
+ """Components that together with corresponding components from the base model will form a
88
+ `ControlNetXSCrossAttnMidBlock2D`"""
89
+
90
+ def __init__(self, midblock: UNetMidBlock2DCrossAttn, base_to_ctrl: nn.ModuleList, ctrl_to_base: nn.ModuleList):
91
+ super().__init__()
92
+ self.midblock = midblock
93
+ self.base_to_ctrl = base_to_ctrl
94
+ self.ctrl_to_base = ctrl_to_base
95
+
96
+
97
+ class UpBlockControlNetXSAdapter(nn.Module):
98
+ """Components that together with corresponding components from the base model will form a `ControlNetXSCrossAttnUpBlock2D`"""
99
+
100
+ def __init__(self, ctrl_to_base: nn.ModuleList):
101
+ super().__init__()
102
+ self.ctrl_to_base = ctrl_to_base
103
+
104
+
105
+ def get_down_block_adapter(
106
+ base_in_channels: int,
107
+ base_out_channels: int,
108
+ ctrl_in_channels: int,
109
+ ctrl_out_channels: int,
110
+ temb_channels: int,
111
+ max_norm_num_groups: Optional[int] = 32,
112
+ has_crossattn=True,
113
+ transformer_layers_per_block: Optional[Union[int, Tuple[int]]] = 1,
114
+ num_attention_heads: Optional[int] = 1,
115
+ cross_attention_dim: Optional[int] = 1024,
116
+ add_downsample: bool = True,
117
+ upcast_attention: Optional[bool] = False,
118
+ use_linear_projection: Optional[bool] = True,
119
+ ):
120
+ num_layers = 2 # only support sd + sdxl
121
+
122
+ resnets = []
123
+ attentions = []
124
+ ctrl_to_base = []
125
+ base_to_ctrl = []
126
+
127
+ if isinstance(transformer_layers_per_block, int):
128
+ transformer_layers_per_block = [transformer_layers_per_block] * num_layers
129
+
130
+ for i in range(num_layers):
131
+ base_in_channels = base_in_channels if i == 0 else base_out_channels
132
+ ctrl_in_channels = ctrl_in_channels if i == 0 else ctrl_out_channels
133
+
134
+ # Before the resnet/attention application, information is concatted from base to control.
135
+ # Concat doesn't require change in number of channels
136
+ base_to_ctrl.append(make_zero_conv(base_in_channels, base_in_channels))
137
+
138
+ resnets.append(
139
+ ResnetBlock2D(
140
+ in_channels=ctrl_in_channels + base_in_channels, # information from base is concatted to ctrl
141
+ out_channels=ctrl_out_channels,
142
+ temb_channels=temb_channels,
143
+ groups=find_largest_factor(ctrl_in_channels + base_in_channels, max_factor=max_norm_num_groups),
144
+ groups_out=find_largest_factor(ctrl_out_channels, max_factor=max_norm_num_groups),
145
+ eps=1e-5,
146
+ )
147
+ )
148
+
149
+ if has_crossattn:
150
+ attentions.append(
151
+ Transformer2DModel(
152
+ num_attention_heads,
153
+ ctrl_out_channels // num_attention_heads,
154
+ in_channels=ctrl_out_channels,
155
+ num_layers=transformer_layers_per_block[i],
156
+ cross_attention_dim=cross_attention_dim,
157
+ use_linear_projection=use_linear_projection,
158
+ upcast_attention=upcast_attention,
159
+ norm_num_groups=find_largest_factor(ctrl_out_channels, max_factor=max_norm_num_groups),
160
+ )
161
+ )
162
+
163
+ # After the resnet/attention application, information is added from control to base
164
+ # Addition requires change in number of channels
165
+ ctrl_to_base.append(make_zero_conv(ctrl_out_channels, base_out_channels))
166
+
167
+ if add_downsample:
168
+ # Before the downsampler application, information is concatted from base to control
169
+ # Concat doesn't require change in number of channels
170
+ base_to_ctrl.append(make_zero_conv(base_out_channels, base_out_channels))
171
+
172
+ downsamplers = Downsample2D(
173
+ ctrl_out_channels + base_out_channels, use_conv=True, out_channels=ctrl_out_channels, name="op"
174
+ )
175
+
176
+ # After the downsampler application, information is added from control to base
177
+ # Addition requires change in number of channels
178
+ ctrl_to_base.append(make_zero_conv(ctrl_out_channels, base_out_channels))
179
+ else:
180
+ downsamplers = None
181
+
182
+ down_block_components = DownBlockControlNetXSAdapter(
183
+ resnets=nn.ModuleList(resnets),
184
+ base_to_ctrl=nn.ModuleList(base_to_ctrl),
185
+ ctrl_to_base=nn.ModuleList(ctrl_to_base),
186
+ )
187
+
188
+ if has_crossattn:
189
+ down_block_components.attentions = nn.ModuleList(attentions)
190
+ if downsamplers is not None:
191
+ down_block_components.downsamplers = downsamplers
192
+
193
+ return down_block_components
194
+
195
+
196
+ def get_mid_block_adapter(
197
+ base_channels: int,
198
+ ctrl_channels: int,
199
+ temb_channels: Optional[int] = None,
200
+ max_norm_num_groups: Optional[int] = 32,
201
+ transformer_layers_per_block: int = 1,
202
+ num_attention_heads: Optional[int] = 1,
203
+ cross_attention_dim: Optional[int] = 1024,
204
+ upcast_attention: bool = False,
205
+ use_linear_projection: bool = True,
206
+ ):
207
+ # Before the midblock application, information is concatted from base to control.
208
+ # Concat doesn't require change in number of channels
209
+ base_to_ctrl = make_zero_conv(base_channels, base_channels)
210
+
211
+ midblock = UNetMidBlock2DCrossAttn(
212
+ transformer_layers_per_block=transformer_layers_per_block,
213
+ in_channels=ctrl_channels + base_channels,
214
+ out_channels=ctrl_channels,
215
+ temb_channels=temb_channels,
216
+ # number of norm groups must divide both in_channels and out_channels
217
+ resnet_groups=find_largest_factor(gcd(ctrl_channels, ctrl_channels + base_channels), max_norm_num_groups),
218
+ cross_attention_dim=cross_attention_dim,
219
+ num_attention_heads=num_attention_heads,
220
+ use_linear_projection=use_linear_projection,
221
+ upcast_attention=upcast_attention,
222
+ )
223
+
224
+ # After the midblock application, information is added from control to base
225
+ # Addition requires change in number of channels
226
+ ctrl_to_base = make_zero_conv(ctrl_channels, base_channels)
227
+
228
+ return MidBlockControlNetXSAdapter(base_to_ctrl=base_to_ctrl, midblock=midblock, ctrl_to_base=ctrl_to_base)
229
+
230
+
231
+ def get_up_block_adapter(
232
+ out_channels: int,
233
+ prev_output_channel: int,
234
+ ctrl_skip_channels: List[int],
235
+ ):
236
+ ctrl_to_base = []
237
+ num_layers = 3 # only support sd + sdxl
238
+ for i in range(num_layers):
239
+ resnet_in_channels = prev_output_channel if i == 0 else out_channels
240
+ ctrl_to_base.append(make_zero_conv(ctrl_skip_channels[i], resnet_in_channels))
241
+
242
+ return UpBlockControlNetXSAdapter(ctrl_to_base=nn.ModuleList(ctrl_to_base))
243
+
244
+
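The adapter builders above rely on two helpers, `make_zero_conv` and `find_largest_factor`, which are defined elsewhere in this file and not shown in this excerpt. A rough sketch of the assumed behavior, for orientation only (the `_sketch` names are hypothetical):

    import torch.nn as nn

    def make_zero_conv_sketch(in_channels: int, out_channels: int) -> nn.Conv2d:
        # 1x1 convolution initialized to zero, so the control branch initially contributes
        # nothing to the base UNet (the standard ControlNet zero-init trick)
        conv = nn.Conv2d(in_channels, out_channels, kernel_size=1)
        nn.init.zeros_(conv.weight)
        nn.init.zeros_(conv.bias)
        return conv

    def find_largest_factor_sketch(number: int, max_factor: int) -> int:
        # largest divisor of `number` that is <= max_factor, used to pick a valid
        # GroupNorm group count for the concatenated channel dimensions
        for factor in range(min(number, max_factor), 0, -1):
            if number % factor == 0:
                return factor
        return 1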
245
+ class ControlNetXSAdapter(ModelMixin, ConfigMixin):
246
+ r"""
247
+ A `ControlNetXSAdapter` model. To use it, pass it into a `UNetControlNetXSModel` (together with a
248
+ `UNet2DConditionModel` base model).
249
+
250
+ This model inherits from [`ModelMixin`] and [`ConfigMixin`]. Check the superclass documentation for its generic
251
+ methods implemented for all models (such as downloading or saving).
252
+
253
+ Like `UNetControlNetXSModel`, `ControlNetXSAdapter` is compatible with StableDiffusion and StableDiffusion-XL. Its
254
+ default parameters are compatible with StableDiffusion.
255
+
256
+ Parameters:
257
+ conditioning_channels (`int`, defaults to 3):
258
+ Number of channels of conditioning input (e.g. an image)
259
+ conditioning_channel_order (`str`, defaults to `"rgb"`):
260
+ The channel order of conditional image. Will convert to `rgb` if it's `bgr`.
261
+ conditioning_embedding_out_channels (`tuple[int]`, defaults to `(16, 32, 96, 256)`):
262
+ The tuple of output channels for each block in the `controlnet_cond_embedding` layer.
263
+ time_embedding_mix (`float`, defaults to 1.0):
264
+ If 0, then only the control adapter's time embedding is used. If 1, then only the base unet's time
265
+ embedding is used. Otherwise, both are combined.
266
+ learn_time_embedding (`bool`, defaults to `False`):
267
+ Whether a time embedding should be learned. If yes, `UNetControlNetXSModel` will combine the time
268
+ embeddings of the base model and the control adapter. If no, `UNetControlNetXSModel` will use the base
269
+ model's time embedding.
270
+ num_attention_heads (`list[int]`, defaults to `[4]`):
271
+ The number of attention heads.
272
+ block_out_channels (`list[int]`, defaults to `[4, 8, 16, 16]`):
273
+ The tuple of output channels for each block.
274
+ base_block_out_channels (`list[int]`, defaults to `[320, 640, 1280, 1280]`):
275
+ The tuple of output channels for each block in the base unet.
276
+ cross_attention_dim (`int`, defaults to 1024):
277
+ The dimension of the cross attention features.
278
+ down_block_types (`list[str]`, defaults to `["CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D"]`):
279
+ The tuple of downsample blocks to use.
280
+ sample_size (`int`, defaults to 96):
281
+ Height and width of input/output sample.
282
+ transformer_layers_per_block (`Union[int, Tuple[int]]`, defaults to 1):
283
+ The number of transformer blocks of type [`~models.attention.BasicTransformerBlock`]. Only relevant for
284
+ [`~models.unet_2d_blocks.CrossAttnDownBlock2D`], [`~models.unet_2d_blocks.UNetMidBlock2DCrossAttn`].
285
+ upcast_attention (`bool`, defaults to `True`):
286
+ Whether the attention computation should always be upcasted.
287
+ max_norm_num_groups (`int`, defaults to 32):
288
+ Maximum number of groups in group norm. The actual number will be the largest divisor of the respective
289
+ channels that is <= max_norm_num_groups.
290
+ """
291
+
292
+ @register_to_config
293
+ def __init__(
294
+ self,
295
+ conditioning_channels: int = 3,
296
+ conditioning_channel_order: str = "rgb",
297
+ conditioning_embedding_out_channels: Tuple[int] = (16, 32, 96, 256),
298
+ time_embedding_mix: float = 1.0,
299
+ learn_time_embedding: bool = False,
300
+ num_attention_heads: Union[int, Tuple[int]] = 4,
301
+ block_out_channels: Tuple[int] = (4, 8, 16, 16),
302
+ base_block_out_channels: Tuple[int] = (320, 640, 1280, 1280),
303
+ cross_attention_dim: int = 1024,
304
+ down_block_types: Tuple[str] = (
305
+ "CrossAttnDownBlock2D",
306
+ "CrossAttnDownBlock2D",
307
+ "CrossAttnDownBlock2D",
308
+ "DownBlock2D",
309
+ ),
310
+ sample_size: Optional[int] = 96,
311
+ transformer_layers_per_block: Union[int, Tuple[int]] = 1,
312
+ upcast_attention: bool = True,
313
+ max_norm_num_groups: int = 32,
314
+ use_linear_projection: bool = True,
315
+ ):
316
+ super().__init__()
317
+
318
+ time_embedding_input_dim = base_block_out_channels[0]
319
+ time_embedding_dim = base_block_out_channels[0] * 4
320
+
321
+ # Check inputs
322
+ if conditioning_channel_order not in ["rgb", "bgr"]:
323
+ raise ValueError(f"unknown `conditioning_channel_order`: {conditioning_channel_order}")
324
+
325
+ if len(block_out_channels) != len(down_block_types):
326
+ raise ValueError(
327
+ f"Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. `down_block_types`: {down_block_types}."
328
+ )
329
+
330
+ if not isinstance(transformer_layers_per_block, (list, tuple)):
331
+ transformer_layers_per_block = [transformer_layers_per_block] * len(down_block_types)
332
+ if not isinstance(cross_attention_dim, (list, tuple)):
333
+ cross_attention_dim = [cross_attention_dim] * len(down_block_types)
334
+ # see https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131 for why `ControlNetXSAdapter` takes `num_attention_heads` instead of `attention_head_dim`
335
+ if not isinstance(num_attention_heads, (list, tuple)):
336
+ num_attention_heads = [num_attention_heads] * len(down_block_types)
337
+
338
+ if len(num_attention_heads) != len(down_block_types):
339
+ raise ValueError(
340
+ f"Must provide the same number of `num_attention_heads` as `down_block_types`. `num_attention_heads`: {num_attention_heads}. `down_block_types`: {down_block_types}."
341
+ )
342
+
343
+ # 5 - Create conditioning hint embedding
344
+ self.controlnet_cond_embedding = ControlNetConditioningEmbedding(
345
+ conditioning_embedding_channels=block_out_channels[0],
346
+ block_out_channels=conditioning_embedding_out_channels,
347
+ conditioning_channels=conditioning_channels,
348
+ )
349
+
350
+ # time
351
+ if learn_time_embedding:
352
+ self.time_embedding = TimestepEmbedding(time_embedding_input_dim, time_embedding_dim)
353
+ else:
354
+ self.time_embedding = None
355
+
356
+ self.down_blocks = nn.ModuleList([])
357
+ self.up_connections = nn.ModuleList([])
358
+
359
+ # input
360
+ self.conv_in = nn.Conv2d(4, block_out_channels[0], kernel_size=3, padding=1)
361
+ self.control_to_base_for_conv_in = make_zero_conv(block_out_channels[0], base_block_out_channels[0])
362
+
363
+ # down
364
+ base_out_channels = base_block_out_channels[0]
365
+ ctrl_out_channels = block_out_channels[0]
366
+ for i, down_block_type in enumerate(down_block_types):
367
+ base_in_channels = base_out_channels
368
+ base_out_channels = base_block_out_channels[i]
369
+ ctrl_in_channels = ctrl_out_channels
370
+ ctrl_out_channels = block_out_channels[i]
371
+ has_crossattn = "CrossAttn" in down_block_type
372
+ is_final_block = i == len(down_block_types) - 1
373
+
374
+ self.down_blocks.append(
375
+ get_down_block_adapter(
376
+ base_in_channels=base_in_channels,
377
+ base_out_channels=base_out_channels,
378
+ ctrl_in_channels=ctrl_in_channels,
379
+ ctrl_out_channels=ctrl_out_channels,
380
+ temb_channels=time_embedding_dim,
381
+ max_norm_num_groups=max_norm_num_groups,
382
+ has_crossattn=has_crossattn,
383
+ transformer_layers_per_block=transformer_layers_per_block[i],
384
+ num_attention_heads=num_attention_heads[i],
385
+ cross_attention_dim=cross_attention_dim[i],
386
+ add_downsample=not is_final_block,
387
+ upcast_attention=upcast_attention,
388
+ use_linear_projection=use_linear_projection,
389
+ )
390
+ )
391
+
392
+ # mid
393
+ self.mid_block = get_mid_block_adapter(
394
+ base_channels=base_block_out_channels[-1],
395
+ ctrl_channels=block_out_channels[-1],
396
+ temb_channels=time_embedding_dim,
397
+ transformer_layers_per_block=transformer_layers_per_block[-1],
398
+ num_attention_heads=num_attention_heads[-1],
399
+ cross_attention_dim=cross_attention_dim[-1],
400
+ upcast_attention=upcast_attention,
401
+ use_linear_projection=use_linear_projection,
402
+ )
403
+
404
+ # up
405
+ # The skip connection channels are the output of the conv_in and of all the down subblocks
406
+ ctrl_skip_channels = [block_out_channels[0]]
407
+ for i, out_channels in enumerate(block_out_channels):
408
+ number_of_subblocks = (
409
+ 3 if i < len(block_out_channels) - 1 else 2
410
+ ) # every block has 3 subblocks, except last one, which has 2 as it has no downsampler
411
+ ctrl_skip_channels.extend([out_channels] * number_of_subblocks)
412
+
413
+ reversed_base_block_out_channels = list(reversed(base_block_out_channels))
414
+
415
+ base_out_channels = reversed_base_block_out_channels[0]
416
+ for i in range(len(down_block_types)):
417
+ prev_base_output_channel = base_out_channels
418
+ base_out_channels = reversed_base_block_out_channels[i]
419
+ ctrl_skip_channels_ = [ctrl_skip_channels.pop() for _ in range(3)]
420
+
421
+ self.up_connections.append(
422
+ get_up_block_adapter(
423
+ out_channels=base_out_channels,
424
+ prev_output_channel=prev_base_output_channel,
425
+ ctrl_skip_channels=ctrl_skip_channels_,
426
+ )
427
+ )
428
+
429
+ @classmethod
430
+ def from_unet(
431
+ cls,
432
+ unet: UNet2DConditionModel,
433
+ size_ratio: Optional[float] = None,
434
+ block_out_channels: Optional[List[int]] = None,
435
+ num_attention_heads: Optional[List[int]] = None,
436
+ learn_time_embedding: bool = False,
437
+ time_embedding_mix: int = 1.0,
438
+ conditioning_channels: int = 3,
439
+ conditioning_channel_order: str = "rgb",
440
+ conditioning_embedding_out_channels: Tuple[int] = (16, 32, 96, 256),
441
+ ):
442
+ r"""
443
+ Instantiate a [`ControlNetXSAdapter`] from a [`UNet2DConditionModel`].
444
+
445
+ Parameters:
446
+ unet (`UNet2DConditionModel`):
447
+ The UNet model we want to control. The dimensions of the ControlNetXSAdapter will be adapted to it.
448
+ size_ratio (float, *optional*, defaults to `None`):
449
+ When given, block_out_channels is set to a fraction of the base model's block_out_channels. Either this
450
+ or `block_out_channels` must be given.
451
+ block_out_channels (`List[int]`, *optional*, defaults to `None`):
452
+ Down blocks output channels in control model. Either this or `size_ratio` must be given.
453
+ num_attention_heads (`List[int]`, *optional*, defaults to `None`):
454
+ The dimension of the attention heads. The naming seems a bit confusing and it is, see
455
+ https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131 for why.
456
+ learn_time_embedding (`bool`, defaults to `False`):
457
+ Whether the `ControlNetXSAdapter` should learn a time embedding.
458
+ time_embedding_mix (`float`, defaults to 1.0):
459
+ If 0, then only the control adapter's time embedding is used. If 1, then only the base unet's time
460
+ embedding is used. Otherwise, both are combined.
461
+ conditioning_channels (`int`, defaults to 3):
462
+ Number of channels of conditioning input (e.g. an image)
463
+ conditioning_channel_order (`str`, defaults to `"rgb"`):
464
+ The channel order of conditional image. Will convert to `rgb` if it's `bgr`.
465
+ conditioning_embedding_out_channels (`Tuple[int]`, defaults to `(16, 32, 96, 256)`):
466
+ The tuple of output channels for each block in the `controlnet_cond_embedding` layer.
467
+ """
468
+
469
+ # Check input
470
+ fixed_size = block_out_channels is not None
471
+ relative_size = size_ratio is not None
472
+ if not (fixed_size ^ relative_size):
473
+ raise ValueError(
474
+ "Pass exactly one of `block_out_channels` (for absolute sizing) or `size_ratio` (for relative sizing)."
475
+ )
476
+
477
+ # Create model
478
+ block_out_channels = block_out_channels or [int(b * size_ratio) for b in unet.config.block_out_channels]
479
+ if num_attention_heads is None:
480
+ # The naming seems a bit confusing and it is, see https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131 for why.
481
+ num_attention_heads = unet.config.attention_head_dim
482
+
483
+ model = cls(
484
+ conditioning_channels=conditioning_channels,
485
+ conditioning_channel_order=conditioning_channel_order,
486
+ conditioning_embedding_out_channels=conditioning_embedding_out_channels,
487
+ time_embedding_mix=time_embedding_mix,
488
+ learn_time_embedding=learn_time_embedding,
489
+ num_attention_heads=num_attention_heads,
490
+ block_out_channels=block_out_channels,
491
+ base_block_out_channels=unet.config.block_out_channels,
492
+ cross_attention_dim=unet.config.cross_attention_dim,
493
+ down_block_types=unet.config.down_block_types,
494
+ sample_size=unet.config.sample_size,
495
+ transformer_layers_per_block=unet.config.transformer_layers_per_block,
496
+ upcast_attention=unet.config.upcast_attention,
497
+ max_norm_num_groups=unet.config.norm_num_groups,
498
+ use_linear_projection=unet.config.use_linear_projection,
499
+ )
500
+
501
+ # ensure that the ControlNetXSAdapter is the same dtype as the UNet2DConditionModel
502
+ model.to(unet.dtype)
503
+
504
+ return model
505
+
506
+ def forward(self, *args, **kwargs):
507
+ raise ValueError(
508
+ "A ControlNetXSAdapter cannot be run by itself. Use it together with a UNet2DConditionModel to instantiate a UNetControlNetXSModel."
509
+ )
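Because the adapter refuses to run on its own (see `forward` directly above), the intended entry point is `from_unet`, which sizes the adapter against an existing base UNet. A hedged usage sketch; the checkpoint id is illustrative only:

    from diffusers import UNet2DConditionModel

    unet = UNet2DConditionModel.from_pretrained(
        "stable-diffusion-v1-5/stable-diffusion-v1-5", subfolder="unet"  # hypothetical checkpoint
    )
    # pass exactly one of `size_ratio` (relative sizing) or `block_out_channels` (absolute sizing)
    adapter = ControlNetXSAdapter.from_unet(unet, size_ratio=0.1, learn_time_embedding=True)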
510
+
511
+
512
+ class UNetControlNetXSModel(ModelMixin, ConfigMixin):
513
+ r"""
514
+ A UNet fused with a ControlNet-XS adapter model.
515
+
516
+ This model inherits from [`ModelMixin`] and [`ConfigMixin`]. Check the superclass documentation for its generic
517
+ methods implemented for all models (such as downloading or saving).
518
+
519
+ `UNetControlNetXSModel` is compatible with StableDiffusion and StableDiffusion-XL. Its default parameters are
520
+ compatible with StableDiffusion.
521
+
522
+ Its parameters are either passed to the underlying `UNet2DConditionModel` or used exactly like in
523
+ `ControlNetXSAdapter`. See their documentation for details.
524
+ """
525
+
526
+ _supports_gradient_checkpointing = True
527
+
528
+ @register_to_config
529
+ def __init__(
530
+ self,
531
+ # unet configs
532
+ sample_size: Optional[int] = 96,
533
+ down_block_types: Tuple[str] = (
534
+ "CrossAttnDownBlock2D",
535
+ "CrossAttnDownBlock2D",
536
+ "CrossAttnDownBlock2D",
537
+ "DownBlock2D",
538
+ ),
539
+ up_block_types: Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D"),
540
+ block_out_channels: Tuple[int] = (320, 640, 1280, 1280),
541
+ norm_num_groups: Optional[int] = 32,
542
+ cross_attention_dim: Union[int, Tuple[int]] = 1024,
543
+ transformer_layers_per_block: Union[int, Tuple[int]] = 1,
544
+ num_attention_heads: Union[int, Tuple[int]] = 8,
545
+ addition_embed_type: Optional[str] = None,
546
+ addition_time_embed_dim: Optional[int] = None,
547
+ upcast_attention: bool = True,
548
+ use_linear_projection: bool = True,
549
+ time_cond_proj_dim: Optional[int] = None,
550
+ projection_class_embeddings_input_dim: Optional[int] = None,
551
+ # additional controlnet configs
552
+ time_embedding_mix: float = 1.0,
553
+ ctrl_conditioning_channels: int = 3,
554
+ ctrl_conditioning_embedding_out_channels: Tuple[int] = (16, 32, 96, 256),
555
+ ctrl_conditioning_channel_order: str = "rgb",
556
+ ctrl_learn_time_embedding: bool = False,
557
+ ctrl_block_out_channels: Tuple[int] = (4, 8, 16, 16),
558
+ ctrl_num_attention_heads: Union[int, Tuple[int]] = 4,
559
+ ctrl_max_norm_num_groups: int = 32,
560
+ ):
561
+ super().__init__()
562
+
563
+ if time_embedding_mix < 0 or time_embedding_mix > 1:
564
+ raise ValueError("`time_embedding_mix` needs to be between 0 and 1.")
565
+ if time_embedding_mix < 1 and not ctrl_learn_time_embedding:
566
+ raise ValueError("To use `time_embedding_mix` < 1, `ctrl_learn_time_embedding` must be `True`")
567
+
568
+ if addition_embed_type is not None and addition_embed_type != "text_time":
569
+ raise ValueError(
570
+ "As `UNetControlNetXSModel` currently only supports StableDiffusion and StableDiffusion-XL, `addition_embed_type` must be `None` or `'text_time'`."
571
+ )
572
+
573
+ if not isinstance(transformer_layers_per_block, (list, tuple)):
574
+ transformer_layers_per_block = [transformer_layers_per_block] * len(down_block_types)
575
+ if not isinstance(cross_attention_dim, (list, tuple)):
576
+ cross_attention_dim = [cross_attention_dim] * len(down_block_types)
577
+ if not isinstance(num_attention_heads, (list, tuple)):
578
+ num_attention_heads = [num_attention_heads] * len(down_block_types)
579
+ if not isinstance(ctrl_num_attention_heads, (list, tuple)):
580
+ ctrl_num_attention_heads = [ctrl_num_attention_heads] * len(down_block_types)
581
+
582
+ base_num_attention_heads = num_attention_heads
583
+
584
+ self.in_channels = 4
585
+
586
+ # # Input
587
+ self.base_conv_in = nn.Conv2d(4, block_out_channels[0], kernel_size=3, padding=1)
588
+ self.controlnet_cond_embedding = ControlNetConditioningEmbedding(
589
+ conditioning_embedding_channels=ctrl_block_out_channels[0],
590
+ block_out_channels=ctrl_conditioning_embedding_out_channels,
591
+ conditioning_channels=ctrl_conditioning_channels,
592
+ )
593
+ self.ctrl_conv_in = nn.Conv2d(4, ctrl_block_out_channels[0], kernel_size=3, padding=1)
594
+ self.control_to_base_for_conv_in = make_zero_conv(ctrl_block_out_channels[0], block_out_channels[0])
595
+
596
+ # # Time
597
+ time_embed_input_dim = block_out_channels[0]
598
+ time_embed_dim = block_out_channels[0] * 4
599
+
600
+ self.base_time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos=True, downscale_freq_shift=0)
601
+ self.base_time_embedding = TimestepEmbedding(
602
+ time_embed_input_dim,
603
+ time_embed_dim,
604
+ cond_proj_dim=time_cond_proj_dim,
605
+ )
606
+ if ctrl_learn_time_embedding:
607
+ self.ctrl_time_embedding = TimestepEmbedding(
608
+ in_channels=time_embed_input_dim, time_embed_dim=time_embed_dim
609
+ )
610
+ else:
611
+ self.ctrl_time_embedding = None
612
+
613
+ if addition_embed_type is None:
614
+ self.base_add_time_proj = None
615
+ self.base_add_embedding = None
616
+ else:
617
+ self.base_add_time_proj = Timesteps(addition_time_embed_dim, flip_sin_to_cos=True, downscale_freq_shift=0)
618
+ self.base_add_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim)
619
+
620
+ # # Create down blocks
621
+ down_blocks = []
622
+ base_out_channels = block_out_channels[0]
623
+ ctrl_out_channels = ctrl_block_out_channels[0]
624
+ for i, down_block_type in enumerate(down_block_types):
625
+ base_in_channels = base_out_channels
626
+ base_out_channels = block_out_channels[i]
627
+ ctrl_in_channels = ctrl_out_channels
628
+ ctrl_out_channels = ctrl_block_out_channels[i]
629
+ has_crossattn = "CrossAttn" in down_block_type
630
+ is_final_block = i == len(down_block_types) - 1
631
+
632
+ down_blocks.append(
633
+ ControlNetXSCrossAttnDownBlock2D(
634
+ base_in_channels=base_in_channels,
635
+ base_out_channels=base_out_channels,
636
+ ctrl_in_channels=ctrl_in_channels,
637
+ ctrl_out_channels=ctrl_out_channels,
638
+ temb_channels=time_embed_dim,
639
+ norm_num_groups=norm_num_groups,
640
+ ctrl_max_norm_num_groups=ctrl_max_norm_num_groups,
641
+ has_crossattn=has_crossattn,
642
+ transformer_layers_per_block=transformer_layers_per_block[i],
643
+ base_num_attention_heads=base_num_attention_heads[i],
644
+ ctrl_num_attention_heads=ctrl_num_attention_heads[i],
645
+ cross_attention_dim=cross_attention_dim[i],
646
+ add_downsample=not is_final_block,
647
+ upcast_attention=upcast_attention,
648
+ use_linear_projection=use_linear_projection,
649
+ )
650
+ )
651
+
652
+ # # Create mid block
653
+ self.mid_block = ControlNetXSCrossAttnMidBlock2D(
654
+ base_channels=block_out_channels[-1],
655
+ ctrl_channels=ctrl_block_out_channels[-1],
656
+ temb_channels=time_embed_dim,
657
+ norm_num_groups=norm_num_groups,
658
+ ctrl_max_norm_num_groups=ctrl_max_norm_num_groups,
659
+ transformer_layers_per_block=transformer_layers_per_block[-1],
660
+ base_num_attention_heads=base_num_attention_heads[-1],
661
+ ctrl_num_attention_heads=ctrl_num_attention_heads[-1],
662
+ cross_attention_dim=cross_attention_dim[-1],
663
+ upcast_attention=upcast_attention,
664
+ use_linear_projection=use_linear_projection,
665
+ )
666
+
667
+ # # Create up blocks
668
+ up_blocks = []
669
+ rev_transformer_layers_per_block = list(reversed(transformer_layers_per_block))
670
+ rev_num_attention_heads = list(reversed(base_num_attention_heads))
671
+ rev_cross_attention_dim = list(reversed(cross_attention_dim))
672
+
673
+ # The skip connection channels are the output of the conv_in and of all the down subblocks
674
+ ctrl_skip_channels = [ctrl_block_out_channels[0]]
675
+ for i, out_channels in enumerate(ctrl_block_out_channels):
676
+ number_of_subblocks = (
677
+ 3 if i < len(ctrl_block_out_channels) - 1 else 2
678
+ ) # every block has 3 subblocks, except last one, which has 2 as it has no downsampler
679
+ ctrl_skip_channels.extend([out_channels] * number_of_subblocks)
680
+
681
+ reversed_block_out_channels = list(reversed(block_out_channels))
682
+
683
+ out_channels = reversed_block_out_channels[0]
684
+ for i, up_block_type in enumerate(up_block_types):
685
+ prev_output_channel = out_channels
686
+ out_channels = reversed_block_out_channels[i]
687
+ in_channels = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]
688
+ ctrl_skip_channels_ = [ctrl_skip_channels.pop() for _ in range(3)]
689
+
690
+ has_crossattn = "CrossAttn" in up_block_type
691
+ is_final_block = i == len(block_out_channels) - 1
692
+
693
+ up_blocks.append(
694
+ ControlNetXSCrossAttnUpBlock2D(
695
+ in_channels=in_channels,
696
+ out_channels=out_channels,
697
+ prev_output_channel=prev_output_channel,
698
+ ctrl_skip_channels=ctrl_skip_channels_,
699
+ temb_channels=time_embed_dim,
700
+ resolution_idx=i,
701
+ has_crossattn=has_crossattn,
702
+ transformer_layers_per_block=rev_transformer_layers_per_block[i],
703
+ num_attention_heads=rev_num_attention_heads[i],
704
+ cross_attention_dim=rev_cross_attention_dim[i],
705
+ add_upsample=not is_final_block,
706
+ upcast_attention=upcast_attention,
707
+ norm_num_groups=norm_num_groups,
708
+ use_linear_projection=use_linear_projection,
709
+ )
710
+ )
711
+
712
+ self.down_blocks = nn.ModuleList(down_blocks)
713
+ self.up_blocks = nn.ModuleList(up_blocks)
714
+
715
+ self.base_conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups)
716
+ self.base_conv_act = nn.SiLU()
717
+ self.base_conv_out = nn.Conv2d(block_out_channels[0], 4, kernel_size=3, padding=1)
718
+
719
+ @classmethod
720
+ def from_unet(
721
+ cls,
722
+ unet: UNet2DConditionModel,
723
+ controlnet: Optional[ControlNetXSAdapter] = None,
724
+ size_ratio: Optional[float] = None,
725
+ ctrl_block_out_channels: Optional[List[float]] = None,
726
+ time_embedding_mix: Optional[float] = None,
727
+ ctrl_optional_kwargs: Optional[Dict] = None,
728
+ ):
729
+ r"""
730
+ Instantiate a [`UNetControlNetXSModel`] from a [`UNet2DConditionModel`] and an optional [`ControlNetXSAdapter`]
731
+ .
732
+
733
+ Parameters:
734
+ unet (`UNet2DConditionModel`):
735
+ The UNet model we want to control.
736
+ controlnet (`ControlNetXSAdapter`):
737
+ The ControlNet-XS adapter with which the UNet will be fused. If none is given, a new ControlNet-XS
738
+ adapter will be created.
739
+ size_ratio (float, *optional*, defaults to `None`):
740
+ Used to construct the controlnet if none is given. See [`ControlNetXSAdapter.from_unet`] for details.
741
+ ctrl_block_out_channels (`List[int]`, *optional*, defaults to `None`):
742
+ Used to construct the controlnet if none is given. See [`ControlNetXSAdapter.from_unet`] for details,
743
+ where this parameter is called `block_out_channels`.
744
+ time_embedding_mix (`float`, *optional*, defaults to None):
745
+ Used to construct the controlnet if none is given. See [`ControlNetXSAdapter.from_unet`] for details.
746
+ ctrl_optional_kwargs (`Dict`, *optional*, defaults to `None`):
747
+ Passed to the `init` of the new controlnet if no controlnet was given.
748
+ """
749
+ if controlnet is None:
750
+ controlnet = ControlNetXSAdapter.from_unet(
751
+ unet, size_ratio, ctrl_block_out_channels, **ctrl_optional_kwargs
752
+ )
753
+ else:
754
+ if any(
755
+ o is not None for o in (size_ratio, ctrl_block_out_channels, time_embedding_mix, ctrl_optional_kwargs)
756
+ ):
757
+ raise ValueError(
758
+ "When a controlnet is passed, none of these parameters should be passed: size_ratio, ctrl_block_out_channels, time_embedding_mix, ctrl_optional_kwargs."
759
+ )
760
+
761
+ # # get params
762
+ params_for_unet = [
763
+ "sample_size",
764
+ "down_block_types",
765
+ "up_block_types",
766
+ "block_out_channels",
767
+ "norm_num_groups",
768
+ "cross_attention_dim",
769
+ "transformer_layers_per_block",
770
+ "addition_embed_type",
771
+ "addition_time_embed_dim",
772
+ "upcast_attention",
773
+ "use_linear_projection",
774
+ "time_cond_proj_dim",
775
+ "projection_class_embeddings_input_dim",
776
+ ]
777
+ params_for_unet = {k: v for k, v in unet.config.items() if k in params_for_unet}
778
+ # The naming seems a bit confusing and it is, see https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131 for why.
779
+ params_for_unet["num_attention_heads"] = unet.config.attention_head_dim
780
+
781
+ params_for_controlnet = [
782
+ "conditioning_channels",
783
+ "conditioning_embedding_out_channels",
784
+ "conditioning_channel_order",
785
+ "learn_time_embedding",
786
+ "block_out_channels",
787
+ "num_attention_heads",
788
+ "max_norm_num_groups",
789
+ ]
790
+ params_for_controlnet = {"ctrl_" + k: v for k, v in controlnet.config.items() if k in params_for_controlnet}
791
+ params_for_controlnet["time_embedding_mix"] = controlnet.config.time_embedding_mix
792
+
793
+ # # create model
794
+ model = cls.from_config({**params_for_unet, **params_for_controlnet})
795
+
796
+ # # load weights
797
+ # from unet
798
+ modules_from_unet = [
799
+ "time_embedding",
800
+ "conv_in",
801
+ "conv_norm_out",
802
+ "conv_out",
803
+ ]
804
+ for m in modules_from_unet:
805
+ getattr(model, "base_" + m).load_state_dict(getattr(unet, m).state_dict())
806
+
807
+ optional_modules_from_unet = [
808
+ "add_time_proj",
809
+ "add_embedding",
810
+ ]
811
+ for m in optional_modules_from_unet:
812
+ if hasattr(unet, m) and getattr(unet, m) is not None:
813
+ getattr(model, "base_" + m).load_state_dict(getattr(unet, m).state_dict())
814
+
815
+ # from controlnet
816
+ model.controlnet_cond_embedding.load_state_dict(controlnet.controlnet_cond_embedding.state_dict())
817
+ model.ctrl_conv_in.load_state_dict(controlnet.conv_in.state_dict())
818
+ if controlnet.time_embedding is not None:
819
+ model.ctrl_time_embedding.load_state_dict(controlnet.time_embedding.state_dict())
820
+ model.control_to_base_for_conv_in.load_state_dict(controlnet.control_to_base_for_conv_in.state_dict())
821
+
822
+ # from both
823
+ model.down_blocks = nn.ModuleList(
824
+ ControlNetXSCrossAttnDownBlock2D.from_modules(b, c)
825
+ for b, c in zip(unet.down_blocks, controlnet.down_blocks)
826
+ )
827
+ model.mid_block = ControlNetXSCrossAttnMidBlock2D.from_modules(unet.mid_block, controlnet.mid_block)
828
+ model.up_blocks = nn.ModuleList(
829
+ ControlNetXSCrossAttnUpBlock2D.from_modules(b, c)
830
+ for b, c in zip(unet.up_blocks, controlnet.up_connections)
831
+ )
832
+
833
+ # ensure that the UNetControlNetXSModel is the same dtype as the UNet2DConditionModel
834
+ model.to(unet.dtype)
835
+
836
+ return model
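A short sketch of how the fused model is typically assembled, continuing the hypothetical `unet` and `adapter` objects from the `ControlNetXSAdapter.from_unet` example earlier:

    # fuse the base UNet with the adapter; weights from both are copied into the new model
    model = UNetControlNetXSModel.from_unet(unet, controlnet=adapter)

    # for fine-tuning, freeze the base UNet weights and train only the ControlNet-XS parts
    model.freeze_unet_params()

Note that when a `controlnet` is passed explicitly, `size_ratio`, `ctrl_block_out_channels`, `time_embedding_mix`, and `ctrl_optional_kwargs` must all stay `None`, as enforced by the check above.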
837
+
838
+ def freeze_unet_params(self) -> None:
839
+ """Freeze the weights of the parts belonging to the base UNet2DConditionModel, and leave everything else unfrozen for fine
840
+ tuning."""
841
+ # First make every parameter trainable
842
+ for param in self.parameters():
843
+ param.requires_grad = True
844
+
845
+ # Then freeze the parts belonging to the base UNet
846
+ base_parts = [
847
+ "base_time_proj",
848
+ "base_time_embedding",
849
+ "base_add_time_proj",
850
+ "base_add_embedding",
851
+ "base_conv_in",
852
+ "base_conv_norm_out",
853
+ "base_conv_act",
854
+ "base_conv_out",
855
+ ]
856
+ base_parts = [getattr(self, part) for part in base_parts if getattr(self, part) is not None]
857
+ for part in base_parts:
858
+ for param in part.parameters():
859
+ param.requires_grad = False
860
+
861
+ for d in self.down_blocks:
862
+ d.freeze_base_params()
863
+ self.mid_block.freeze_base_params()
864
+ for u in self.up_blocks:
865
+ u.freeze_base_params()
866
+
867
+ @property
868
+ # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.attn_processors
869
+ def attn_processors(self) -> Dict[str, AttentionProcessor]:
870
+ r"""
871
+ Returns:
872
+ `dict` of attention processors: A dictionary containing all attention processors used in the model,
873
+ indexed by its weight name.
874
+ """
875
+ # set recursively
876
+ processors = {}
877
+
878
+ def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
879
+ if hasattr(module, "get_processor"):
880
+ processors[f"{name}.processor"] = module.get_processor()
881
+
882
+ for sub_name, child in module.named_children():
883
+ fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)
884
+
885
+ return processors
886
+
887
+ for name, module in self.named_children():
888
+ fn_recursive_add_processors(name, module, processors)
889
+
890
+ return processors
891
+
892
+ # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_attn_processor
893
+ def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
894
+ r"""
895
+ Sets the attention processor to use to compute attention.
896
+
897
+ Parameters:
898
+ processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`):
899
+ The instantiated processor class or a dictionary of processor classes that will be set as the processor
900
+ for **all** `Attention` layers.
901
+
902
+ If `processor` is a dict, the key needs to define the path to the corresponding cross attention
903
+ processor. This is strongly recommended when setting trainable attention processors.
904
+
905
+ """
906
+ count = len(self.attn_processors.keys())
907
+
908
+ if isinstance(processor, dict) and len(processor) != count:
909
+ raise ValueError(
910
+ f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
911
+ f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
912
+ )
913
+
914
+ def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
915
+ if hasattr(module, "set_processor"):
916
+ if not isinstance(processor, dict):
917
+ module.set_processor(processor)
918
+ else:
919
+ module.set_processor(processor.pop(f"{name}.processor"))
920
+
921
+ for sub_name, child in module.named_children():
922
+ fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)
923
+
924
+ for name, module in self.named_children():
925
+ fn_recursive_attn_processor(name, module, processor)
926
+
927
+ # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_default_attn_processor
928
+ def set_default_attn_processor(self):
929
+ """
930
+ Disables custom attention processors and sets the default attention implementation.
931
+ """
932
+ if all(proc.__class__ in ADDED_KV_ATTENTION_PROCESSORS for proc in self.attn_processors.values()):
933
+ processor = AttnAddedKVProcessor()
934
+ elif all(proc.__class__ in CROSS_ATTENTION_PROCESSORS for proc in self.attn_processors.values()):
935
+ processor = AttnProcessor()
936
+ else:
937
+ raise ValueError(
938
+ f"Cannot call `set_default_attn_processor` when attention processors are of type {next(iter(self.attn_processors.values()))}"
939
+ )
940
+
941
+ self.set_attn_processor(processor)
942
+
943
+ # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.enable_freeu
944
+ def enable_freeu(self, s1: float, s2: float, b1: float, b2: float):
945
+ r"""Enables the FreeU mechanism from https://huggingface.co/papers/2309.11497.
946
+
947
+ The suffixes after the scaling factors represent the stage blocks where they are being applied.
948
+
949
+ Please refer to the [official repository](https://github.com/ChenyangSi/FreeU) for combinations of values that
950
+ are known to work well for different pipelines such as Stable Diffusion v1, v2, and Stable Diffusion XL.
951
+
952
+ Args:
953
+ s1 (`float`):
954
+ Scaling factor for stage 1 to attenuate the contributions of the skip features. This is done to
955
+ mitigate the "oversmoothing effect" in the enhanced denoising process.
956
+ s2 (`float`):
957
+ Scaling factor for stage 2 to attenuate the contributions of the skip features. This is done to
958
+ mitigate the "oversmoothing effect" in the enhanced denoising process.
959
+ b1 (`float`): Scaling factor for stage 1 to amplify the contributions of backbone features.
960
+ b2 (`float`): Scaling factor for stage 2 to amplify the contributions of backbone features.
961
+ """
962
+ for i, upsample_block in enumerate(self.up_blocks):
963
+ setattr(upsample_block, "s1", s1)
964
+ setattr(upsample_block, "s2", s2)
965
+ setattr(upsample_block, "b1", b1)
966
+ setattr(upsample_block, "b2", b2)
967
+
968
+ # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.disable_freeu
969
+ def disable_freeu(self):
970
+ """Disables the FreeU mechanism."""
971
+ freeu_keys = {"s1", "s2", "b1", "b2"}
972
+ for i, upsample_block in enumerate(self.up_blocks):
973
+ for k in freeu_keys:
974
+ if hasattr(upsample_block, k) or getattr(upsample_block, k, None) is not None:
975
+ setattr(upsample_block, k, None)
976
+
977
+ # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.fuse_qkv_projections
978
+ def fuse_qkv_projections(self):
979
+ """
980
+ Enables fused QKV projections. For self-attention modules, all projection matrices (i.e., query, key, value)
981
+ are fused. For cross-attention modules, key and value projection matrices are fused.
982
+
983
+ <Tip warning={true}>
984
+
985
+ This API is 🧪 experimental.
986
+
987
+ </Tip>
988
+ """
989
+ self.original_attn_processors = None
990
+
991
+ for _, attn_processor in self.attn_processors.items():
992
+ if "Added" in str(attn_processor.__class__.__name__):
993
+ raise ValueError("`fuse_qkv_projections()` is not supported for models having added KV projections.")
994
+
995
+ self.original_attn_processors = self.attn_processors
996
+
997
+ for module in self.modules():
998
+ if isinstance(module, Attention):
999
+ module.fuse_projections(fuse=True)
1000
+
1001
+ self.set_attn_processor(FusedAttnProcessor2_0())
1002
+
1003
+ # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.unfuse_qkv_projections
1004
+ def unfuse_qkv_projections(self):
1005
+ """Disables the fused QKV projection if enabled.
1006
+
1007
+ <Tip warning={true}>
1008
+
1009
+ This API is 🧪 experimental.
1010
+
1011
+ </Tip>
1012
+
1013
+ """
1014
+ if self.original_attn_processors is not None:
1015
+ self.set_attn_processor(self.original_attn_processors)
1016
+
1017
+ def forward(
1018
+ self,
1019
+ sample: Tensor,
1020
+ timestep: Union[torch.Tensor, float, int],
1021
+ encoder_hidden_states: torch.Tensor,
1022
+ controlnet_cond: Optional[torch.Tensor] = None,
1023
+ conditioning_scale: Optional[float] = 1.0,
1024
+ class_labels: Optional[torch.Tensor] = None,
1025
+ timestep_cond: Optional[torch.Tensor] = None,
1026
+ attention_mask: Optional[torch.Tensor] = None,
1027
+ cross_attention_kwargs: Optional[Dict[str, Any]] = None,
1028
+ added_cond_kwargs: Optional[Dict[str, torch.Tensor]] = None,
1029
+ return_dict: bool = True,
1030
+ apply_control: bool = True,
1031
+ ) -> Union[ControlNetXSOutput, Tuple]:
1032
+ """
1033
+ The [`ControlNetXSModel`] forward method.
1034
+
1035
+ Args:
1036
+ sample (`Tensor`):
1037
+ The noisy input tensor.
1038
+ timestep (`Union[torch.Tensor, float, int]`):
1039
+ The number of timesteps to denoise an input.
1040
+ encoder_hidden_states (`torch.Tensor`):
1041
+ The encoder hidden states.
1042
+ controlnet_cond (`Tensor`):
1043
+ The conditional input tensor of shape `(batch_size, conditioning_channels, height, width)`.
1044
+ conditioning_scale (`float`, defaults to `1.0`):
1045
+ How much the control model affects the base model outputs.
1046
+ class_labels (`torch.Tensor`, *optional*, defaults to `None`):
1047
+ Optional class labels for conditioning. Their embeddings will be summed with the timestep embeddings.
1048
+ timestep_cond (`torch.Tensor`, *optional*, defaults to `None`):
1049
+ Additional conditional embeddings for timestep. If provided, the embeddings will be summed with the
1050
+ timestep_embedding passed through the `self.time_embedding` layer to obtain the final timestep
1051
+ embeddings.
1052
+ attention_mask (`torch.Tensor`, *optional*, defaults to `None`):
1053
+ An attention mask of shape `(batch, key_tokens)` is applied to `encoder_hidden_states`. If `1` the mask
1054
+ is kept, otherwise if `0` it is discarded. Mask will be converted into a bias, which adds large
1055
+ negative values to the attention scores corresponding to "discard" tokens.
1056
+ cross_attention_kwargs (`dict[str]`, *optional*, defaults to `None`):
1057
+ A kwargs dictionary that if specified is passed along to the `AttnProcessor`.
1058
+ added_cond_kwargs (`dict`):
1059
+ Additional conditions for the Stable Diffusion XL UNet.
1060
+ return_dict (`bool`, defaults to `True`):
1061
+ Whether or not to return a [`~models.controlnetxs.ControlNetXSOutput`] instead of a plain
1062
+ tuple.
1063
+ apply_control (`bool`, defaults to `True`):
1064
+ If `False`, the input is run only through the base model.
1065
+
1066
+ Returns:
1067
+ [`~models.controlnetxs.ControlNetXSOutput`] **or** `tuple`:
1068
+ If `return_dict` is `True`, a [`~models.controlnetxs.ControlNetXSOutput`] is returned, otherwise a
1069
+ tuple is returned where the first element is the sample tensor.
1070
+ """
1071
+
1072
+ # check channel order
1073
+ if self.config.ctrl_conditioning_channel_order == "bgr":
1074
+ controlnet_cond = torch.flip(controlnet_cond, dims=[1])
1075
+
1076
+ # prepare attention_mask
1077
+ if attention_mask is not None:
1078
+ attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0
1079
+ attention_mask = attention_mask.unsqueeze(1)
1080
+
1081
+ # 1. time
1082
+ timesteps = timestep
1083
+ if not torch.is_tensor(timesteps):
1084
+ # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
1085
+ # This would be a good case for the `match` statement (Python 3.10+)
1086
+ is_mps = sample.device.type == "mps"
1087
+ is_npu = sample.device.type == "npu"
1088
+ if isinstance(timestep, float):
1089
+ dtype = torch.float32 if (is_mps or is_npu) else torch.float64
1090
+ else:
1091
+ dtype = torch.int32 if (is_mps or is_npu) else torch.int64
1092
+ timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device)
1093
+ elif len(timesteps.shape) == 0:
1094
+ timesteps = timesteps[None].to(sample.device)
1095
+
1096
+ # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
1097
+ timesteps = timesteps.expand(sample.shape[0])
1098
+
1099
+ t_emb = self.base_time_proj(timesteps)
1100
+
1101
+ # timesteps does not contain any weights and will always return f32 tensors
1102
+ # but time_embedding might actually be running in fp16. so we need to cast here.
1103
+ # there might be better ways to encapsulate this.
1104
+ t_emb = t_emb.to(dtype=sample.dtype)
1105
+
1106
+ if self.config.ctrl_learn_time_embedding and apply_control:
1107
+ ctrl_temb = self.ctrl_time_embedding(t_emb, timestep_cond)
1108
+ base_temb = self.base_time_embedding(t_emb, timestep_cond)
1109
+ interpolation_param = self.config.time_embedding_mix**0.3
1110
+
1111
+ temb = ctrl_temb * interpolation_param + base_temb * (1 - interpolation_param)
1112
+ else:
1113
+ temb = self.base_time_embedding(t_emb)
1114
+
1115
+ # added time & text embeddings
1116
+ aug_emb = None
1117
+
1118
+ if self.config.addition_embed_type is None:
1119
+ pass
1120
+ elif self.config.addition_embed_type == "text_time":
1121
+ # SDXL - style
1122
+ if "text_embeds" not in added_cond_kwargs:
1123
+ raise ValueError(
1124
+ f"{self.__class__} has the config param `addition_embed_type` set to 'text_time' which requires the keyword argument `text_embeds` to be passed in `added_cond_kwargs`"
1125
+ )
1126
+ text_embeds = added_cond_kwargs.get("text_embeds")
1127
+ if "time_ids" not in added_cond_kwargs:
1128
+ raise ValueError(
1129
+ f"{self.__class__} has the config param `addition_embed_type` set to 'text_time' which requires the keyword argument `time_ids` to be passed in `added_cond_kwargs`"
1130
+ )
1131
+ time_ids = added_cond_kwargs.get("time_ids")
1132
+ time_embeds = self.base_add_time_proj(time_ids.flatten())
1133
+ time_embeds = time_embeds.reshape((text_embeds.shape[0], -1))
1134
+ add_embeds = torch.concat([text_embeds, time_embeds], dim=-1)
1135
+ add_embeds = add_embeds.to(temb.dtype)
1136
+ aug_emb = self.base_add_embedding(add_embeds)
1137
+ else:
1138
+ raise ValueError(
1139
+ f"ControlNet-XS currently only supports StableDiffusion and StableDiffusion-XL, so addition_embed_type = {self.config.addition_embed_type} is currently not supported."
1140
+ )
1141
+
1142
+ temb = temb + aug_emb if aug_emb is not None else temb
1143
+
1144
+ # text embeddings
1145
+ cemb = encoder_hidden_states
1146
+
1147
+ # Preparation
1148
+ h_ctrl = h_base = sample
1149
+ hs_base, hs_ctrl = [], []
1150
+
1151
+ # Cross Control
1152
+ guided_hint = self.controlnet_cond_embedding(controlnet_cond)
1153
+
1154
+ # 1 - conv in & down
1155
+
1156
+ h_base = self.base_conv_in(h_base)
1157
+ h_ctrl = self.ctrl_conv_in(h_ctrl)
1158
+ if guided_hint is not None:
1159
+ h_ctrl += guided_hint
1160
+ if apply_control:
1161
+ h_base = h_base + self.control_to_base_for_conv_in(h_ctrl) * conditioning_scale # add ctrl -> base
1162
+
1163
+ hs_base.append(h_base)
1164
+ hs_ctrl.append(h_ctrl)
1165
+
1166
+ for down in self.down_blocks:
1167
+ h_base, h_ctrl, residual_hb, residual_hc = down(
1168
+ hidden_states_base=h_base,
1169
+ hidden_states_ctrl=h_ctrl,
1170
+ temb=temb,
1171
+ encoder_hidden_states=cemb,
1172
+ conditioning_scale=conditioning_scale,
1173
+ cross_attention_kwargs=cross_attention_kwargs,
1174
+ attention_mask=attention_mask,
1175
+ apply_control=apply_control,
1176
+ )
1177
+ hs_base.extend(residual_hb)
1178
+ hs_ctrl.extend(residual_hc)
1179
+
1180
+ # 2 - mid
1181
+ h_base, h_ctrl = self.mid_block(
1182
+ hidden_states_base=h_base,
1183
+ hidden_states_ctrl=h_ctrl,
1184
+ temb=temb,
1185
+ encoder_hidden_states=cemb,
1186
+ conditioning_scale=conditioning_scale,
1187
+ cross_attention_kwargs=cross_attention_kwargs,
1188
+ attention_mask=attention_mask,
1189
+ apply_control=apply_control,
1190
+ )
1191
+
1192
+ # 3 - up
1193
+ for up in self.up_blocks:
1194
+ n_resnets = len(up.resnets)
1195
+ skips_hb = hs_base[-n_resnets:]
1196
+ skips_hc = hs_ctrl[-n_resnets:]
1197
+ hs_base = hs_base[:-n_resnets]
1198
+ hs_ctrl = hs_ctrl[:-n_resnets]
1199
+ h_base = up(
1200
+ hidden_states=h_base,
1201
+ res_hidden_states_tuple_base=skips_hb,
1202
+ res_hidden_states_tuple_ctrl=skips_hc,
1203
+ temb=temb,
1204
+ encoder_hidden_states=cemb,
1205
+ conditioning_scale=conditioning_scale,
1206
+ cross_attention_kwargs=cross_attention_kwargs,
1207
+ attention_mask=attention_mask,
1208
+ apply_control=apply_control,
1209
+ )
1210
+
1211
+ # 4 - conv out
1212
+ h_base = self.base_conv_norm_out(h_base)
1213
+ h_base = self.base_conv_act(h_base)
1214
+ h_base = self.base_conv_out(h_base)
1215
+
1216
+ if not return_dict:
1217
+ return (h_base,)
1218
+
1219
+ return ControlNetXSOutput(sample=h_base)
1220
+
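A small worked sketch of the time-embedding blend used in the forward pass above, with made-up tensor shapes; `time_embedding_mix` is a config value, and raising it to the power 0.3 biases the interpolation toward the learned control embedding.

import torch

time_embedding_mix = 0.5                      # hypothetical config value
p = time_embedding_mix ** 0.3                 # ~0.812: the control branch dominates even at mix=0.5
ctrl_temb = torch.randn(2, 1280)              # made-up embedding width
base_temb = torch.randn(2, 1280)
temb = ctrl_temb * p + base_temb * (1 - p)
print(temb.shape)                             # torch.Size([2, 1280])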
1221
+
1222
+ class ControlNetXSCrossAttnDownBlock2D(nn.Module):
1223
+ def __init__(
1224
+ self,
1225
+ base_in_channels: int,
1226
+ base_out_channels: int,
1227
+ ctrl_in_channels: int,
1228
+ ctrl_out_channels: int,
1229
+ temb_channels: int,
1230
+ norm_num_groups: int = 32,
1231
+ ctrl_max_norm_num_groups: int = 32,
1232
+ has_crossattn=True,
1233
+ transformer_layers_per_block: Optional[Union[int, Tuple[int]]] = 1,
1234
+ base_num_attention_heads: Optional[int] = 1,
1235
+ ctrl_num_attention_heads: Optional[int] = 1,
1236
+ cross_attention_dim: Optional[int] = 1024,
1237
+ add_downsample: bool = True,
1238
+ upcast_attention: Optional[bool] = False,
1239
+ use_linear_projection: Optional[bool] = True,
1240
+ ):
1241
+ super().__init__()
1242
+ base_resnets = []
1243
+ base_attentions = []
1244
+ ctrl_resnets = []
1245
+ ctrl_attentions = []
1246
+ ctrl_to_base = []
1247
+ base_to_ctrl = []
1248
+
1249
+ num_layers = 2 # only support sd + sdxl
1250
+
1251
+ if isinstance(transformer_layers_per_block, int):
1252
+ transformer_layers_per_block = [transformer_layers_per_block] * num_layers
1253
+
1254
+ for i in range(num_layers):
1255
+ base_in_channels = base_in_channels if i == 0 else base_out_channels
1256
+ ctrl_in_channels = ctrl_in_channels if i == 0 else ctrl_out_channels
1257
+
1258
+ # Before the resnet/attention application, information is concatted from base to control.
1259
+ # Concat doesn't require change in number of channels
1260
+ base_to_ctrl.append(make_zero_conv(base_in_channels, base_in_channels))
1261
+
1262
+ base_resnets.append(
1263
+ ResnetBlock2D(
1264
+ in_channels=base_in_channels,
1265
+ out_channels=base_out_channels,
1266
+ temb_channels=temb_channels,
1267
+ groups=norm_num_groups,
1268
+ )
1269
+ )
1270
+ ctrl_resnets.append(
1271
+ ResnetBlock2D(
1272
+ in_channels=ctrl_in_channels + base_in_channels, # information from base is concatted to ctrl
1273
+ out_channels=ctrl_out_channels,
1274
+ temb_channels=temb_channels,
1275
+ groups=find_largest_factor(
1276
+ ctrl_in_channels + base_in_channels, max_factor=ctrl_max_norm_num_groups
1277
+ ),
1278
+ groups_out=find_largest_factor(ctrl_out_channels, max_factor=ctrl_max_norm_num_groups),
1279
+ eps=1e-5,
1280
+ )
1281
+ )
1282
+
1283
+ if has_crossattn:
1284
+ base_attentions.append(
1285
+ Transformer2DModel(
1286
+ base_num_attention_heads,
1287
+ base_out_channels // base_num_attention_heads,
1288
+ in_channels=base_out_channels,
1289
+ num_layers=transformer_layers_per_block[i],
1290
+ cross_attention_dim=cross_attention_dim,
1291
+ use_linear_projection=use_linear_projection,
1292
+ upcast_attention=upcast_attention,
1293
+ norm_num_groups=norm_num_groups,
1294
+ )
1295
+ )
1296
+ ctrl_attentions.append(
1297
+ Transformer2DModel(
1298
+ ctrl_num_attention_heads,
1299
+ ctrl_out_channels // ctrl_num_attention_heads,
1300
+ in_channels=ctrl_out_channels,
1301
+ num_layers=transformer_layers_per_block[i],
1302
+ cross_attention_dim=cross_attention_dim,
1303
+ use_linear_projection=use_linear_projection,
1304
+ upcast_attention=upcast_attention,
1305
+ norm_num_groups=find_largest_factor(ctrl_out_channels, max_factor=ctrl_max_norm_num_groups),
1306
+ )
1307
+ )
1308
+
1309
+ # After the resnet/attention application, information is added from control to base
1310
+ # Addition requires change in number of channels
1311
+ ctrl_to_base.append(make_zero_conv(ctrl_out_channels, base_out_channels))
1312
+
1313
+ if add_downsample:
1314
+ # Before the downsampler application, information is concatted from base to control
1315
+ # Concat doesn't require change in number of channels
1316
+ base_to_ctrl.append(make_zero_conv(base_out_channels, base_out_channels))
1317
+
1318
+ self.base_downsamplers = Downsample2D(
1319
+ base_out_channels, use_conv=True, out_channels=base_out_channels, name="op"
1320
+ )
1321
+ self.ctrl_downsamplers = Downsample2D(
1322
+ ctrl_out_channels + base_out_channels, use_conv=True, out_channels=ctrl_out_channels, name="op"
1323
+ )
1324
+
1325
+ # After the downsampler application, information is added from control to base
1326
+ # Addition requires change in number of channels
1327
+ ctrl_to_base.append(make_zero_conv(ctrl_out_channels, base_out_channels))
1328
+ else:
1329
+ self.base_downsamplers = None
1330
+ self.ctrl_downsamplers = None
1331
+
1332
+ self.base_resnets = nn.ModuleList(base_resnets)
1333
+ self.ctrl_resnets = nn.ModuleList(ctrl_resnets)
1334
+ self.base_attentions = nn.ModuleList(base_attentions) if has_crossattn else [None] * num_layers
1335
+ self.ctrl_attentions = nn.ModuleList(ctrl_attentions) if has_crossattn else [None] * num_layers
1336
+ self.base_to_ctrl = nn.ModuleList(base_to_ctrl)
1337
+ self.ctrl_to_base = nn.ModuleList(ctrl_to_base)
1338
+
1339
+ self.gradient_checkpointing = False
1340
+
1341
+ @classmethod
1342
+ def from_modules(cls, base_downblock: CrossAttnDownBlock2D, ctrl_downblock: DownBlockControlNetXSAdapter):
1343
+ # get params
1344
+ def get_first_cross_attention(block):
1345
+ return block.attentions[0].transformer_blocks[0].attn2
1346
+
1347
+ base_in_channels = base_downblock.resnets[0].in_channels
1348
+ base_out_channels = base_downblock.resnets[0].out_channels
1349
+ ctrl_in_channels = (
1350
+ ctrl_downblock.resnets[0].in_channels - base_in_channels
1351
+ ) # base channels are concatted to ctrl channels in init
1352
+ ctrl_out_channels = ctrl_downblock.resnets[0].out_channels
1353
+ temb_channels = base_downblock.resnets[0].time_emb_proj.in_features
1354
+ num_groups = base_downblock.resnets[0].norm1.num_groups
1355
+ ctrl_num_groups = ctrl_downblock.resnets[0].norm1.num_groups
1356
+ if hasattr(base_downblock, "attentions"):
1357
+ has_crossattn = True
1358
+ transformer_layers_per_block = len(base_downblock.attentions[0].transformer_blocks)
1359
+ base_num_attention_heads = get_first_cross_attention(base_downblock).heads
1360
+ ctrl_num_attention_heads = get_first_cross_attention(ctrl_downblock).heads
1361
+ cross_attention_dim = get_first_cross_attention(base_downblock).cross_attention_dim
1362
+ upcast_attention = get_first_cross_attention(base_downblock).upcast_attention
1363
+ use_linear_projection = base_downblock.attentions[0].use_linear_projection
1364
+ else:
1365
+ has_crossattn = False
1366
+ transformer_layers_per_block = None
1367
+ base_num_attention_heads = None
1368
+ ctrl_num_attention_heads = None
1369
+ cross_attention_dim = None
1370
+ upcast_attention = None
1371
+ use_linear_projection = None
1372
+ add_downsample = base_downblock.downsamplers is not None
1373
+
1374
+ # create model
1375
+ model = cls(
1376
+ base_in_channels=base_in_channels,
1377
+ base_out_channels=base_out_channels,
1378
+ ctrl_in_channels=ctrl_in_channels,
1379
+ ctrl_out_channels=ctrl_out_channels,
1380
+ temb_channels=temb_channels,
1381
+ norm_num_groups=num_groups,
1382
+ ctrl_max_norm_num_groups=ctrl_num_groups,
1383
+ has_crossattn=has_crossattn,
1384
+ transformer_layers_per_block=transformer_layers_per_block,
1385
+ base_num_attention_heads=base_num_attention_heads,
1386
+ ctrl_num_attention_heads=ctrl_num_attention_heads,
1387
+ cross_attention_dim=cross_attention_dim,
1388
+ add_downsample=add_downsample,
1389
+ upcast_attention=upcast_attention,
1390
+ use_linear_projection=use_linear_projection,
1391
+ )
1392
+
1393
+ # load weights
1394
+ model.base_resnets.load_state_dict(base_downblock.resnets.state_dict())
1395
+ model.ctrl_resnets.load_state_dict(ctrl_downblock.resnets.state_dict())
1396
+ if has_crossattn:
1397
+ model.base_attentions.load_state_dict(base_downblock.attentions.state_dict())
1398
+ model.ctrl_attentions.load_state_dict(ctrl_downblock.attentions.state_dict())
1399
+ if add_downsample:
1400
+ model.base_downsamplers.load_state_dict(base_downblock.downsamplers[0].state_dict())
1401
+ model.ctrl_downsamplers.load_state_dict(ctrl_downblock.downsamplers.state_dict())
1402
+ model.base_to_ctrl.load_state_dict(ctrl_downblock.base_to_ctrl.state_dict())
1403
+ model.ctrl_to_base.load_state_dict(ctrl_downblock.ctrl_to_base.state_dict())
1404
+
1405
+ return model
1406
+
1407
+ def freeze_base_params(self) -> None:
1408
+ """Freeze the weights of the parts belonging to the base UNet2DConditionModel, and leave everything else unfrozen for fine
1409
+ tuning."""
1410
+ # Unfreeze everything
1411
+ for param in self.parameters():
1412
+ param.requires_grad = True
1413
+
1414
+ # Freeze base part
1415
+ base_parts = [self.base_resnets]
1416
+ if isinstance(self.base_attentions, nn.ModuleList): # attentions can be a list of Nones
1417
+ base_parts.append(self.base_attentions)
1418
+ if self.base_downsamplers is not None:
1419
+ base_parts.append(self.base_downsamplers)
1420
+ for part in base_parts:
1421
+ for param in part.parameters():
1422
+ param.requires_grad = False
1423
+
1424
+ def forward(
1425
+ self,
1426
+ hidden_states_base: Tensor,
1427
+ temb: Tensor,
1428
+ encoder_hidden_states: Optional[Tensor] = None,
1429
+ hidden_states_ctrl: Optional[Tensor] = None,
1430
+ conditioning_scale: Optional[float] = 1.0,
1431
+ attention_mask: Optional[Tensor] = None,
1432
+ cross_attention_kwargs: Optional[Dict[str, Any]] = None,
1433
+ encoder_attention_mask: Optional[Tensor] = None,
1434
+ apply_control: bool = True,
1435
+ ) -> Tuple[Tensor, Tensor, Tuple[Tensor, ...], Tuple[Tensor, ...]]:
1436
+ if cross_attention_kwargs is not None:
1437
+ if cross_attention_kwargs.get("scale", None) is not None:
1438
+ logger.warning("Passing `scale` to `cross_attention_kwargs` is deprecated. `scale` will be ignored.")
1439
+
1440
+ h_base = hidden_states_base
1441
+ h_ctrl = hidden_states_ctrl
1442
+
1443
+ base_output_states = ()
1444
+ ctrl_output_states = ()
1445
+
1446
+ base_blocks = list(zip(self.base_resnets, self.base_attentions))
1447
+ ctrl_blocks = list(zip(self.ctrl_resnets, self.ctrl_attentions))
1448
+
1449
+ for (b_res, b_attn), (c_res, c_attn), b2c, c2b in zip(
1450
+ base_blocks, ctrl_blocks, self.base_to_ctrl, self.ctrl_to_base
1451
+ ):
1452
+ # concat base -> ctrl
1453
+ if apply_control:
1454
+ h_ctrl = torch.cat([h_ctrl, b2c(h_base)], dim=1)
1455
+
1456
+ # apply base subblock
1457
+ if torch.is_grad_enabled() and self.gradient_checkpointing:
1458
+ h_base = self._gradient_checkpointing_func(b_res, h_base, temb)
1459
+ else:
1460
+ h_base = b_res(h_base, temb)
1461
+
1462
+ if b_attn is not None:
1463
+ h_base = b_attn(
1464
+ h_base,
1465
+ encoder_hidden_states=encoder_hidden_states,
1466
+ cross_attention_kwargs=cross_attention_kwargs,
1467
+ attention_mask=attention_mask,
1468
+ encoder_attention_mask=encoder_attention_mask,
1469
+ return_dict=False,
1470
+ )[0]
1471
+
1472
+ # apply ctrl subblock
1473
+ if apply_control:
1474
+ if torch.is_grad_enabled() and self.gradient_checkpointing:
1475
+ h_ctrl = self._gradient_checkpointing_func(c_res, h_ctrl, temb)
1476
+ else:
1477
+ h_ctrl = c_res(h_ctrl, temb)
1478
+ if c_attn is not None:
1479
+ h_ctrl = c_attn(
1480
+ h_ctrl,
1481
+ encoder_hidden_states=encoder_hidden_states,
1482
+ cross_attention_kwargs=cross_attention_kwargs,
1483
+ attention_mask=attention_mask,
1484
+ encoder_attention_mask=encoder_attention_mask,
1485
+ return_dict=False,
1486
+ )[0]
1487
+
1488
+ # add ctrl -> base
1489
+ if apply_control:
1490
+ h_base = h_base + c2b(h_ctrl) * conditioning_scale
1491
+
1492
+ base_output_states = base_output_states + (h_base,)
1493
+ ctrl_output_states = ctrl_output_states + (h_ctrl,)
1494
+
1495
+ if self.base_downsamplers is not None: # if we have a base_downsampler, then also a ctrl_downsampler
1496
+ b2c = self.base_to_ctrl[-1]
1497
+ c2b = self.ctrl_to_base[-1]
1498
+
1499
+ # concat base -> ctrl
1500
+ if apply_control:
1501
+ h_ctrl = torch.cat([h_ctrl, b2c(h_base)], dim=1)
1502
+ # apply base subblock
1503
+ h_base = self.base_downsamplers(h_base)
1504
+ # apply ctrl subblock
1505
+ if apply_control:
1506
+ h_ctrl = self.ctrl_downsamplers(h_ctrl)
1507
+ # add ctrl -> base
1508
+ if apply_control:
1509
+ h_base = h_base + c2b(h_ctrl) * conditioning_scale
1510
+
1511
+ base_output_states = base_output_states + (h_base,)
1512
+ ctrl_output_states = ctrl_output_states + (h_ctrl,)
1513
+
1514
+ return h_base, h_ctrl, base_output_states, ctrl_output_states
1515
+
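The down block above follows a fixed exchange pattern per sub-block: concatenate base features into the control stream before the sub-block, then add the control output back to the base through a zero-initialized 1x1 convolution. Below is a toy sketch of that pattern with invented channel counts; the resnet/attention sub-blocks themselves are omitted.

import torch
import torch.nn as nn

base_ch, ctrl_ch = 8, 4
b2c = nn.Conv2d(base_ch, base_ch, kernel_size=1)   # base -> ctrl: concat, so channel count unchanged
c2b = nn.Conv2d(ctrl_ch, base_ch, kernel_size=1)   # ctrl -> base: project to base channels
nn.init.zeros_(c2b.weight)
nn.init.zeros_(c2b.bias)                            # zero conv: no contribution at initialization

h_base = torch.randn(1, base_ch, 16, 16)
h_ctrl = torch.randn(1, ctrl_ch, 16, 16)

h_ctrl = torch.cat([h_ctrl, b2c(h_base)], dim=1)    # concat base -> ctrl (ctrl_ch + base_ch channels)
# ... the resnet/attention sub-blocks would transform h_base and h_ctrl here ...
h_ctrl = torch.randn(1, ctrl_ch, 16, 16)            # stand-in for the ctrl sub-block output
conditioning_scale = 1.0
h_base = h_base + c2b(h_ctrl) * conditioning_scale  # add ctrl -> base, initially a no-op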
1516
+
1517
+ class ControlNetXSCrossAttnMidBlock2D(nn.Module):
1518
+ def __init__(
1519
+ self,
1520
+ base_channels: int,
1521
+ ctrl_channels: int,
1522
+ temb_channels: Optional[int] = None,
1523
+ norm_num_groups: int = 32,
1524
+ ctrl_max_norm_num_groups: int = 32,
1525
+ transformer_layers_per_block: int = 1,
1526
+ base_num_attention_heads: Optional[int] = 1,
1527
+ ctrl_num_attention_heads: Optional[int] = 1,
1528
+ cross_attention_dim: Optional[int] = 1024,
1529
+ upcast_attention: bool = False,
1530
+ use_linear_projection: Optional[bool] = True,
1531
+ ):
1532
+ super().__init__()
1533
+
1534
+ # Before the midblock application, information is concatted from base to control.
1535
+ # Concat doesn't require change in number of channels
1536
+ self.base_to_ctrl = make_zero_conv(base_channels, base_channels)
1537
+
1538
+ self.base_midblock = UNetMidBlock2DCrossAttn(
1539
+ transformer_layers_per_block=transformer_layers_per_block,
1540
+ in_channels=base_channels,
1541
+ temb_channels=temb_channels,
1542
+ resnet_groups=norm_num_groups,
1543
+ cross_attention_dim=cross_attention_dim,
1544
+ num_attention_heads=base_num_attention_heads,
1545
+ use_linear_projection=use_linear_projection,
1546
+ upcast_attention=upcast_attention,
1547
+ )
1548
+
1549
+ self.ctrl_midblock = UNetMidBlock2DCrossAttn(
1550
+ transformer_layers_per_block=transformer_layers_per_block,
1551
+ in_channels=ctrl_channels + base_channels,
1552
+ out_channels=ctrl_channels,
1553
+ temb_channels=temb_channels,
1554
+ # number or norm groups must divide both in_channels and out_channels
1555
+ resnet_groups=find_largest_factor(
1556
+ gcd(ctrl_channels, ctrl_channels + base_channels), ctrl_max_norm_num_groups
1557
+ ),
1558
+ cross_attention_dim=cross_attention_dim,
1559
+ num_attention_heads=ctrl_num_attention_heads,
1560
+ use_linear_projection=use_linear_projection,
1561
+ upcast_attention=upcast_attention,
1562
+ )
1563
+
1564
+ # After the midblock application, information is added from control to base
1565
+ # Addition requires change in number of channels
1566
+ self.ctrl_to_base = make_zero_conv(ctrl_channels, base_channels)
1567
+
1568
+ self.gradient_checkpointing = False
1569
+
1570
+ @classmethod
1571
+ def from_modules(
1572
+ cls,
1573
+ base_midblock: UNetMidBlock2DCrossAttn,
1574
+ ctrl_midblock: MidBlockControlNetXSAdapter,
1575
+ ):
1576
+ base_to_ctrl = ctrl_midblock.base_to_ctrl
1577
+ ctrl_to_base = ctrl_midblock.ctrl_to_base
1578
+ ctrl_midblock = ctrl_midblock.midblock
1579
+
1580
+ # get params
1581
+ def get_first_cross_attention(midblock):
1582
+ return midblock.attentions[0].transformer_blocks[0].attn2
1583
+
1584
+ base_channels = ctrl_to_base.out_channels
1585
+ ctrl_channels = ctrl_to_base.in_channels
1586
+ transformer_layers_per_block = len(base_midblock.attentions[0].transformer_blocks)
1587
+ temb_channels = base_midblock.resnets[0].time_emb_proj.in_features
1588
+ num_groups = base_midblock.resnets[0].norm1.num_groups
1589
+ ctrl_num_groups = ctrl_midblock.resnets[0].norm1.num_groups
1590
+ base_num_attention_heads = get_first_cross_attention(base_midblock).heads
1591
+ ctrl_num_attention_heads = get_first_cross_attention(ctrl_midblock).heads
1592
+ cross_attention_dim = get_first_cross_attention(base_midblock).cross_attention_dim
1593
+ upcast_attention = get_first_cross_attention(base_midblock).upcast_attention
1594
+ use_linear_projection = base_midblock.attentions[0].use_linear_projection
1595
+
1596
+ # create model
1597
+ model = cls(
1598
+ base_channels=base_channels,
1599
+ ctrl_channels=ctrl_channels,
1600
+ temb_channels=temb_channels,
1601
+ norm_num_groups=num_groups,
1602
+ ctrl_max_norm_num_groups=ctrl_num_groups,
1603
+ transformer_layers_per_block=transformer_layers_per_block,
1604
+ base_num_attention_heads=base_num_attention_heads,
1605
+ ctrl_num_attention_heads=ctrl_num_attention_heads,
1606
+ cross_attention_dim=cross_attention_dim,
1607
+ upcast_attention=upcast_attention,
1608
+ use_linear_projection=use_linear_projection,
1609
+ )
1610
+
1611
+ # load weights
1612
+ model.base_to_ctrl.load_state_dict(base_to_ctrl.state_dict())
1613
+ model.base_midblock.load_state_dict(base_midblock.state_dict())
1614
+ model.ctrl_midblock.load_state_dict(ctrl_midblock.state_dict())
1615
+ model.ctrl_to_base.load_state_dict(ctrl_to_base.state_dict())
1616
+
1617
+ return model
1618
+
1619
+ def freeze_base_params(self) -> None:
1620
+ """Freeze the weights of the parts belonging to the base UNet2DConditionModel, and leave everything else unfrozen for fine
1621
+ tuning."""
1622
+ # Unfreeze everything
1623
+ for param in self.parameters():
1624
+ param.requires_grad = True
1625
+
1626
+ # Freeze base part
1627
+ for param in self.base_midblock.parameters():
1628
+ param.requires_grad = False
1629
+
1630
+ def forward(
1631
+ self,
1632
+ hidden_states_base: Tensor,
1633
+ temb: Tensor,
1634
+ encoder_hidden_states: Tensor,
1635
+ hidden_states_ctrl: Optional[Tensor] = None,
1636
+ conditioning_scale: Optional[float] = 1.0,
1637
+ cross_attention_kwargs: Optional[Dict[str, Any]] = None,
1638
+ attention_mask: Optional[Tensor] = None,
1639
+ encoder_attention_mask: Optional[Tensor] = None,
1640
+ apply_control: bool = True,
1641
+ ) -> Tuple[Tensor, Tensor]:
1642
+ if cross_attention_kwargs is not None:
1643
+ if cross_attention_kwargs.get("scale", None) is not None:
1644
+ logger.warning("Passing `scale` to `cross_attention_kwargs` is deprecated. `scale` will be ignored.")
1645
+
1646
+ h_base = hidden_states_base
1647
+ h_ctrl = hidden_states_ctrl
1648
+
1649
+ joint_args = {
1650
+ "temb": temb,
1651
+ "encoder_hidden_states": encoder_hidden_states,
1652
+ "attention_mask": attention_mask,
1653
+ "cross_attention_kwargs": cross_attention_kwargs,
1654
+ "encoder_attention_mask": encoder_attention_mask,
1655
+ }
1656
+
1657
+ if apply_control:
1658
+ h_ctrl = torch.cat([h_ctrl, self.base_to_ctrl(h_base)], dim=1) # concat base -> ctrl
1659
+ h_base = self.base_midblock(h_base, **joint_args) # apply base mid block
1660
+ if apply_control:
1661
+ h_ctrl = self.ctrl_midblock(h_ctrl, **joint_args) # apply ctrl mid block
1662
+ h_base = h_base + self.ctrl_to_base(h_ctrl) * conditioning_scale # add ctrl -> base
1663
+
1664
+ return h_base, h_ctrl
1665
+
1666
+
1667
+ class ControlNetXSCrossAttnUpBlock2D(nn.Module):
1668
+ def __init__(
1669
+ self,
1670
+ in_channels: int,
1671
+ out_channels: int,
1672
+ prev_output_channel: int,
1673
+ ctrl_skip_channels: List[int],
1674
+ temb_channels: int,
1675
+ norm_num_groups: int = 32,
1676
+ resolution_idx: Optional[int] = None,
1677
+ has_crossattn=True,
1678
+ transformer_layers_per_block: int = 1,
1679
+ num_attention_heads: int = 1,
1680
+ cross_attention_dim: int = 1024,
1681
+ add_upsample: bool = True,
1682
+ upcast_attention: bool = False,
1683
+ use_linear_projection: Optional[bool] = True,
1684
+ ):
1685
+ super().__init__()
1686
+ resnets = []
1687
+ attentions = []
1688
+ ctrl_to_base = []
1689
+
1690
+ num_layers = 3 # only support sd + sdxl
1691
+
1692
+ self.has_cross_attention = has_crossattn
1693
+ self.num_attention_heads = num_attention_heads
1694
+
1695
+ if isinstance(transformer_layers_per_block, int):
1696
+ transformer_layers_per_block = [transformer_layers_per_block] * num_layers
1697
+
1698
+ for i in range(num_layers):
1699
+ res_skip_channels = in_channels if (i == num_layers - 1) else out_channels
1700
+ resnet_in_channels = prev_output_channel if i == 0 else out_channels
1701
+
1702
+ ctrl_to_base.append(make_zero_conv(ctrl_skip_channels[i], resnet_in_channels))
1703
+
1704
+ resnets.append(
1705
+ ResnetBlock2D(
1706
+ in_channels=resnet_in_channels + res_skip_channels,
1707
+ out_channels=out_channels,
1708
+ temb_channels=temb_channels,
1709
+ groups=norm_num_groups,
1710
+ )
1711
+ )
1712
+
1713
+ if has_crossattn:
1714
+ attentions.append(
1715
+ Transformer2DModel(
1716
+ num_attention_heads,
1717
+ out_channels // num_attention_heads,
1718
+ in_channels=out_channels,
1719
+ num_layers=transformer_layers_per_block[i],
1720
+ cross_attention_dim=cross_attention_dim,
1721
+ use_linear_projection=use_linear_projection,
1722
+ upcast_attention=upcast_attention,
1723
+ norm_num_groups=norm_num_groups,
1724
+ )
1725
+ )
1726
+
1727
+ self.resnets = nn.ModuleList(resnets)
1728
+ self.attentions = nn.ModuleList(attentions) if has_crossattn else [None] * num_layers
1729
+ self.ctrl_to_base = nn.ModuleList(ctrl_to_base)
1730
+
1731
+ if add_upsample:
1732
+ self.upsamplers = Upsample2D(out_channels, use_conv=True, out_channels=out_channels)
1733
+ else:
1734
+ self.upsamplers = None
1735
+
1736
+ self.gradient_checkpointing = False
1737
+ self.resolution_idx = resolution_idx
1738
+
1739
+ @classmethod
1740
+ def from_modules(cls, base_upblock: CrossAttnUpBlock2D, ctrl_upblock: UpBlockControlNetXSAdapter):
1741
+ ctrl_to_base_skip_connections = ctrl_upblock.ctrl_to_base
1742
+
1743
+ # get params
1744
+ def get_first_cross_attention(block):
1745
+ return block.attentions[0].transformer_blocks[0].attn2
1746
+
1747
+ out_channels = base_upblock.resnets[0].out_channels
1748
+ in_channels = base_upblock.resnets[-1].in_channels - out_channels
1749
+ prev_output_channels = base_upblock.resnets[0].in_channels - out_channels
1750
+ ctrl_skip_channelss = [c.in_channels for c in ctrl_to_base_skip_connections]
1751
+ temb_channels = base_upblock.resnets[0].time_emb_proj.in_features
1752
+ num_groups = base_upblock.resnets[0].norm1.num_groups
1753
+ resolution_idx = base_upblock.resolution_idx
1754
+ if hasattr(base_upblock, "attentions"):
1755
+ has_crossattn = True
1756
+ transformer_layers_per_block = len(base_upblock.attentions[0].transformer_blocks)
1757
+ num_attention_heads = get_first_cross_attention(base_upblock).heads
1758
+ cross_attention_dim = get_first_cross_attention(base_upblock).cross_attention_dim
1759
+ upcast_attention = get_first_cross_attention(base_upblock).upcast_attention
1760
+ use_linear_projection = base_upblock.attentions[0].use_linear_projection
1761
+ else:
1762
+ has_crossattn = False
1763
+ transformer_layers_per_block = None
1764
+ num_attention_heads = None
1765
+ cross_attention_dim = None
1766
+ upcast_attention = None
1767
+ use_linear_projection = None
1768
+ add_upsample = base_upblock.upsamplers is not None
1769
+
1770
+ # create model
1771
+ model = cls(
1772
+ in_channels=in_channels,
1773
+ out_channels=out_channels,
1774
+ prev_output_channel=prev_output_channels,
1775
+ ctrl_skip_channels=ctrl_skip_channelss,
1776
+ temb_channels=temb_channels,
1777
+ norm_num_groups=num_groups,
1778
+ resolution_idx=resolution_idx,
1779
+ has_crossattn=has_crossattn,
1780
+ transformer_layers_per_block=transformer_layers_per_block,
1781
+ num_attention_heads=num_attention_heads,
1782
+ cross_attention_dim=cross_attention_dim,
1783
+ add_upsample=add_upsample,
1784
+ upcast_attention=upcast_attention,
1785
+ use_linear_projection=use_linear_projection,
1786
+ )
1787
+
1788
+ # load weights
1789
+ model.resnets.load_state_dict(base_upblock.resnets.state_dict())
1790
+ if has_crossattn:
1791
+ model.attentions.load_state_dict(base_upblock.attentions.state_dict())
1792
+ if add_upsample:
1793
+ model.upsamplers.load_state_dict(base_upblock.upsamplers[0].state_dict())
1794
+ model.ctrl_to_base.load_state_dict(ctrl_to_base_skip_connections.state_dict())
1795
+
1796
+ return model
1797
+
1798
+ def freeze_base_params(self) -> None:
1799
+ """Freeze the weights of the parts belonging to the base UNet2DConditionModel, and leave everything else unfrozen for fine
1800
+ tuning."""
1801
+ # Unfreeze everything
1802
+ for param in self.parameters():
1803
+ param.requires_grad = True
1804
+
1805
+ # Freeze base part
1806
+ base_parts = [self.resnets]
1807
+ if isinstance(self.attentions, nn.ModuleList): # attentions can be a list of Nones
1808
+ base_parts.append(self.attentions)
1809
+ if self.upsamplers is not None:
1810
+ base_parts.append(self.upsamplers)
1811
+ for part in base_parts:
1812
+ for param in part.parameters():
1813
+ param.requires_grad = False
1814
+
1815
+ def forward(
1816
+ self,
1817
+ hidden_states: Tensor,
1818
+ res_hidden_states_tuple_base: Tuple[Tensor, ...],
1819
+ res_hidden_states_tuple_ctrl: Tuple[Tensor, ...],
1820
+ temb: Tensor,
1821
+ encoder_hidden_states: Optional[Tensor] = None,
1822
+ conditioning_scale: Optional[float] = 1.0,
1823
+ cross_attention_kwargs: Optional[Dict[str, Any]] = None,
1824
+ attention_mask: Optional[Tensor] = None,
1825
+ upsample_size: Optional[int] = None,
1826
+ encoder_attention_mask: Optional[Tensor] = None,
1827
+ apply_control: bool = True,
1828
+ ) -> Tensor:
1829
+ if cross_attention_kwargs is not None:
1830
+ if cross_attention_kwargs.get("scale", None) is not None:
1831
+ logger.warning("Passing `scale` to `cross_attention_kwargs` is deprecated. `scale` will be ignored.")
1832
+
1833
+ is_freeu_enabled = (
1834
+ getattr(self, "s1", None)
1835
+ and getattr(self, "s2", None)
1836
+ and getattr(self, "b1", None)
1837
+ and getattr(self, "b2", None)
1838
+ )
1839
+
1840
+ def maybe_apply_freeu_to_subblock(hidden_states, res_h_base):
1841
+ # FreeU: Only operate on the first two stages
1842
+ if is_freeu_enabled:
1843
+ return apply_freeu(
1844
+ self.resolution_idx,
1845
+ hidden_states,
1846
+ res_h_base,
1847
+ s1=self.s1,
1848
+ s2=self.s2,
1849
+ b1=self.b1,
1850
+ b2=self.b2,
1851
+ )
1852
+ else:
1853
+ return hidden_states, res_h_base
1854
+
1855
+ for resnet, attn, c2b, res_h_base, res_h_ctrl in zip(
1856
+ self.resnets,
1857
+ self.attentions,
1858
+ self.ctrl_to_base,
1859
+ reversed(res_hidden_states_tuple_base),
1860
+ reversed(res_hidden_states_tuple_ctrl),
1861
+ ):
1862
+ if apply_control:
1863
+ hidden_states += c2b(res_h_ctrl) * conditioning_scale
1864
+
1865
+ hidden_states, res_h_base = maybe_apply_freeu_to_subblock(hidden_states, res_h_base)
1866
+ hidden_states = torch.cat([hidden_states, res_h_base], dim=1)
1867
+
1868
+ if torch.is_grad_enabled() and self.gradient_checkpointing:
1869
+ hidden_states = self._gradient_checkpointing_func(resnet, hidden_states, temb)
1870
+ else:
1871
+ hidden_states = resnet(hidden_states, temb)
1872
+
1873
+ if attn is not None:
1874
+ hidden_states = attn(
1875
+ hidden_states,
1876
+ encoder_hidden_states=encoder_hidden_states,
1877
+ cross_attention_kwargs=cross_attention_kwargs,
1878
+ attention_mask=attention_mask,
1879
+ encoder_attention_mask=encoder_attention_mask,
1880
+ return_dict=False,
1881
+ )[0]
1882
+
1883
+ if self.upsamplers is not None:
1884
+ hidden_states = self.upsamplers(hidden_states, upsample_size)
1885
+
1886
+ return hidden_states
1887
+
1888
+
1889
+ def make_zero_conv(in_channels, out_channels=None):
1890
+ return zero_module(nn.Conv2d(in_channels, out_channels, 1, padding=0))
1891
+
1892
+
1893
+ def zero_module(module):
1894
+ for p in module.parameters():
1895
+ nn.init.zeros_(p)
1896
+ return module
1897
+
1898
+
1899
+ def find_largest_factor(number, max_factor):
1900
+ factor = max_factor
1901
+ if factor >= number:
1902
+ return number
1903
+ while factor != 0:
1904
+ residual = number % factor
1905
+ if residual == 0:
1906
+ return factor
1907
+ factor -= 1
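A self-contained sketch of the two module-level helpers above (re-stated here so it runs on its own): the zero-initialized 1x1 convolution guarantees the control branch adds nothing at initialization, and `find_largest_factor` picks a GroupNorm group count that divides an arbitrary channel count.

import torch
import torch.nn as nn

def zero_module(module):
    # zero out all parameters so the module is a no-op at initialization
    for p in module.parameters():
        nn.init.zeros_(p)
    return module

def find_largest_factor(number, max_factor):
    factor = max_factor
    if factor >= number:
        return number
    while factor != 0:
        if number % factor == 0:
            return factor
        factor -= 1

zero_conv = zero_module(nn.Conv2d(8, 4, kernel_size=1))
x = torch.randn(1, 8, 16, 16)
assert torch.allclose(zero_conv(x), torch.zeros(1, 4, 16, 16))  # contributes nothing at init

assert find_largest_factor(320, max_factor=32) == 32  # 32 divides 320
assert find_largest_factor(330, max_factor=32) == 30  # largest divisor of 330 that is <= 32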
exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/models/controlnets/multicontrolnet.py ADDED
@@ -0,0 +1,182 @@
1
+ import os
2
+ from typing import Any, Callable, Dict, List, Optional, Tuple, Union
3
+
4
+ import torch
5
+ from torch import nn
6
+
7
+ from ...utils import logging
8
+ from ..controlnets.controlnet import ControlNetModel, ControlNetOutput
9
+ from ..modeling_utils import ModelMixin
10
+
11
+
12
+ logger = logging.get_logger(__name__)
13
+
14
+
15
+ class MultiControlNetModel(ModelMixin):
16
+ r"""
17
+ Multiple `ControlNetModel` wrapper class for Multi-ControlNet
18
+
19
+ This module is a wrapper for multiple instances of the `ControlNetModel`. The `forward()` API is designed to be
20
+ compatible with `ControlNetModel`.
21
+
22
+ Args:
23
+ controlnets (`List[ControlNetModel]`):
24
+ Provides additional conditioning to the unet during the denoising process. You must set multiple
25
+ `ControlNetModel` as a list.
26
+ """
27
+
28
+ def __init__(self, controlnets: Union[List[ControlNetModel], Tuple[ControlNetModel]]):
29
+ super().__init__()
30
+ self.nets = nn.ModuleList(controlnets)
31
+
32
+ def forward(
33
+ self,
34
+ sample: torch.Tensor,
35
+ timestep: Union[torch.Tensor, float, int],
36
+ encoder_hidden_states: torch.Tensor,
37
+ controlnet_cond: List[torch.tensor],
38
+ conditioning_scale: List[float],
39
+ class_labels: Optional[torch.Tensor] = None,
40
+ timestep_cond: Optional[torch.Tensor] = None,
41
+ attention_mask: Optional[torch.Tensor] = None,
42
+ added_cond_kwargs: Optional[Dict[str, torch.Tensor]] = None,
43
+ cross_attention_kwargs: Optional[Dict[str, Any]] = None,
44
+ guess_mode: bool = False,
45
+ return_dict: bool = True,
46
+ ) -> Union[ControlNetOutput, Tuple]:
47
+ for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond, conditioning_scale, self.nets)):
48
+ down_samples, mid_sample = controlnet(
49
+ sample=sample,
50
+ timestep=timestep,
51
+ encoder_hidden_states=encoder_hidden_states,
52
+ controlnet_cond=image,
53
+ conditioning_scale=scale,
54
+ class_labels=class_labels,
55
+ timestep_cond=timestep_cond,
56
+ attention_mask=attention_mask,
57
+ added_cond_kwargs=added_cond_kwargs,
58
+ cross_attention_kwargs=cross_attention_kwargs,
59
+ guess_mode=guess_mode,
60
+ return_dict=return_dict,
61
+ )
62
+
63
+ # merge samples
64
+ if i == 0:
65
+ down_block_res_samples, mid_block_res_sample = down_samples, mid_sample
66
+ else:
67
+ down_block_res_samples = [
68
+ samples_prev + samples_curr
69
+ for samples_prev, samples_curr in zip(down_block_res_samples, down_samples)
70
+ ]
71
+ mid_block_res_sample += mid_sample
72
+
73
+ return down_block_res_samples, mid_block_res_sample
74
+
75
+ def save_pretrained(
76
+ self,
77
+ save_directory: Union[str, os.PathLike],
78
+ is_main_process: bool = True,
79
+ save_function: Callable = None,
80
+ safe_serialization: bool = True,
81
+ variant: Optional[str] = None,
82
+ ):
83
+ """
84
+ Save a model and its configuration file to a directory, so that it can be re-loaded using the
85
+ [`~models.controlnets.multicontrolnet.MultiControlNetModel.from_pretrained`] class method.
86
+
87
+ Arguments:
88
+ save_directory (`str` or `os.PathLike`):
89
+ Directory to which to save. Will be created if it doesn't exist.
90
+ is_main_process (`bool`, *optional*, defaults to `True`):
91
+ Whether the process calling this is the main process or not. Useful when in distributed training like
92
+ TPUs and need to call this function on all processes. In this case, set `is_main_process=True` only on
93
+ the main process to avoid race conditions.
94
+ save_function (`Callable`):
95
+ The function to use to save the state dictionary. Useful on distributed training like TPUs when one
96
+ need to replace `torch.save` by another method. Can be configured with the environment variable
97
+ `DIFFUSERS_SAVE_MODE`.
98
+ safe_serialization (`bool`, *optional*, defaults to `True`):
99
+ Whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).
100
+ variant (`str`, *optional*):
101
+ If specified, weights are saved in the format pytorch_model.<variant>.bin.
102
+ """
103
+ for idx, controlnet in enumerate(self.nets):
104
+ suffix = "" if idx == 0 else f"_{idx}"
105
+ controlnet.save_pretrained(
106
+ save_directory + suffix,
107
+ is_main_process=is_main_process,
108
+ save_function=save_function,
109
+ safe_serialization=safe_serialization,
110
+ variant=variant,
111
+ )
112
+
113
+ @classmethod
114
+ def from_pretrained(cls, pretrained_model_path: Optional[Union[str, os.PathLike]], **kwargs):
115
+ r"""
116
+ Instantiate a pretrained MultiControlNet model from multiple pre-trained controlnet models.
117
+
118
+ The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated). To train
119
+ the model, you should first set it back in training mode with `model.train()`.
120
+
121
+ The warning *Weights from XXX not initialized from pretrained model* means that the weights of XXX do not come
122
+ pretrained with the rest of the model. It is up to you to train those weights with a downstream fine-tuning
123
+ task.
124
+
125
+ The warning *Weights from XXX not used in YYY* means that the layer XXX is not used by YYY, therefore those
126
+ weights are discarded.
127
+
128
+ Parameters:
129
+ pretrained_model_path (`os.PathLike`):
130
+ A path to a *directory* containing model weights saved using
131
+ [`~models.controlnets.multicontrolnet.MultiControlNetModel.save_pretrained`], e.g.,
132
+ `./my_model_directory/controlnet`.
133
+ torch_dtype (`torch.dtype`, *optional*):
134
+ Override the default `torch.dtype` and load the model under this dtype.
135
+ output_loading_info(`bool`, *optional*, defaults to `False`):
136
+ Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.
137
+ device_map (`str` or `Dict[str, Union[int, str, torch.device]]`, *optional*):
138
+ A map that specifies where each submodule should go. It doesn't need to be refined to each
139
+ parameter/buffer name, once a given module name is inside, every submodule of it will be sent to the
140
+ same device.
141
+
142
+ To have Accelerate compute the most optimized `device_map` automatically, set `device_map="auto"`. For
143
+ more information about each option see [designing a device
144
+ map](https://hf.co/docs/accelerate/main/en/usage_guides/big_modeling#designing-a-device-map).
145
+ max_memory (`Dict`, *optional*):
146
+ A dictionary device identifier to maximum memory. Will default to the maximum memory available for each
147
+ GPU and the available CPU RAM if unset.
148
+ low_cpu_mem_usage (`bool`, *optional*, defaults to `True` if torch version >= 1.9.0 else `False`):
149
+ Speed up model loading by not initializing the weights and only loading the pre-trained weights. This
150
+ also tries to not use more than 1x model size in CPU memory (including peak memory) while loading the
151
+ model. This is only supported when torch version >= 1.9.0. If you are using an older version of torch,
152
+ setting this argument to `True` will raise an error.
153
+ variant (`str`, *optional*):
154
+ If specified load weights from `variant` filename, *e.g.* pytorch_model.<variant>.bin. `variant` is
155
+ ignored when using `from_flax`.
156
+ use_safetensors (`bool`, *optional*, defaults to `None`):
157
+ If set to `None`, the `safetensors` weights will be downloaded if they're available **and** if the
158
+ `safetensors` library is installed. If set to `True`, the model will be forcibly loaded from
159
+ `safetensors` weights. If set to `False`, loading will *not* use `safetensors`.
160
+ """
161
+ idx = 0
162
+ controlnets = []
163
+
164
+ # load controlnet and append to list until no controlnet directory exists anymore
165
+ # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
166
+ # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
167
+ model_path_to_load = pretrained_model_path
168
+ while os.path.isdir(model_path_to_load):
169
+ controlnet = ControlNetModel.from_pretrained(model_path_to_load, **kwargs)
170
+ controlnets.append(controlnet)
171
+
172
+ idx += 1
173
+ model_path_to_load = pretrained_model_path + f"_{idx}"
174
+
175
+ logger.info(f"{len(controlnets)} controlnets loaded from {pretrained_model_path}.")
176
+
177
+ if len(controlnets) == 0:
178
+ raise ValueError(
179
+ f"No ControlNets found under {os.path.dirname(pretrained_model_path)}. Expected at least {pretrained_model_path + '_0'}."
180
+ )
181
+
182
+ return cls(controlnets)
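A hedged usage sketch for the wrapper above. The Hub repository ids are examples of publicly available ControlNets and are not taken from this diff; the directory-suffix convention (`controlnet`, `controlnet_1`, ...) is the one implemented by `save_pretrained`/`from_pretrained` here.

from diffusers import ControlNetModel
from diffusers.models.controlnets.multicontrolnet import MultiControlNetModel

canny = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
depth = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-depth")
multi = MultiControlNetModel([canny, depth])

# writes ./multi_cn (first net) and ./multi_cn_1 (second net)
multi.save_pretrained("./multi_cn")

# walks ./multi_cn, ./multi_cn_1, ... until no further directory exists
reloaded = MultiControlNetModel.from_pretrained("./multi_cn")
assert len(reloaded.nets) == 2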
exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/models/controlnets/multicontrolnet_union.py ADDED
@@ -0,0 +1,195 @@
1
+ import os
2
+ from typing import Any, Callable, Dict, List, Optional, Tuple, Union
3
+
4
+ import torch
5
+ from torch import nn
6
+
7
+ from ...utils import logging
8
+ from ..controlnets.controlnet import ControlNetOutput
9
+ from ..controlnets.controlnet_union import ControlNetUnionModel
10
+ from ..modeling_utils import ModelMixin
11
+
12
+
13
+ logger = logging.get_logger(__name__)
14
+
15
+
16
+ class MultiControlNetUnionModel(ModelMixin):
17
+ r"""
18
+ Multiple `ControlNetUnionModel` wrapper class for Multi-ControlNet-Union.
19
+
20
+ This module is a wrapper for multiple instances of the `ControlNetUnionModel`. The `forward()` API is designed to
21
+ be compatible with `ControlNetUnionModel`.
22
+
23
+ Args:
24
+ controlnets (`List[ControlNetUnionModel]`):
25
+ Provides additional conditioning to the unet during the denoising process. You must set multiple
26
+ `ControlNetUnionModel` as a list.
27
+ """
28
+
29
+ def __init__(self, controlnets: Union[List[ControlNetUnionModel], Tuple[ControlNetUnionModel]]):
30
+ super().__init__()
31
+ self.nets = nn.ModuleList(controlnets)
32
+
33
+ def forward(
34
+ self,
35
+ sample: torch.Tensor,
36
+ timestep: Union[torch.Tensor, float, int],
37
+ encoder_hidden_states: torch.Tensor,
38
+ controlnet_cond: List[torch.tensor],
39
+ control_type: List[torch.Tensor],
40
+ control_type_idx: List[List[int]],
41
+ conditioning_scale: List[float],
42
+ class_labels: Optional[torch.Tensor] = None,
43
+ timestep_cond: Optional[torch.Tensor] = None,
44
+ attention_mask: Optional[torch.Tensor] = None,
45
+ added_cond_kwargs: Optional[Dict[str, torch.Tensor]] = None,
46
+ cross_attention_kwargs: Optional[Dict[str, Any]] = None,
47
+ guess_mode: bool = False,
48
+ return_dict: bool = True,
49
+ ) -> Union[ControlNetOutput, Tuple]:
50
+ down_block_res_samples, mid_block_res_sample = None, None
51
+ for i, (image, ctype, ctype_idx, scale, controlnet) in enumerate(
52
+ zip(controlnet_cond, control_type, control_type_idx, conditioning_scale, self.nets)
53
+ ):
54
+ if scale == 0.0:
55
+ continue
56
+ down_samples, mid_sample = controlnet(
57
+ sample=sample,
58
+ timestep=timestep,
59
+ encoder_hidden_states=encoder_hidden_states,
60
+ controlnet_cond=image,
61
+ control_type=ctype,
62
+ control_type_idx=ctype_idx,
63
+ conditioning_scale=scale,
64
+ class_labels=class_labels,
65
+ timestep_cond=timestep_cond,
66
+ attention_mask=attention_mask,
67
+ added_cond_kwargs=added_cond_kwargs,
68
+ cross_attention_kwargs=cross_attention_kwargs,
69
+ from_multi=True,
70
+ guess_mode=guess_mode,
71
+ return_dict=return_dict,
72
+ )
73
+
74
+ # merge samples
75
+ if down_block_res_samples is None and mid_block_res_sample is None:
76
+ down_block_res_samples, mid_block_res_sample = down_samples, mid_sample
77
+ else:
78
+ down_block_res_samples = [
79
+ samples_prev + samples_curr
80
+ for samples_prev, samples_curr in zip(down_block_res_samples, down_samples)
81
+ ]
82
+ mid_block_res_sample += mid_sample
83
+
84
+ return down_block_res_samples, mid_block_res_sample
85
+
86
+ # Copied from diffusers.models.controlnets.multicontrolnet.MultiControlNetModel.save_pretrained with ControlNet->ControlNetUnion
87
+ def save_pretrained(
88
+ self,
89
+ save_directory: Union[str, os.PathLike],
90
+ is_main_process: bool = True,
91
+ save_function: Callable = None,
92
+ safe_serialization: bool = True,
93
+ variant: Optional[str] = None,
94
+ ):
95
+ """
96
+ Save a model and its configuration file to a directory, so that it can be re-loaded using the
97
+ [`~models.controlnets.multicontrolnet.MultiControlNetUnionModel.from_pretrained`] class method.
98
+
99
+ Arguments:
100
+ save_directory (`str` or `os.PathLike`):
101
+ Directory to which to save. Will be created if it doesn't exist.
102
+ is_main_process (`bool`, *optional*, defaults to `True`):
103
+ Whether the process calling this is the main process or not. Useful when in distributed training like
104
+ TPUs and need to call this function on all processes. In this case, set `is_main_process=True` only on
105
+ the main process to avoid race conditions.
106
+ save_function (`Callable`):
107
+ The function to use to save the state dictionary. Useful on distributed training like TPUs when one
108
+ need to replace `torch.save` by another method. Can be configured with the environment variable
109
+ `DIFFUSERS_SAVE_MODE`.
110
+ safe_serialization (`bool`, *optional*, defaults to `True`):
111
+ Whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).
112
+ variant (`str`, *optional*):
113
+ If specified, weights are saved in the format pytorch_model.<variant>.bin.
114
+ """
115
+ for idx, controlnet in enumerate(self.nets):
116
+ suffix = "" if idx == 0 else f"_{idx}"
117
+ controlnet.save_pretrained(
118
+ save_directory + suffix,
119
+ is_main_process=is_main_process,
120
+ save_function=save_function,
121
+ safe_serialization=safe_serialization,
122
+ variant=variant,
123
+ )
124
+
125
+ @classmethod
126
+ # Copied from diffusers.models.controlnets.multicontrolnet.MultiControlNetModel.from_pretrained with ControlNet->ControlNetUnion
127
+ def from_pretrained(cls, pretrained_model_path: Optional[Union[str, os.PathLike]], **kwargs):
128
+ r"""
129
+ Instantiate a pretrained MultiControlNetUnion model from multiple pre-trained controlnet models.
130
+
131
+ The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated). To train
132
+ the model, you should first set it back in training mode with `model.train()`.
133
+
134
+ The warning *Weights from XXX not initialized from pretrained model* means that the weights of XXX do not come
135
+ pretrained with the rest of the model. It is up to you to train those weights with a downstream fine-tuning
136
+ task.
137
+
138
+ The warning *Weights from XXX not used in YYY* means that the layer XXX is not used by YYY, therefore those
139
+ weights are discarded.
140
+
141
+ Parameters:
142
+ pretrained_model_path (`os.PathLike`):
143
+ A path to a *directory* containing model weights saved using
144
+ [`~models.controlnets.multicontrolnet.MultiControlNetUnionModel.save_pretrained`], e.g.,
145
+ `./my_model_directory/controlnet`.
146
+ torch_dtype (`torch.dtype`, *optional*):
147
+ Override the default `torch.dtype` and load the model under this dtype.
148
+ output_loading_info(`bool`, *optional*, defaults to `False`):
149
+ Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.
150
+ device_map (`str` or `Dict[str, Union[int, str, torch.device]]`, *optional*):
151
+ A map that specifies where each submodule should go. It doesn't need to be refined to each
152
+ parameter/buffer name, once a given module name is inside, every submodule of it will be sent to the
153
+ same device.
154
+
155
+ To have Accelerate compute the most optimized `device_map` automatically, set `device_map="auto"`. For
156
+ more information about each option see [designing a device
157
+ map](https://hf.co/docs/accelerate/main/en/usage_guides/big_modeling#designing-a-device-map).
158
+ max_memory (`Dict`, *optional*):
159
+ A dictionary device identifier to maximum memory. Will default to the maximum memory available for each
160
+ GPU and the available CPU RAM if unset.
161
+ low_cpu_mem_usage (`bool`, *optional*, defaults to `True` if torch version >= 1.9.0 else `False`):
162
+ Speed up model loading by not initializing the weights and only loading the pre-trained weights. This
163
+ also tries to not use more than 1x model size in CPU memory (including peak memory) while loading the
164
+ model. This is only supported when torch version >= 1.9.0. If you are using an older version of torch,
165
+ setting this argument to `True` will raise an error.
166
+ variant (`str`, *optional*):
167
+ If specified load weights from `variant` filename, *e.g.* pytorch_model.<variant>.bin. `variant` is
168
+ ignored when using `from_flax`.
169
+ use_safetensors (`bool`, *optional*, defaults to `None`):
170
+ If set to `None`, the `safetensors` weights will be downloaded if they're available **and** if the
171
+ `safetensors` library is installed. If set to `True`, the model will be forcibly loaded from
172
+ `safetensors` weights. If set to `False`, loading will *not* use `safetensors`.
173
+ """
174
+ idx = 0
175
+ controlnets = []
176
+
177
+ # load controlnet and append to list until no controlnet directory exists anymore
178
+ # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
179
+ # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
180
+ model_path_to_load = pretrained_model_path
181
+ while os.path.isdir(model_path_to_load):
182
+ controlnet = ControlNetUnionModel.from_pretrained(model_path_to_load, **kwargs)
183
+ controlnets.append(controlnet)
184
+
185
+ idx += 1
186
+ model_path_to_load = pretrained_model_path + f"_{idx}"
187
+
188
+ logger.info(f"{len(controlnets)} controlnets loaded from {pretrained_model_path}.")
189
+
190
+ if len(controlnets) == 0:
191
+ raise ValueError(
192
+ f"No ControlNetUnions found under {os.path.dirname(pretrained_model_path)}. Expected at least {pretrained_model_path + '_0'}."
193
+ )
194
+
195
+ return cls(controlnets)
exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/models/transformers/__init__.py ADDED
@@ -0,0 +1,40 @@
1
+ from ...utils import is_torch_available
2
+
3
+
4
+ if is_torch_available():
5
+ from .auraflow_transformer_2d import AuraFlowTransformer2DModel
6
+ from .cogvideox_transformer_3d import CogVideoXTransformer3DModel
7
+ from .consisid_transformer_3d import ConsisIDTransformer3DModel
8
+ from .dit_transformer_2d import DiTTransformer2DModel
9
+ from .dual_transformer_2d import DualTransformer2DModel
10
+ from .hunyuan_transformer_2d import HunyuanDiT2DModel
11
+ from .latte_transformer_3d import LatteTransformer3DModel
12
+ from .lumina_nextdit2d import LuminaNextDiT2DModel
13
+ from .pixart_transformer_2d import PixArtTransformer2DModel
14
+ from .prior_transformer import PriorTransformer
15
+ from .sana_transformer import SanaTransformer2DModel
16
+ from .stable_audio_transformer import StableAudioDiTModel
17
+ from .t5_film_transformer import T5FilmDecoder
18
+ from .transformer_2d import Transformer2DModel
19
+ from .transformer_allegro import AllegroTransformer3DModel
20
+ from .transformer_bria import BriaTransformer2DModel
21
+ from .transformer_chroma import ChromaTransformer2DModel
22
+ from .transformer_cogview3plus import CogView3PlusTransformer2DModel
23
+ from .transformer_cogview4 import CogView4Transformer2DModel
24
+ from .transformer_cosmos import CosmosTransformer3DModel
25
+ from .transformer_easyanimate import EasyAnimateTransformer3DModel
26
+ from .transformer_flux import FluxTransformer2DModel
27
+ from .transformer_hidream_image import HiDreamImageTransformer2DModel
28
+ from .transformer_hunyuan_video import HunyuanVideoTransformer3DModel
29
+ from .transformer_hunyuan_video_framepack import HunyuanVideoFramepackTransformer3DModel
30
+ from .transformer_ltx import LTXVideoTransformer3DModel
31
+ from .transformer_lumina2 import Lumina2Transformer2DModel
32
+ from .transformer_mochi import MochiTransformer3DModel
33
+ from .transformer_omnigen import OmniGenTransformer2DModel
34
+ from .transformer_qwenimage import QwenImageTransformer2DModel
35
+ from .transformer_sd3 import SD3Transformer2DModel
36
+ from .transformer_skyreels_v2 import SkyReelsV2Transformer3DModel
37
+ from .transformer_temporal import TransformerTemporalModel
38
+ from .transformer_wan import WanTransformer3DModel
39
+ from .transformer_wan_s2v import WanS2VTransformer3DModel
40
+ from .transformer_wan_vace import WanVACETransformer3DModel
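Every class registered in this `__init__` sits behind `is_torch_available()`, so downstream code can either import the subpackage directly (when torch is known to be installed) or mirror the same guard. A minimal sketch of the pattern; the class picked below is arbitrary:

from diffusers.utils import is_torch_available

if is_torch_available():
    # Any of the names registered above resolves the same way; WanS2V is just an example.
    from diffusers.models.transformers import WanS2VTransformer3DModel

    print(WanS2VTransformer3DModel.__name__)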
exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/models/transformers/auraflow_transformer_2d.py ADDED
@@ -0,0 +1,564 @@
1
+ # Copyright 2025 AuraFlow Authors, The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+
16
+ from typing import Any, Dict, Optional, Union
17
+
18
+ import torch
19
+ import torch.nn as nn
20
+ import torch.nn.functional as F
21
+
22
+ from ...configuration_utils import ConfigMixin, register_to_config
23
+ from ...loaders import FromOriginalModelMixin, PeftAdapterMixin
24
+ from ...utils import USE_PEFT_BACKEND, logging, scale_lora_layers, unscale_lora_layers
25
+ from ...utils.torch_utils import maybe_allow_in_graph
26
+ from ..attention_processor import (
27
+ Attention,
28
+ AttentionProcessor,
29
+ AuraFlowAttnProcessor2_0,
30
+ FusedAuraFlowAttnProcessor2_0,
31
+ )
32
+ from ..embeddings import TimestepEmbedding, Timesteps
33
+ from ..modeling_outputs import Transformer2DModelOutput
34
+ from ..modeling_utils import ModelMixin
35
+ from ..normalization import AdaLayerNormZero, FP32LayerNorm
36
+
37
+
38
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
39
+
40
+
41
+ # Taken from the original aura flow inference code.
42
+ def find_multiple(n: int, k: int) -> int:
43
+ if n % k == 0:
44
+ return n
45
+ return n + k - (n % k)
46
+
47
+
48
+ # Aura Flow patch embed doesn't use convs for projections.
49
+ # Additionally, it uses learned positional embeddings.
50
+ class AuraFlowPatchEmbed(nn.Module):
51
+ def __init__(
52
+ self,
53
+ height=224,
54
+ width=224,
55
+ patch_size=16,
56
+ in_channels=3,
57
+ embed_dim=768,
58
+ pos_embed_max_size=None,
59
+ ):
60
+ super().__init__()
61
+
62
+ self.num_patches = (height // patch_size) * (width // patch_size)
63
+ self.pos_embed_max_size = pos_embed_max_size
64
+
65
+ self.proj = nn.Linear(patch_size * patch_size * in_channels, embed_dim)
66
+ self.pos_embed = nn.Parameter(torch.randn(1, pos_embed_max_size, embed_dim) * 0.1)
67
+
68
+ self.patch_size = patch_size
69
+ self.height, self.width = height // patch_size, width // patch_size
70
+ self.base_size = height // patch_size
71
+
72
+ def pe_selection_index_based_on_dim(self, h, w):
73
+ # select subset of positional embedding based on H, W, where H, W is size of latent
74
+ # PE will be viewed as 2d-grid, and H/p x W/p of the PE will be selected
75
+ # because original input are in flattened format, we have to flatten this 2d grid as well.
76
+ h_p, w_p = h // self.patch_size, w // self.patch_size
77
+ h_max, w_max = int(self.pos_embed_max_size**0.5), int(self.pos_embed_max_size**0.5)
78
+
79
+ # Calculate the top-left corner indices for the centered patch grid
80
+ starth = h_max // 2 - h_p // 2
81
+ startw = w_max // 2 - w_p // 2
82
+
83
+ # Generate the row and column indices for the desired patch grid
84
+ rows = torch.arange(starth, starth + h_p, device=self.pos_embed.device)
85
+ cols = torch.arange(startw, startw + w_p, device=self.pos_embed.device)
86
+
87
+ # Create a 2D grid of indices
88
+ row_indices, col_indices = torch.meshgrid(rows, cols, indexing="ij")
89
+
90
+ # Convert the 2D grid indices to flattened 1D indices
91
+ selected_indices = (row_indices * w_max + col_indices).flatten()
92
+
93
+ return selected_indices
94
+
95
+ def forward(self, latent):
96
+ batch_size, num_channels, height, width = latent.size()
97
+ latent = latent.view(
98
+ batch_size,
99
+ num_channels,
100
+ height // self.patch_size,
101
+ self.patch_size,
102
+ width // self.patch_size,
103
+ self.patch_size,
104
+ )
105
+ latent = latent.permute(0, 2, 4, 1, 3, 5).flatten(-3).flatten(1, 2)
106
+ latent = self.proj(latent)
107
+ pe_index = self.pe_selection_index_based_on_dim(height, width)
108
+ return latent + self.pos_embed[:, pe_index]
109
+
110
+
111
+ # Taken from the original Aura flow inference code.
112
+ # Our feedforward only has GELU but Aura uses SiLU.
113
+ class AuraFlowFeedForward(nn.Module):
114
+ def __init__(self, dim, hidden_dim=None) -> None:
115
+ super().__init__()
116
+ if hidden_dim is None:
117
+ hidden_dim = 4 * dim
118
+
119
+ final_hidden_dim = int(2 * hidden_dim / 3)
120
+ final_hidden_dim = find_multiple(final_hidden_dim, 256)
121
+
122
+ self.linear_1 = nn.Linear(dim, final_hidden_dim, bias=False)
123
+ self.linear_2 = nn.Linear(dim, final_hidden_dim, bias=False)
124
+ self.out_projection = nn.Linear(final_hidden_dim, dim, bias=False)
125
+
126
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
127
+ x = F.silu(self.linear_1(x)) * self.linear_2(x)
128
+ x = self.out_projection(x)
129
+ return x
130
+
131
+
132
+ class AuraFlowPreFinalBlock(nn.Module):
133
+ def __init__(self, embedding_dim: int, conditioning_embedding_dim: int):
134
+ super().__init__()
135
+
136
+ self.silu = nn.SiLU()
137
+ self.linear = nn.Linear(conditioning_embedding_dim, embedding_dim * 2, bias=False)
138
+
139
+ def forward(self, x: torch.Tensor, conditioning_embedding: torch.Tensor) -> torch.Tensor:
140
+ emb = self.linear(self.silu(conditioning_embedding).to(x.dtype))
141
+ scale, shift = torch.chunk(emb, 2, dim=1)
142
+ x = x * (1 + scale)[:, None, :] + shift[:, None, :]
143
+ return x
144
+
145
+
146
+ @maybe_allow_in_graph
147
+ class AuraFlowSingleTransformerBlock(nn.Module):
148
+ """Similar to `AuraFlowJointTransformerBlock` with a single DiT instead of an MMDiT."""
149
+
150
+ def __init__(self, dim, num_attention_heads, attention_head_dim):
151
+ super().__init__()
152
+
153
+ self.norm1 = AdaLayerNormZero(dim, bias=False, norm_type="fp32_layer_norm")
154
+
155
+ processor = AuraFlowAttnProcessor2_0()
156
+ self.attn = Attention(
157
+ query_dim=dim,
158
+ cross_attention_dim=None,
159
+ dim_head=attention_head_dim,
160
+ heads=num_attention_heads,
161
+ qk_norm="fp32_layer_norm",
162
+ out_dim=dim,
163
+ bias=False,
164
+ out_bias=False,
165
+ processor=processor,
166
+ )
167
+
168
+ self.norm2 = FP32LayerNorm(dim, elementwise_affine=False, bias=False)
169
+ self.ff = AuraFlowFeedForward(dim, dim * 4)
170
+
171
+ def forward(
172
+ self,
173
+ hidden_states: torch.FloatTensor,
174
+ temb: torch.FloatTensor,
175
+ attention_kwargs: Optional[Dict[str, Any]] = None,
176
+ ):
177
+ residual = hidden_states
178
+ attention_kwargs = attention_kwargs or {}
179
+
180
+ # Norm + Projection.
181
+ norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb)
182
+
183
+ # Attention.
184
+ attn_output = self.attn(hidden_states=norm_hidden_states, **attention_kwargs)
185
+
186
+ # Process attention outputs for the `hidden_states`.
187
+ hidden_states = self.norm2(residual + gate_msa.unsqueeze(1) * attn_output)
188
+ hidden_states = hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
189
+ ff_output = self.ff(hidden_states)
190
+ hidden_states = gate_mlp.unsqueeze(1) * ff_output
191
+ hidden_states = residual + hidden_states
192
+
193
+ return hidden_states
194
+
195
+
196
+ @maybe_allow_in_graph
197
+ class AuraFlowJointTransformerBlock(nn.Module):
198
+ r"""
199
+ Transformer block for Aura Flow. Similar to SD3 MMDiT. Differences (non-exhaustive):
200
+
201
+ * QK Norm in the attention blocks
202
+ * No bias in the attention blocks
203
+ * Most LayerNorms are in FP32
204
+
205
+ Parameters:
206
+ dim (`int`): The number of channels in the input and output.
207
+ num_attention_heads (`int`): The number of heads to use for multi-head attention.
208
+ attention_head_dim (`int`): The number of channels in each head.
209
+ is_last (`bool`): Boolean to determine if this is the last block in the model.
210
+ """
211
+
212
+ def __init__(self, dim, num_attention_heads, attention_head_dim):
213
+ super().__init__()
214
+
215
+ self.norm1 = AdaLayerNormZero(dim, bias=False, norm_type="fp32_layer_norm")
216
+ self.norm1_context = AdaLayerNormZero(dim, bias=False, norm_type="fp32_layer_norm")
217
+
218
+ processor = AuraFlowAttnProcessor2_0()
219
+ self.attn = Attention(
220
+ query_dim=dim,
221
+ cross_attention_dim=None,
222
+ added_kv_proj_dim=dim,
223
+ added_proj_bias=False,
224
+ dim_head=attention_head_dim,
225
+ heads=num_attention_heads,
226
+ qk_norm="fp32_layer_norm",
227
+ out_dim=dim,
228
+ bias=False,
229
+ out_bias=False,
230
+ processor=processor,
231
+ context_pre_only=False,
232
+ )
233
+
234
+ self.norm2 = FP32LayerNorm(dim, elementwise_affine=False, bias=False)
235
+ self.ff = AuraFlowFeedForward(dim, dim * 4)
236
+ self.norm2_context = FP32LayerNorm(dim, elementwise_affine=False, bias=False)
237
+ self.ff_context = AuraFlowFeedForward(dim, dim * 4)
238
+
239
+ def forward(
240
+ self,
241
+ hidden_states: torch.FloatTensor,
242
+ encoder_hidden_states: torch.FloatTensor,
243
+ temb: torch.FloatTensor,
244
+ attention_kwargs: Optional[Dict[str, Any]] = None,
245
+ ):
246
+ residual = hidden_states
247
+ residual_context = encoder_hidden_states
248
+ attention_kwargs = attention_kwargs or {}
249
+
250
+ # Norm + Projection.
251
+ norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb)
252
+ norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context(
253
+ encoder_hidden_states, emb=temb
254
+ )
255
+
256
+ # Attention.
257
+ attn_output, context_attn_output = self.attn(
258
+ hidden_states=norm_hidden_states,
259
+ encoder_hidden_states=norm_encoder_hidden_states,
260
+ **attention_kwargs,
261
+ )
262
+
263
+ # Process attention outputs for the `hidden_states`.
264
+ hidden_states = self.norm2(residual + gate_msa.unsqueeze(1) * attn_output)
265
+ hidden_states = hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
266
+ hidden_states = gate_mlp.unsqueeze(1) * self.ff(hidden_states)
267
+ hidden_states = residual + hidden_states
268
+
269
+ # Process attention outputs for the `encoder_hidden_states`.
270
+ encoder_hidden_states = self.norm2_context(residual_context + c_gate_msa.unsqueeze(1) * context_attn_output)
271
+ encoder_hidden_states = encoder_hidden_states * (1 + c_scale_mlp[:, None]) + c_shift_mlp[:, None]
272
+ encoder_hidden_states = c_gate_mlp.unsqueeze(1) * self.ff_context(encoder_hidden_states)
273
+ encoder_hidden_states = residual_context + encoder_hidden_states
274
+
275
+ return encoder_hidden_states, hidden_states
276
+
277
+
278
+ class AuraFlowTransformer2DModel(ModelMixin, ConfigMixin, PeftAdapterMixin, FromOriginalModelMixin):
279
+ r"""
280
+ A 2D Transformer model as introduced in AuraFlow (https://blog.fal.ai/auraflow/).
281
+
282
+ Parameters:
283
+ sample_size (`int`): The width of the latent images. This is fixed during training since
284
+ it is used to learn a number of position embeddings.
285
+ patch_size (`int`): Patch size to turn the input data into small patches.
286
+ in_channels (`int`, *optional*, defaults to 4): The number of channels in the input.
287
+ num_mmdit_layers (`int`, *optional*, defaults to 4): The number of layers of MMDiT Transformer blocks to use.
288
+ num_single_dit_layers (`int`, *optional*, defaults to 32):
289
+ The number of layers of Transformer blocks to use. These blocks use concatenated image and text
290
+ representations.
291
+ attention_head_dim (`int`, *optional*, defaults to 256): The number of channels in each head.
292
+ num_attention_heads (`int`, *optional*, defaults to 12): The number of heads to use for multi-head attention.
293
+ joint_attention_dim (`int`, *optional*): The number of `encoder_hidden_states` dimensions to use.
294
+ caption_projection_dim (`int`): Number of dimensions to use when projecting the `encoder_hidden_states`.
295
+ out_channels (`int`, defaults to 4): Number of output channels.
296
+ pos_embed_max_size (`int`, defaults to 1024): Maximum positions to embed from the image latents.
297
+ """
298
+
299
+ _no_split_modules = ["AuraFlowJointTransformerBlock", "AuraFlowSingleTransformerBlock", "AuraFlowPatchEmbed"]
300
+ _skip_layerwise_casting_patterns = ["pos_embed", "norm"]
301
+ _supports_gradient_checkpointing = True
302
+
303
+ @register_to_config
304
+ def __init__(
305
+ self,
306
+ sample_size: int = 64,
307
+ patch_size: int = 2,
308
+ in_channels: int = 4,
309
+ num_mmdit_layers: int = 4,
310
+ num_single_dit_layers: int = 32,
311
+ attention_head_dim: int = 256,
312
+ num_attention_heads: int = 12,
313
+ joint_attention_dim: int = 2048,
314
+ caption_projection_dim: int = 3072,
315
+ out_channels: int = 4,
316
+ pos_embed_max_size: int = 1024,
317
+ ):
318
+ super().__init__()
319
+ default_out_channels = in_channels
320
+ self.out_channels = out_channels if out_channels is not None else default_out_channels
321
+ self.inner_dim = self.config.num_attention_heads * self.config.attention_head_dim
322
+
323
+ self.pos_embed = AuraFlowPatchEmbed(
324
+ height=self.config.sample_size,
325
+ width=self.config.sample_size,
326
+ patch_size=self.config.patch_size,
327
+ in_channels=self.config.in_channels,
328
+ embed_dim=self.inner_dim,
329
+ pos_embed_max_size=pos_embed_max_size,
330
+ )
331
+
332
+ self.context_embedder = nn.Linear(
333
+ self.config.joint_attention_dim, self.config.caption_projection_dim, bias=False
334
+ )
335
+ self.time_step_embed = Timesteps(num_channels=256, downscale_freq_shift=0, scale=1000, flip_sin_to_cos=True)
336
+ self.time_step_proj = TimestepEmbedding(in_channels=256, time_embed_dim=self.inner_dim)
337
+
338
+ self.joint_transformer_blocks = nn.ModuleList(
339
+ [
340
+ AuraFlowJointTransformerBlock(
341
+ dim=self.inner_dim,
342
+ num_attention_heads=self.config.num_attention_heads,
343
+ attention_head_dim=self.config.attention_head_dim,
344
+ )
345
+ for i in range(self.config.num_mmdit_layers)
346
+ ]
347
+ )
348
+ self.single_transformer_blocks = nn.ModuleList(
349
+ [
350
+ AuraFlowSingleTransformerBlock(
351
+ dim=self.inner_dim,
352
+ num_attention_heads=self.config.num_attention_heads,
353
+ attention_head_dim=self.config.attention_head_dim,
354
+ )
355
+ for _ in range(self.config.num_single_dit_layers)
356
+ ]
357
+ )
358
+
359
+ self.norm_out = AuraFlowPreFinalBlock(self.inner_dim, self.inner_dim)
360
+ self.proj_out = nn.Linear(self.inner_dim, patch_size * patch_size * self.out_channels, bias=False)
361
+
362
+ # https://huggingface.co/papers/2309.16588
363
+ # prevents artifacts in the attention maps
364
+ self.register_tokens = nn.Parameter(torch.randn(1, 8, self.inner_dim) * 0.02)
365
+
366
+ self.gradient_checkpointing = False
367
+
368
+ @property
369
+ # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.attn_processors
370
+ def attn_processors(self) -> Dict[str, AttentionProcessor]:
371
+ r"""
372
+ Returns:
373
+ `dict` of attention processors: A dictionary containing all attention processors used in the model,
374
+ indexed by their weight names.
375
+ """
376
+ # set recursively
377
+ processors = {}
378
+
379
+ def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
380
+ if hasattr(module, "get_processor"):
381
+ processors[f"{name}.processor"] = module.get_processor()
382
+
383
+ for sub_name, child in module.named_children():
384
+ fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)
385
+
386
+ return processors
387
+
388
+ for name, module in self.named_children():
389
+ fn_recursive_add_processors(name, module, processors)
390
+
391
+ return processors
392
+
393
+ # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_attn_processor
394
+ def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
395
+ r"""
396
+ Sets the attention processor to use to compute attention.
397
+
398
+ Parameters:
399
+ processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`):
400
+ The instantiated processor class or a dictionary of processor classes that will be set as the processor
401
+ for **all** `Attention` layers.
402
+
403
+ If `processor` is a dict, the key needs to define the path to the corresponding cross attention
404
+ processor. This is strongly recommended when setting trainable attention processors.
405
+
406
+ """
407
+ count = len(self.attn_processors.keys())
408
+
409
+ if isinstance(processor, dict) and len(processor) != count:
410
+ raise ValueError(
411
+ f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
412
+ f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
413
+ )
414
+
415
+ def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
416
+ if hasattr(module, "set_processor"):
417
+ if not isinstance(processor, dict):
418
+ module.set_processor(processor)
419
+ else:
420
+ module.set_processor(processor.pop(f"{name}.processor"))
421
+
422
+ for sub_name, child in module.named_children():
423
+ fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)
424
+
425
+ for name, module in self.named_children():
426
+ fn_recursive_attn_processor(name, module, processor)
427
+
428
+ # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.fuse_qkv_projections with FusedAttnProcessor2_0->FusedAuraFlowAttnProcessor2_0
429
+ def fuse_qkv_projections(self):
430
+ """
431
+ Enables fused QKV projections. For self-attention modules, all projection matrices (i.e., query, key, value)
432
+ are fused. For cross-attention modules, key and value projection matrices are fused.
433
+
434
+ <Tip warning={true}>
435
+
436
+ This API is 🧪 experimental.
437
+
438
+ </Tip>
439
+ """
440
+ self.original_attn_processors = None
441
+
442
+ for _, attn_processor in self.attn_processors.items():
443
+ if "Added" in str(attn_processor.__class__.__name__):
444
+ raise ValueError("`fuse_qkv_projections()` is not supported for models having added KV projections.")
445
+
446
+ self.original_attn_processors = self.attn_processors
447
+
448
+ for module in self.modules():
449
+ if isinstance(module, Attention):
450
+ module.fuse_projections(fuse=True)
451
+
452
+ self.set_attn_processor(FusedAuraFlowAttnProcessor2_0())
453
+
454
+ # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.unfuse_qkv_projections
455
+ def unfuse_qkv_projections(self):
456
+ """Disables the fused QKV projection if enabled.
457
+
458
+ <Tip warning={true}>
459
+
460
+ This API is 🧪 experimental.
461
+
462
+ </Tip>
463
+
464
+ """
465
+ if self.original_attn_processors is not None:
466
+ self.set_attn_processor(self.original_attn_processors)
467
+
468
+ def forward(
469
+ self,
470
+ hidden_states: torch.FloatTensor,
471
+ encoder_hidden_states: torch.FloatTensor = None,
472
+ timestep: torch.LongTensor = None,
473
+ attention_kwargs: Optional[Dict[str, Any]] = None,
474
+ return_dict: bool = True,
475
+ ) -> Union[torch.FloatTensor, Transformer2DModelOutput]:
476
+ if attention_kwargs is not None:
477
+ attention_kwargs = attention_kwargs.copy()
478
+ lora_scale = attention_kwargs.pop("scale", 1.0)
479
+ else:
480
+ lora_scale = 1.0
481
+
482
+ if USE_PEFT_BACKEND:
483
+ # weight the lora layers by setting `lora_scale` for each PEFT layer
484
+ scale_lora_layers(self, lora_scale)
485
+ else:
486
+ if attention_kwargs is not None and attention_kwargs.get("scale", None) is not None:
487
+ logger.warning(
488
+ "Passing `scale` via `attention_kwargs` when not using the PEFT backend is ineffective."
489
+ )
490
+
491
+ height, width = hidden_states.shape[-2:]
492
+
493
+ # Apply patch embedding, timestep embedding, and project the caption embeddings.
494
+ hidden_states = self.pos_embed(hidden_states) # takes care of adding positional embeddings too.
495
+ temb = self.time_step_embed(timestep).to(dtype=next(self.parameters()).dtype)
496
+ temb = self.time_step_proj(temb)
497
+ encoder_hidden_states = self.context_embedder(encoder_hidden_states)
498
+ encoder_hidden_states = torch.cat(
499
+ [self.register_tokens.repeat(encoder_hidden_states.size(0), 1, 1), encoder_hidden_states], dim=1
500
+ )
501
+
502
+ # MMDiT blocks.
503
+ for index_block, block in enumerate(self.joint_transformer_blocks):
504
+ if torch.is_grad_enabled() and self.gradient_checkpointing:
505
+ encoder_hidden_states, hidden_states = self._gradient_checkpointing_func(
506
+ block,
507
+ hidden_states,
508
+ encoder_hidden_states,
509
+ temb,
510
+ )
511
+
512
+ else:
513
+ encoder_hidden_states, hidden_states = block(
514
+ hidden_states=hidden_states,
515
+ encoder_hidden_states=encoder_hidden_states,
516
+ temb=temb,
517
+ attention_kwargs=attention_kwargs,
518
+ )
519
+
520
+ # Single DiT blocks that combine the `hidden_states` (image) and `encoder_hidden_states` (text)
521
+ if len(self.single_transformer_blocks) > 0:
522
+ encoder_seq_len = encoder_hidden_states.size(1)
523
+ combined_hidden_states = torch.cat([encoder_hidden_states, hidden_states], dim=1)
524
+
525
+ for index_block, block in enumerate(self.single_transformer_blocks):
526
+ if torch.is_grad_enabled() and self.gradient_checkpointing:
527
+ combined_hidden_states = self._gradient_checkpointing_func(
528
+ block,
529
+ combined_hidden_states,
530
+ temb,
531
+ )
532
+
533
+ else:
534
+ combined_hidden_states = block(
535
+ hidden_states=combined_hidden_states, temb=temb, attention_kwargs=attention_kwargs
536
+ )
537
+
538
+ hidden_states = combined_hidden_states[:, encoder_seq_len:]
539
+
540
+ hidden_states = self.norm_out(hidden_states, temb)
541
+ hidden_states = self.proj_out(hidden_states)
542
+
543
+ # unpatchify
544
+ patch_size = self.config.patch_size
545
+ out_channels = self.config.out_channels
546
+ height = height // patch_size
547
+ width = width // patch_size
548
+
549
+ hidden_states = hidden_states.reshape(
550
+ shape=(hidden_states.shape[0], height, width, patch_size, patch_size, out_channels)
551
+ )
552
+ hidden_states = torch.einsum("nhwpqc->nchpwq", hidden_states)
553
+ output = hidden_states.reshape(
554
+ shape=(hidden_states.shape[0], out_channels, height * patch_size, width * patch_size)
555
+ )
556
+
557
+ if USE_PEFT_BACKEND:
558
+ # remove `lora_scale` from each PEFT layer
559
+ unscale_lora_layers(self, lora_scale)
560
+
561
+ if not return_dict:
562
+ return (output,)
563
+
564
+ return Transformer2DModelOutput(sample=output)
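The reshaping in `AuraFlowPatchEmbed.forward` and the unpatchify step at the end of `AuraFlowTransformer2DModel.forward` can be traced with plain torch. The sketch below only checks shapes with toy sizes (in the real model, linear projections sit between the two steps and reorder the channel/patch axes); all numbers are illustrative:

import torch

# Toy sizes: batch 2, 4 latent channels, 8x8 latent grid, patch size 2.
batch, channels, height, width = 2, 4, 8, 8
patch_size = 2
latent = torch.randn(batch, channels, height, width)

# Patchify as in AuraFlowPatchEmbed.forward before the linear projection:
# (B, C, H, W) -> (B, H/p * W/p, C * p * p)
patches = latent.view(
    batch, channels, height // patch_size, patch_size, width // patch_size, patch_size
)
patches = patches.permute(0, 2, 4, 1, 3, 5).flatten(-3).flatten(1, 2)
print(patches.shape)  # torch.Size([2, 16, 16]) == (B, num_patches, C * p * p)

# Unpatchify as at the end of AuraFlowTransformer2DModel.forward:
# (B, num_patches, p * p * C) -> (B, C, H, W)
h, w = height // patch_size, width // patch_size
out = patches.reshape(batch, h, w, patch_size, patch_size, channels)
out = torch.einsum("nhwpqc->nchpwq", out)
out = out.reshape(batch, channels, h * patch_size, w * patch_size)
print(out.shape)  # torch.Size([2, 4, 8, 8]) == (B, C, H, W)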
exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/models/transformers/cogvideox_transformer_3d.py ADDED
@@ -0,0 +1,531 @@
1
+ # Copyright 2025 The CogVideoX team, Tsinghua University & ZhipuAI and The HuggingFace Team.
2
+ # All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ from typing import Any, Dict, Optional, Tuple, Union
17
+
18
+ import torch
19
+ from torch import nn
20
+
21
+ from ...configuration_utils import ConfigMixin, register_to_config
22
+ from ...loaders import PeftAdapterMixin
23
+ from ...utils import USE_PEFT_BACKEND, logging, scale_lora_layers, unscale_lora_layers
24
+ from ...utils.torch_utils import maybe_allow_in_graph
25
+ from ..attention import Attention, FeedForward
26
+ from ..attention_processor import AttentionProcessor, CogVideoXAttnProcessor2_0, FusedCogVideoXAttnProcessor2_0
27
+ from ..cache_utils import CacheMixin
28
+ from ..embeddings import CogVideoXPatchEmbed, TimestepEmbedding, Timesteps
29
+ from ..modeling_outputs import Transformer2DModelOutput
30
+ from ..modeling_utils import ModelMixin
31
+ from ..normalization import AdaLayerNorm, CogVideoXLayerNormZero
32
+
33
+
34
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
35
+
36
+
37
+ @maybe_allow_in_graph
38
+ class CogVideoXBlock(nn.Module):
39
+ r"""
40
+ Transformer block used in [CogVideoX](https://github.com/THUDM/CogVideo) model.
41
+
42
+ Parameters:
43
+ dim (`int`):
44
+ The number of channels in the input and output.
45
+ num_attention_heads (`int`):
46
+ The number of heads to use for multi-head attention.
47
+ attention_head_dim (`int`):
48
+ The number of channels in each head.
49
+ time_embed_dim (`int`):
50
+ The number of channels in timestep embedding.
51
+ dropout (`float`, defaults to `0.0`):
52
+ The dropout probability to use.
53
+ activation_fn (`str`, defaults to `"gelu-approximate"`):
54
+ Activation function to be used in feed-forward.
55
+ attention_bias (`bool`, defaults to `False`):
56
+ Whether or not to use bias in attention projection layers.
57
+ qk_norm (`bool`, defaults to `True`):
58
+ Whether or not to use normalization after query and key projections in Attention.
59
+ norm_elementwise_affine (`bool`, defaults to `True`):
60
+ Whether to use learnable elementwise affine parameters for normalization.
61
+ norm_eps (`float`, defaults to `1e-5`):
62
+ Epsilon value for normalization layers.
63
+ final_dropout (`bool`, defaults to `True`):
64
+ Whether to apply a final dropout after the last feed-forward layer.
65
+ ff_inner_dim (`int`, *optional*, defaults to `None`):
66
+ Custom hidden dimension of Feed-forward layer. If not provided, `4 * dim` is used.
67
+ ff_bias (`bool`, defaults to `True`):
68
+ Whether or not to use bias in Feed-forward layer.
69
+ attention_out_bias (`bool`, defaults to `True`):
70
+ Whether or not to use bias in Attention output projection layer.
71
+ """
72
+
73
+ def __init__(
74
+ self,
75
+ dim: int,
76
+ num_attention_heads: int,
77
+ attention_head_dim: int,
78
+ time_embed_dim: int,
79
+ dropout: float = 0.0,
80
+ activation_fn: str = "gelu-approximate",
81
+ attention_bias: bool = False,
82
+ qk_norm: bool = True,
83
+ norm_elementwise_affine: bool = True,
84
+ norm_eps: float = 1e-5,
85
+ final_dropout: bool = True,
86
+ ff_inner_dim: Optional[int] = None,
87
+ ff_bias: bool = True,
88
+ attention_out_bias: bool = True,
89
+ ):
90
+ super().__init__()
91
+
92
+ # 1. Self Attention
93
+ self.norm1 = CogVideoXLayerNormZero(time_embed_dim, dim, norm_elementwise_affine, norm_eps, bias=True)
94
+
95
+ self.attn1 = Attention(
96
+ query_dim=dim,
97
+ dim_head=attention_head_dim,
98
+ heads=num_attention_heads,
99
+ qk_norm="layer_norm" if qk_norm else None,
100
+ eps=1e-6,
101
+ bias=attention_bias,
102
+ out_bias=attention_out_bias,
103
+ processor=CogVideoXAttnProcessor2_0(),
104
+ )
105
+
106
+ # 2. Feed Forward
107
+ self.norm2 = CogVideoXLayerNormZero(time_embed_dim, dim, norm_elementwise_affine, norm_eps, bias=True)
108
+
109
+ self.ff = FeedForward(
110
+ dim,
111
+ dropout=dropout,
112
+ activation_fn=activation_fn,
113
+ final_dropout=final_dropout,
114
+ inner_dim=ff_inner_dim,
115
+ bias=ff_bias,
116
+ )
117
+
118
+ def forward(
119
+ self,
120
+ hidden_states: torch.Tensor,
121
+ encoder_hidden_states: torch.Tensor,
122
+ temb: torch.Tensor,
123
+ image_rotary_emb: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
124
+ attention_kwargs: Optional[Dict[str, Any]] = None,
125
+ ) -> torch.Tensor:
126
+ text_seq_length = encoder_hidden_states.size(1)
127
+ attention_kwargs = attention_kwargs or {}
128
+
129
+ # norm & modulate
130
+ norm_hidden_states, norm_encoder_hidden_states, gate_msa, enc_gate_msa = self.norm1(
131
+ hidden_states, encoder_hidden_states, temb
132
+ )
133
+
134
+ # attention
135
+ attn_hidden_states, attn_encoder_hidden_states = self.attn1(
136
+ hidden_states=norm_hidden_states,
137
+ encoder_hidden_states=norm_encoder_hidden_states,
138
+ image_rotary_emb=image_rotary_emb,
139
+ **attention_kwargs,
140
+ )
141
+
142
+ hidden_states = hidden_states + gate_msa * attn_hidden_states
143
+ encoder_hidden_states = encoder_hidden_states + enc_gate_msa * attn_encoder_hidden_states
144
+
145
+ # norm & modulate
146
+ norm_hidden_states, norm_encoder_hidden_states, gate_ff, enc_gate_ff = self.norm2(
147
+ hidden_states, encoder_hidden_states, temb
148
+ )
149
+
150
+ # feed-forward
151
+ norm_hidden_states = torch.cat([norm_encoder_hidden_states, norm_hidden_states], dim=1)
152
+ ff_output = self.ff(norm_hidden_states)
153
+
154
+ hidden_states = hidden_states + gate_ff * ff_output[:, text_seq_length:]
155
+ encoder_hidden_states = encoder_hidden_states + enc_gate_ff * ff_output[:, :text_seq_length]
156
+
157
+ return hidden_states, encoder_hidden_states
158
+
159
+
160
+ class CogVideoXTransformer3DModel(ModelMixin, ConfigMixin, PeftAdapterMixin, CacheMixin):
161
+ """
162
+ A Transformer model for video-like data in [CogVideoX](https://github.com/THUDM/CogVideo).
163
+
164
+ Parameters:
165
+ num_attention_heads (`int`, defaults to `30`):
166
+ The number of heads to use for multi-head attention.
167
+ attention_head_dim (`int`, defaults to `64`):
168
+ The number of channels in each head.
169
+ in_channels (`int`, defaults to `16`):
170
+ The number of channels in the input.
171
+ out_channels (`int`, *optional*, defaults to `16`):
172
+ The number of channels in the output.
173
+ flip_sin_to_cos (`bool`, defaults to `True`):
174
+ Whether to flip the sin to cos in the time embedding.
175
+ time_embed_dim (`int`, defaults to `512`):
176
+ Output dimension of timestep embeddings.
177
+ ofs_embed_dim (`int`, defaults to `512`):
178
+ Output dimension of "ofs" embeddings used in CogVideoX-5B I2V in version 1.5.
179
+ text_embed_dim (`int`, defaults to `4096`):
180
+ Input dimension of text embeddings from the text encoder.
181
+ num_layers (`int`, defaults to `30`):
182
+ The number of layers of Transformer blocks to use.
183
+ dropout (`float`, defaults to `0.0`):
184
+ The dropout probability to use.
185
+ attention_bias (`bool`, defaults to `True`):
186
+ Whether to use bias in the attention projection layers.
187
+ sample_width (`int`, defaults to `90`):
188
+ The width of the input latents.
189
+ sample_height (`int`, defaults to `60`):
190
+ The height of the input latents.
191
+ sample_frames (`int`, defaults to `49`):
192
+ The number of frames in the input latents. Note that this parameter was incorrectly initialized to 49
193
+ instead of 13 because CogVideoX processed 13 latent frames at once in its default and recommended settings,
194
+ but cannot be changed to the correct value to ensure backwards compatibility. To create a transformer with
195
+ K latent frames, the correct value to pass here would be: ((K - 1) * temporal_compression_ratio + 1).
196
+ patch_size (`int`, defaults to `2`):
197
+ The size of the patches to use in the patch embedding layer.
198
+ temporal_compression_ratio (`int`, defaults to `4`):
199
+ The compression ratio across the temporal dimension. See documentation for `sample_frames`.
200
+ max_text_seq_length (`int`, defaults to `226`):
201
+ The maximum sequence length of the input text embeddings.
202
+ activation_fn (`str`, defaults to `"gelu-approximate"`):
203
+ Activation function to use in feed-forward.
204
+ timestep_activation_fn (`str`, defaults to `"silu"`):
205
+ Activation function to use when generating the timestep embeddings.
206
+ norm_elementwise_affine (`bool`, defaults to `True`):
207
+ Whether to use elementwise affine in normalization layers.
208
+ norm_eps (`float`, defaults to `1e-5`):
209
+ The epsilon value to use in normalization layers.
210
+ spatial_interpolation_scale (`float`, defaults to `1.875`):
211
+ Scaling factor to apply in 3D positional embeddings across spatial dimensions.
212
+ temporal_interpolation_scale (`float`, defaults to `1.0`):
213
+ Scaling factor to apply in 3D positional embeddings across temporal dimensions.
214
+ """
215
+
216
+ _skip_layerwise_casting_patterns = ["patch_embed", "norm"]
217
+ _supports_gradient_checkpointing = True
218
+ _no_split_modules = ["CogVideoXBlock", "CogVideoXPatchEmbed"]
219
+
220
+ @register_to_config
221
+ def __init__(
222
+ self,
223
+ num_attention_heads: int = 30,
224
+ attention_head_dim: int = 64,
225
+ in_channels: int = 16,
226
+ out_channels: Optional[int] = 16,
227
+ flip_sin_to_cos: bool = True,
228
+ freq_shift: int = 0,
229
+ time_embed_dim: int = 512,
230
+ ofs_embed_dim: Optional[int] = None,
231
+ text_embed_dim: int = 4096,
232
+ num_layers: int = 30,
233
+ dropout: float = 0.0,
234
+ attention_bias: bool = True,
235
+ sample_width: int = 90,
236
+ sample_height: int = 60,
237
+ sample_frames: int = 49,
238
+ patch_size: int = 2,
239
+ patch_size_t: Optional[int] = None,
240
+ temporal_compression_ratio: int = 4,
241
+ max_text_seq_length: int = 226,
242
+ activation_fn: str = "gelu-approximate",
243
+ timestep_activation_fn: str = "silu",
244
+ norm_elementwise_affine: bool = True,
245
+ norm_eps: float = 1e-5,
246
+ spatial_interpolation_scale: float = 1.875,
247
+ temporal_interpolation_scale: float = 1.0,
248
+ use_rotary_positional_embeddings: bool = False,
249
+ use_learned_positional_embeddings: bool = False,
250
+ patch_bias: bool = True,
251
+ ):
252
+ super().__init__()
253
+ inner_dim = num_attention_heads * attention_head_dim
254
+
255
+ if not use_rotary_positional_embeddings and use_learned_positional_embeddings:
256
+ raise ValueError(
257
+ "There are no CogVideoX checkpoints available that disable rotary embeddings and use learned positional "
258
+ "embeddings. If you're using a custom model and/or believe this should be supported, please open an "
259
+ "issue at https://github.com/huggingface/diffusers/issues."
260
+ )
261
+
262
+ # 1. Patch embedding
263
+ self.patch_embed = CogVideoXPatchEmbed(
264
+ patch_size=patch_size,
265
+ patch_size_t=patch_size_t,
266
+ in_channels=in_channels,
267
+ embed_dim=inner_dim,
268
+ text_embed_dim=text_embed_dim,
269
+ bias=patch_bias,
270
+ sample_width=sample_width,
271
+ sample_height=sample_height,
272
+ sample_frames=sample_frames,
273
+ temporal_compression_ratio=temporal_compression_ratio,
274
+ max_text_seq_length=max_text_seq_length,
275
+ spatial_interpolation_scale=spatial_interpolation_scale,
276
+ temporal_interpolation_scale=temporal_interpolation_scale,
277
+ use_positional_embeddings=not use_rotary_positional_embeddings,
278
+ use_learned_positional_embeddings=use_learned_positional_embeddings,
279
+ )
280
+ self.embedding_dropout = nn.Dropout(dropout)
281
+
282
+ # 2. Time embeddings and ofs embedding (only CogVideoX 1.5-5B I2V has it)
283
+
284
+ self.time_proj = Timesteps(inner_dim, flip_sin_to_cos, freq_shift)
285
+ self.time_embedding = TimestepEmbedding(inner_dim, time_embed_dim, timestep_activation_fn)
286
+
287
+ self.ofs_proj = None
288
+ self.ofs_embedding = None
289
+ if ofs_embed_dim:
290
+ self.ofs_proj = Timesteps(ofs_embed_dim, flip_sin_to_cos, freq_shift)
291
+ self.ofs_embedding = TimestepEmbedding(
292
+ ofs_embed_dim, ofs_embed_dim, timestep_activation_fn
293
+ ) # same as time embeddings, for ofs
294
+
295
+ # 3. Define spatio-temporal transformers blocks
296
+ self.transformer_blocks = nn.ModuleList(
297
+ [
298
+ CogVideoXBlock(
299
+ dim=inner_dim,
300
+ num_attention_heads=num_attention_heads,
301
+ attention_head_dim=attention_head_dim,
302
+ time_embed_dim=time_embed_dim,
303
+ dropout=dropout,
304
+ activation_fn=activation_fn,
305
+ attention_bias=attention_bias,
306
+ norm_elementwise_affine=norm_elementwise_affine,
307
+ norm_eps=norm_eps,
308
+ )
309
+ for _ in range(num_layers)
310
+ ]
311
+ )
312
+ self.norm_final = nn.LayerNorm(inner_dim, norm_eps, norm_elementwise_affine)
313
+
314
+ # 4. Output blocks
315
+ self.norm_out = AdaLayerNorm(
316
+ embedding_dim=time_embed_dim,
317
+ output_dim=2 * inner_dim,
318
+ norm_elementwise_affine=norm_elementwise_affine,
319
+ norm_eps=norm_eps,
320
+ chunk_dim=1,
321
+ )
322
+
323
+ if patch_size_t is None:
324
+ # For CogVideox 1.0
325
+ output_dim = patch_size * patch_size * out_channels
326
+ else:
327
+ # For CogVideoX 1.5
328
+ output_dim = patch_size * patch_size * patch_size_t * out_channels
329
+
330
+ self.proj_out = nn.Linear(inner_dim, output_dim)
331
+
332
+ self.gradient_checkpointing = False
333
+
334
+ @property
335
+ # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.attn_processors
336
+ def attn_processors(self) -> Dict[str, AttentionProcessor]:
337
+ r"""
338
+ Returns:
339
+ `dict` of attention processors: A dictionary containing all attention processors used in the model,
340
+ indexed by their weight names.
341
+ """
342
+ # set recursively
343
+ processors = {}
344
+
345
+ def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
346
+ if hasattr(module, "get_processor"):
347
+ processors[f"{name}.processor"] = module.get_processor()
348
+
349
+ for sub_name, child in module.named_children():
350
+ fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)
351
+
352
+ return processors
353
+
354
+ for name, module in self.named_children():
355
+ fn_recursive_add_processors(name, module, processors)
356
+
357
+ return processors
358
+
359
+ # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_attn_processor
360
+ def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
361
+ r"""
362
+ Sets the attention processor to use to compute attention.
363
+
364
+ Parameters:
365
+ processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`):
366
+ The instantiated processor class or a dictionary of processor classes that will be set as the processor
367
+ for **all** `Attention` layers.
368
+
369
+ If `processor` is a dict, the key needs to define the path to the corresponding cross attention
370
+ processor. This is strongly recommended when setting trainable attention processors.
371
+
372
+ """
373
+ count = len(self.attn_processors.keys())
374
+
375
+ if isinstance(processor, dict) and len(processor) != count:
376
+ raise ValueError(
377
+ f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
378
+ f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
379
+ )
380
+
381
+ def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
382
+ if hasattr(module, "set_processor"):
383
+ if not isinstance(processor, dict):
384
+ module.set_processor(processor)
385
+ else:
386
+ module.set_processor(processor.pop(f"{name}.processor"))
387
+
388
+ for sub_name, child in module.named_children():
389
+ fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)
390
+
391
+ for name, module in self.named_children():
392
+ fn_recursive_attn_processor(name, module, processor)
393
+
394
+ # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.fuse_qkv_projections with FusedAttnProcessor2_0->FusedCogVideoXAttnProcessor2_0
395
+ def fuse_qkv_projections(self):
396
+ """
397
+ Enables fused QKV projections. For self-attention modules, all projection matrices (i.e., query, key, value)
398
+ are fused. For cross-attention modules, key and value projection matrices are fused.
399
+
400
+ <Tip warning={true}>
401
+
402
+ This API is 🧪 experimental.
403
+
404
+ </Tip>
405
+ """
406
+ self.original_attn_processors = None
407
+
408
+ for _, attn_processor in self.attn_processors.items():
409
+ if "Added" in str(attn_processor.__class__.__name__):
410
+ raise ValueError("`fuse_qkv_projections()` is not supported for models having added KV projections.")
411
+
412
+ self.original_attn_processors = self.attn_processors
413
+
414
+ for module in self.modules():
415
+ if isinstance(module, Attention):
416
+ module.fuse_projections(fuse=True)
417
+
418
+ self.set_attn_processor(FusedCogVideoXAttnProcessor2_0())
419
+
420
+ # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.unfuse_qkv_projections
421
+ def unfuse_qkv_projections(self):
422
+ """Disables the fused QKV projection if enabled.
423
+
424
+ <Tip warning={true}>
425
+
426
+ This API is 🧪 experimental.
427
+
428
+ </Tip>
429
+
430
+ """
431
+ if self.original_attn_processors is not None:
432
+ self.set_attn_processor(self.original_attn_processors)
433
+
434
+ def forward(
435
+ self,
436
+ hidden_states: torch.Tensor,
437
+ encoder_hidden_states: torch.Tensor,
438
+ timestep: Union[int, float, torch.LongTensor],
439
+ timestep_cond: Optional[torch.Tensor] = None,
440
+ ofs: Optional[Union[int, float, torch.LongTensor]] = None,
441
+ image_rotary_emb: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
442
+ attention_kwargs: Optional[Dict[str, Any]] = None,
443
+ return_dict: bool = True,
444
+ ):
445
+ if attention_kwargs is not None:
446
+ attention_kwargs = attention_kwargs.copy()
447
+ lora_scale = attention_kwargs.pop("scale", 1.0)
448
+ else:
449
+ lora_scale = 1.0
450
+
451
+ if USE_PEFT_BACKEND:
452
+ # weight the lora layers by setting `lora_scale` for each PEFT layer
453
+ scale_lora_layers(self, lora_scale)
454
+ else:
455
+ if attention_kwargs is not None and attention_kwargs.get("scale", None) is not None:
456
+ logger.warning(
457
+ "Passing `scale` via `attention_kwargs` when not using the PEFT backend is ineffective."
458
+ )
459
+
460
+ batch_size, num_frames, channels, height, width = hidden_states.shape
461
+
462
+ # 1. Time embedding
463
+ timesteps = timestep
464
+ t_emb = self.time_proj(timesteps)
465
+
466
+ # timesteps does not contain any weights and will always return f32 tensors
467
+ # but time_embedding might actually be running in fp16. so we need to cast here.
468
+ # there might be better ways to encapsulate this.
469
+ t_emb = t_emb.to(dtype=hidden_states.dtype)
470
+ emb = self.time_embedding(t_emb, timestep_cond)
471
+
472
+ if self.ofs_embedding is not None:
473
+ ofs_emb = self.ofs_proj(ofs)
474
+ ofs_emb = ofs_emb.to(dtype=hidden_states.dtype)
475
+ ofs_emb = self.ofs_embedding(ofs_emb)
476
+ emb = emb + ofs_emb
477
+
478
+ # 2. Patch embedding
479
+ hidden_states = self.patch_embed(encoder_hidden_states, hidden_states)
480
+ hidden_states = self.embedding_dropout(hidden_states)
481
+
482
+ text_seq_length = encoder_hidden_states.shape[1]
483
+ encoder_hidden_states = hidden_states[:, :text_seq_length]
484
+ hidden_states = hidden_states[:, text_seq_length:]
485
+
486
+ # 3. Transformer blocks
487
+ for i, block in enumerate(self.transformer_blocks):
488
+ if torch.is_grad_enabled() and self.gradient_checkpointing:
489
+ hidden_states, encoder_hidden_states = self._gradient_checkpointing_func(
490
+ block,
491
+ hidden_states,
492
+ encoder_hidden_states,
493
+ emb,
494
+ image_rotary_emb,
495
+ attention_kwargs,
496
+ )
497
+ else:
498
+ hidden_states, encoder_hidden_states = block(
499
+ hidden_states=hidden_states,
500
+ encoder_hidden_states=encoder_hidden_states,
501
+ temb=emb,
502
+ image_rotary_emb=image_rotary_emb,
503
+ attention_kwargs=attention_kwargs,
504
+ )
505
+
506
+ hidden_states = self.norm_final(hidden_states)
507
+
508
+ # 4. Final block
509
+ hidden_states = self.norm_out(hidden_states, temb=emb)
510
+ hidden_states = self.proj_out(hidden_states)
511
+
512
+ # 5. Unpatchify
513
+ p = self.config.patch_size
514
+ p_t = self.config.patch_size_t
515
+
516
+ if p_t is None:
517
+ output = hidden_states.reshape(batch_size, num_frames, height // p, width // p, -1, p, p)
518
+ output = output.permute(0, 1, 4, 2, 5, 3, 6).flatten(5, 6).flatten(3, 4)
519
+ else:
520
+ output = hidden_states.reshape(
521
+ batch_size, (num_frames + p_t - 1) // p_t, height // p, width // p, -1, p_t, p, p
522
+ )
523
+ output = output.permute(0, 1, 5, 4, 2, 6, 3, 7).flatten(6, 7).flatten(4, 5).flatten(1, 2)
524
+
525
+ if USE_PEFT_BACKEND:
526
+ # remove `lora_scale` from each PEFT layer
527
+ unscale_lora_layers(self, lora_scale)
528
+
529
+ if not return_dict:
530
+ return (output,)
531
+ return Transformer2DModelOutput(sample=output)
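The unpatchify branch above for CogVideoX 1.5 (when `patch_size_t` is set) folds each output token back into a `p_t x p x p` spatio-temporal patch. A self-contained shape check with plain torch; all sizes are toy values, not model defaults:

import torch

batch_size, num_frames, height, width = 1, 4, 8, 8
p, p_t, out_channels = 2, 2, 16
num_patches = ((num_frames + p_t - 1) // p_t) * (height // p) * (width // p)

# proj_out emits one vector of size p * p * p_t * out_channels per spatio-temporal patch.
hidden_states = torch.randn(batch_size, num_patches, p * p * p_t * out_channels)

output = hidden_states.reshape(
    batch_size, (num_frames + p_t - 1) // p_t, height // p, width // p, -1, p_t, p, p
)
output = output.permute(0, 1, 5, 4, 2, 6, 3, 7).flatten(6, 7).flatten(4, 5).flatten(1, 2)
print(output.shape)  # torch.Size([1, 4, 16, 8, 8]) == (B, frames, C, H, W)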
exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/models/transformers/consisid_transformer_3d.py ADDED
@@ -0,0 +1,789 @@
1
+ # Copyright 2025 ConsisID Authors and The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import math
16
+ from typing import Any, Dict, List, Optional, Tuple, Union
17
+
18
+ import torch
19
+ from torch import nn
20
+
21
+ from ...configuration_utils import ConfigMixin, register_to_config
22
+ from ...loaders import PeftAdapterMixin
23
+ from ...utils import USE_PEFT_BACKEND, logging, scale_lora_layers, unscale_lora_layers
24
+ from ...utils.torch_utils import maybe_allow_in_graph
25
+ from ..attention import Attention, FeedForward
26
+ from ..attention_processor import AttentionProcessor, CogVideoXAttnProcessor2_0
27
+ from ..embeddings import CogVideoXPatchEmbed, TimestepEmbedding, Timesteps
28
+ from ..modeling_outputs import Transformer2DModelOutput
29
+ from ..modeling_utils import ModelMixin
30
+ from ..normalization import AdaLayerNorm, CogVideoXLayerNormZero
31
+
32
+
33
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
34
+
35
+
36
+ class PerceiverAttention(nn.Module):
37
+ def __init__(self, dim: int, dim_head: int = 64, heads: int = 8, kv_dim: Optional[int] = None):
38
+ super().__init__()
39
+
40
+ self.scale = dim_head**-0.5
41
+ self.dim_head = dim_head
42
+ self.heads = heads
43
+ inner_dim = dim_head * heads
44
+
45
+ self.norm1 = nn.LayerNorm(dim if kv_dim is None else kv_dim)
46
+ self.norm2 = nn.LayerNorm(dim)
47
+
48
+ self.to_q = nn.Linear(dim, inner_dim, bias=False)
49
+ self.to_kv = nn.Linear(dim if kv_dim is None else kv_dim, inner_dim * 2, bias=False)
50
+ self.to_out = nn.Linear(inner_dim, dim, bias=False)
51
+
52
+ def forward(self, image_embeds: torch.Tensor, latents: torch.Tensor) -> torch.Tensor:
53
+ # Apply normalization
54
+ image_embeds = self.norm1(image_embeds)
55
+ latents = self.norm2(latents)
56
+
57
+ batch_size, seq_len, _ = latents.shape # Get batch size and sequence length
58
+
59
+ # Compute query, key, and value matrices
60
+ query = self.to_q(latents)
61
+ kv_input = torch.cat((image_embeds, latents), dim=-2)
62
+ key, value = self.to_kv(kv_input).chunk(2, dim=-1)
63
+
64
+ # Reshape the tensors for multi-head attention
65
+ query = query.reshape(query.size(0), -1, self.heads, self.dim_head).transpose(1, 2)
66
+ key = key.reshape(key.size(0), -1, self.heads, self.dim_head).transpose(1, 2)
67
+ value = value.reshape(value.size(0), -1, self.heads, self.dim_head).transpose(1, 2)
68
+
69
+ # attention
70
+ scale = 1 / math.sqrt(math.sqrt(self.dim_head))
71
+ weight = (query * scale) @ (key * scale).transpose(-2, -1) # More stable with f16 than dividing afterwards
72
+ weight = torch.softmax(weight.float(), dim=-1).type(weight.dtype)
73
+ output = weight @ value
74
+
75
+ # Reshape and return the final output
76
+ output = output.permute(0, 2, 1, 3).reshape(batch_size, seq_len, -1)
77
+
78
+ return self.to_out(output)
79
+
80
+
81
+ class LocalFacialExtractor(nn.Module):
82
+ def __init__(
83
+ self,
84
+ id_dim: int = 1280,
85
+ vit_dim: int = 1024,
86
+ depth: int = 10,
87
+ dim_head: int = 64,
88
+ heads: int = 16,
89
+ num_id_token: int = 5,
90
+ num_queries: int = 32,
91
+ output_dim: int = 2048,
92
+ ff_mult: int = 4,
93
+ num_scale: int = 5,
94
+ ):
95
+ super().__init__()
96
+
97
+ # Storing identity token and query information
98
+ self.num_id_token = num_id_token
99
+ self.vit_dim = vit_dim
100
+ self.num_queries = num_queries
101
+ assert depth % num_scale == 0
102
+ self.depth = depth // num_scale
103
+ self.num_scale = num_scale
104
+ scale = vit_dim**-0.5
105
+
106
+ # Learnable latent query embeddings
107
+ self.latents = nn.Parameter(torch.randn(1, num_queries, vit_dim) * scale)
108
+ # Projection layer to map the latent output to the desired dimension
109
+ self.proj_out = nn.Parameter(scale * torch.randn(vit_dim, output_dim))
110
+
111
+ # Attention and ConsisIDFeedForward layer stack
112
+ self.layers = nn.ModuleList([])
113
+ for _ in range(depth):
114
+ self.layers.append(
115
+ nn.ModuleList(
116
+ [
117
+ PerceiverAttention(dim=vit_dim, dim_head=dim_head, heads=heads), # Perceiver Attention layer
118
+ nn.Sequential(
119
+ nn.LayerNorm(vit_dim),
120
+ nn.Linear(vit_dim, vit_dim * ff_mult, bias=False),
121
+ nn.GELU(),
122
+ nn.Linear(vit_dim * ff_mult, vit_dim, bias=False),
123
+ ), # ConsisIDFeedForward layer
124
+ ]
125
+ )
126
+ )
127
+
128
+ # Mappings for each of the 5 different ViT features
129
+ for i in range(num_scale):
130
+ setattr(
131
+ self,
132
+ f"mapping_{i}",
133
+ nn.Sequential(
134
+ nn.Linear(vit_dim, vit_dim),
135
+ nn.LayerNorm(vit_dim),
136
+ nn.LeakyReLU(),
137
+ nn.Linear(vit_dim, vit_dim),
138
+ nn.LayerNorm(vit_dim),
139
+ nn.LeakyReLU(),
140
+ nn.Linear(vit_dim, vit_dim),
141
+ ),
142
+ )
143
+
144
+ # Mapping for identity embedding vectors
145
+ self.id_embedding_mapping = nn.Sequential(
146
+ nn.Linear(id_dim, vit_dim),
147
+ nn.LayerNorm(vit_dim),
148
+ nn.LeakyReLU(),
149
+ nn.Linear(vit_dim, vit_dim),
150
+ nn.LayerNorm(vit_dim),
151
+ nn.LeakyReLU(),
152
+ nn.Linear(vit_dim, vit_dim * num_id_token),
153
+ )
154
+
155
+ def forward(self, id_embeds: torch.Tensor, vit_hidden_states: List[torch.Tensor]) -> torch.Tensor:
156
+ # Repeat latent queries for the batch size
157
+ latents = self.latents.repeat(id_embeds.size(0), 1, 1)
158
+
159
+ # Map the identity embedding to tokens
160
+ id_embeds = self.id_embedding_mapping(id_embeds)
161
+ id_embeds = id_embeds.reshape(-1, self.num_id_token, self.vit_dim)
162
+
163
+ # Concatenate identity tokens with the latent queries
164
+ latents = torch.cat((latents, id_embeds), dim=1)
165
+
166
+ # Process each of the num_scale visual feature inputs
167
+ for i in range(self.num_scale):
168
+ vit_feature = getattr(self, f"mapping_{i}")(vit_hidden_states[i])
169
+ ctx_feature = torch.cat((id_embeds, vit_feature), dim=1)
170
+
171
+ # Pass through the PerceiverAttention and ConsisIDFeedForward layers
172
+ for attn, ff in self.layers[i * self.depth : (i + 1) * self.depth]:
173
+ latents = attn(ctx_feature, latents) + latents
174
+ latents = ff(latents) + latents
175
+
176
+ # Retain only the query latents
177
+ latents = latents[:, : self.num_queries]
178
+ # Project the latents to the output dimension
179
+ latents = latents @ self.proj_out
180
+ return latents
181
+
182
+
183
+ class PerceiverCrossAttention(nn.Module):
184
+ def __init__(self, dim: int = 3072, dim_head: int = 128, heads: int = 16, kv_dim: int = 2048):
185
+ super().__init__()
186
+
187
+ self.scale = dim_head**-0.5
188
+ self.dim_head = dim_head
189
+ self.heads = heads
190
+ inner_dim = dim_head * heads
191
+
192
+ # Layer normalization to stabilize training
193
+ self.norm1 = nn.LayerNorm(dim if kv_dim is None else kv_dim)
194
+ self.norm2 = nn.LayerNorm(dim)
195
+
196
+ # Linear transformations to produce queries, keys, and values
197
+ self.to_q = nn.Linear(dim, inner_dim, bias=False)
198
+ self.to_kv = nn.Linear(dim if kv_dim is None else kv_dim, inner_dim * 2, bias=False)
199
+ self.to_out = nn.Linear(inner_dim, dim, bias=False)
200
+
201
+ def forward(self, image_embeds: torch.Tensor, hidden_states: torch.Tensor) -> torch.Tensor:
202
+ # Apply layer normalization to the input image and latent features
203
+ image_embeds = self.norm1(image_embeds)
204
+ hidden_states = self.norm2(hidden_states)
205
+
206
+ batch_size, seq_len, _ = hidden_states.shape
207
+
208
+ # Compute queries, keys, and values
209
+ query = self.to_q(hidden_states)
210
+ key, value = self.to_kv(image_embeds).chunk(2, dim=-1)
211
+
212
+ # Reshape tensors to split into attention heads
213
+ query = query.reshape(query.size(0), -1, self.heads, self.dim_head).transpose(1, 2)
214
+ key = key.reshape(key.size(0), -1, self.heads, self.dim_head).transpose(1, 2)
215
+ value = value.reshape(value.size(0), -1, self.heads, self.dim_head).transpose(1, 2)
216
+
217
+ # Compute attention weights
218
+ scale = 1 / math.sqrt(math.sqrt(self.dim_head))
219
+ weight = (query * scale) @ (key * scale).transpose(-2, -1) # More stable scaling than post-division
220
+ weight = torch.softmax(weight.float(), dim=-1).type(weight.dtype)
221
+
222
+ # Compute the output via weighted combination of values
223
+ out = weight @ value
224
+
225
+ # Reshape and permute to prepare for final linear transformation
226
+ out = out.permute(0, 2, 1, 3).reshape(batch_size, seq_len, -1)
227
+
228
+ return self.to_out(out)
229
+
230
+
231
+ @maybe_allow_in_graph
232
+ class ConsisIDBlock(nn.Module):
233
+ r"""
234
+ Transformer block used in [ConsisID](https://github.com/PKU-YuanGroup/ConsisID) model.
235
+
236
+ Parameters:
237
+ dim (`int`):
238
+ The number of channels in the input and output.
239
+ num_attention_heads (`int`):
240
+ The number of heads to use for multi-head attention.
241
+ attention_head_dim (`int`):
242
+ The number of channels in each head.
243
+ time_embed_dim (`int`):
244
+ The number of channels in timestep embedding.
245
+ dropout (`float`, defaults to `0.0`):
246
+ The dropout probability to use.
247
+ activation_fn (`str`, defaults to `"gelu-approximate"`):
248
+ Activation function to be used in feed-forward.
249
+ attention_bias (`bool`, defaults to `False`):
250
+ Whether or not to use bias in attention projection layers.
251
+ qk_norm (`bool`, defaults to `True`):
252
+ Whether or not to use normalization after query and key projections in Attention.
253
+ norm_elementwise_affine (`bool`, defaults to `True`):
254
+ Whether to use learnable elementwise affine parameters for normalization.
255
+ norm_eps (`float`, defaults to `1e-5`):
256
+ Epsilon value for normalization layers.
257
+ final_dropout (`bool`, defaults to `False`):
258
+ Whether to apply a final dropout after the last feed-forward layer.
259
+ ff_inner_dim (`int`, *optional*, defaults to `None`):
260
+ Custom hidden dimension of Feed-forward layer. If not provided, `4 * dim` is used.
261
+ ff_bias (`bool`, defaults to `True`):
262
+ Whether or not to use bias in Feed-forward layer.
263
+ attention_out_bias (`bool`, defaults to `True`):
264
+ Whether or not to use bias in Attention output projection layer.
265
+ """
266
+
267
+ def __init__(
268
+ self,
269
+ dim: int,
270
+ num_attention_heads: int,
271
+ attention_head_dim: int,
272
+ time_embed_dim: int,
273
+ dropout: float = 0.0,
274
+ activation_fn: str = "gelu-approximate",
275
+ attention_bias: bool = False,
276
+ qk_norm: bool = True,
277
+ norm_elementwise_affine: bool = True,
278
+ norm_eps: float = 1e-5,
279
+ final_dropout: bool = True,
280
+ ff_inner_dim: Optional[int] = None,
281
+ ff_bias: bool = True,
282
+ attention_out_bias: bool = True,
283
+ ):
284
+ super().__init__()
285
+
286
+ # 1. Self Attention
287
+ self.norm1 = CogVideoXLayerNormZero(time_embed_dim, dim, norm_elementwise_affine, norm_eps, bias=True)
288
+
289
+ self.attn1 = Attention(
290
+ query_dim=dim,
291
+ dim_head=attention_head_dim,
292
+ heads=num_attention_heads,
293
+ qk_norm="layer_norm" if qk_norm else None,
294
+ eps=1e-6,
295
+ bias=attention_bias,
296
+ out_bias=attention_out_bias,
297
+ processor=CogVideoXAttnProcessor2_0(),
298
+ )
299
+
300
+ # 2. Feed Forward
301
+ self.norm2 = CogVideoXLayerNormZero(time_embed_dim, dim, norm_elementwise_affine, norm_eps, bias=True)
302
+
303
+ self.ff = FeedForward(
304
+ dim,
305
+ dropout=dropout,
306
+ activation_fn=activation_fn,
307
+ final_dropout=final_dropout,
308
+ inner_dim=ff_inner_dim,
309
+ bias=ff_bias,
310
+ )
311
+
312
+ def forward(
313
+ self,
314
+ hidden_states: torch.Tensor,
315
+ encoder_hidden_states: torch.Tensor,
316
+ temb: torch.Tensor,
317
+ image_rotary_emb: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
318
+ ) -> torch.Tensor:
319
+ text_seq_length = encoder_hidden_states.size(1)
320
+
321
+ # norm & modulate
322
+ norm_hidden_states, norm_encoder_hidden_states, gate_msa, enc_gate_msa = self.norm1(
323
+ hidden_states, encoder_hidden_states, temb
324
+ )
325
+
326
+ # attention
327
+ attn_hidden_states, attn_encoder_hidden_states = self.attn1(
328
+ hidden_states=norm_hidden_states,
329
+ encoder_hidden_states=norm_encoder_hidden_states,
330
+ image_rotary_emb=image_rotary_emb,
331
+ )
332
+
333
+ hidden_states = hidden_states + gate_msa * attn_hidden_states
334
+ encoder_hidden_states = encoder_hidden_states + enc_gate_msa * attn_encoder_hidden_states
335
+
336
+ # norm & modulate
337
+ norm_hidden_states, norm_encoder_hidden_states, gate_ff, enc_gate_ff = self.norm2(
338
+ hidden_states, encoder_hidden_states, temb
339
+ )
340
+
341
+ # feed-forward
342
+ norm_hidden_states = torch.cat([norm_encoder_hidden_states, norm_hidden_states], dim=1)
343
+ ff_output = self.ff(norm_hidden_states)
344
+
345
+ hidden_states = hidden_states + gate_ff * ff_output[:, text_seq_length:]
346
+ encoder_hidden_states = encoder_hidden_states + enc_gate_ff * ff_output[:, :text_seq_length]
347
+
348
+ return hidden_states, encoder_hidden_states
349
+
350
+
351
+ class ConsisIDTransformer3DModel(ModelMixin, ConfigMixin, PeftAdapterMixin):
352
+ """
353
+ A Transformer model for video-like data in [ConsisID](https://github.com/PKU-YuanGroup/ConsisID).
354
+
355
+ Parameters:
356
+ num_attention_heads (`int`, defaults to `30`):
357
+ The number of heads to use for multi-head attention.
358
+ attention_head_dim (`int`, defaults to `64`):
359
+ The number of channels in each head.
360
+ in_channels (`int`, defaults to `16`):
361
+ The number of channels in the input.
362
+ out_channels (`int`, *optional*, defaults to `16`):
363
+ The number of channels in the output.
364
+ flip_sin_to_cos (`bool`, defaults to `True`):
365
+ Whether to flip the sin to cos in the time embedding.
366
+ time_embed_dim (`int`, defaults to `512`):
367
+ Output dimension of timestep embeddings.
368
+ text_embed_dim (`int`, defaults to `4096`):
369
+ Input dimension of text embeddings from the text encoder.
370
+ num_layers (`int`, defaults to `30`):
371
+ The number of layers of Transformer blocks to use.
372
+ dropout (`float`, defaults to `0.0`):
373
+ The dropout probability to use.
374
+ attention_bias (`bool`, defaults to `True`):
375
+ Whether to use bias in the attention projection layers.
376
+ sample_width (`int`, defaults to `90`):
377
+ The width of the input latents.
378
+ sample_height (`int`, defaults to `60`):
379
+ The height of the input latents.
380
+ sample_frames (`int`, defaults to `49`):
381
+ The number of frames in the input latents. Note that this parameter was incorrectly initialized to 49
382
+ instead of 13 because ConsisID processed 13 latent frames at once in its default and recommended settings,
383
+ but cannot be changed to the correct value to ensure backwards compatibility. To create a transformer with
384
+ K latent frames, the correct value to pass here would be: ((K - 1) * temporal_compression_ratio + 1).
385
+ patch_size (`int`, defaults to `2`):
386
+ The size of the patches to use in the patch embedding layer.
387
+ temporal_compression_ratio (`int`, defaults to `4`):
388
+ The compression ratio across the temporal dimension. See documentation for `sample_frames`.
389
+ max_text_seq_length (`int`, defaults to `226`):
390
+ The maximum sequence length of the input text embeddings.
391
+ activation_fn (`str`, defaults to `"gelu-approximate"`):
392
+ Activation function to use in feed-forward.
393
+ timestep_activation_fn (`str`, defaults to `"silu"`):
394
+ Activation function to use when generating the timestep embeddings.
395
+ norm_elementwise_affine (`bool`, defaults to `True`):
396
+ Whether to use elementwise affine in normalization layers.
397
+ norm_eps (`float`, defaults to `1e-5`):
398
+ The epsilon value to use in normalization layers.
399
+ spatial_interpolation_scale (`float`, defaults to `1.875`):
400
+ Scaling factor to apply in 3D positional embeddings across spatial dimensions.
401
+ temporal_interpolation_scale (`float`, defaults to `1.0`):
402
+ Scaling factor to apply in 3D positional embeddings across temporal dimensions.
403
+ is_train_face (`bool`, defaults to `False`):
404
+ Whether to enable the identity-preserving module during the training process. When set to `True`, the
405
+ model will focus on identity-preserving tasks.
406
+ is_kps (`bool`, defaults to `False`):
407
+ Whether to enable keypoints for the global facial extractor. If `True`, keypoints will be used in the model.
408
+ cross_attn_interval (`int`, defaults to `2`):
409
+ The interval between cross-attention layers in the Transformer architecture. A larger value may reduce the
410
+ frequency of cross-attention computations, which can help reduce computational overhead.
411
+ cross_attn_dim_head (`int`, optional, defaults to `128`):
412
+ The dimensionality of each attention head in the cross-attention layers of the Transformer architecture. A
413
+ larger value increases the capacity to attend to more complex patterns, but also increases memory and
414
+ computation costs.
415
+ cross_attn_num_heads (`int`, optional, defaults to `16`):
416
+ The number of attention heads in the cross-attention layers. More heads allow for more parallel attention
417
+ mechanisms, capturing diverse relationships between different components of the input, but can also
418
+ increase computational requirements.
419
+ LFE_id_dim (`int`, optional, defaults to `1280`):
420
+ The dimensionality of the identity vector used in the Local Facial Extractor (LFE). This vector represents
421
+ the identity features of a face, which are important for tasks like face recognition and identity
422
+ preservation across different frames.
423
+ LFE_vit_dim (`int`, optional, defaults to `1024`):
424
+ The dimension of the vision transformer (ViT) output used in the Local Facial Extractor (LFE). This value
425
+ dictates the size of the transformer-generated feature vectors that will be processed for facial feature
426
+ extraction.
427
+ LFE_depth (`int`, optional, defaults to `10`):
428
+ The number of layers in the Local Facial Extractor (LFE). Increasing the depth allows the model to capture
429
+ more complex representations of facial features, but also increases the computational load.
430
+ LFE_dim_head (`int`, optional, defaults to `64`):
431
+ The dimensionality of each attention head in the Local Facial Extractor (LFE). This parameter affects how
432
+ finely the model can process and focus on different parts of the facial features during the extraction
433
+ process.
434
+ LFE_num_heads (`int`, optional, defaults to `16`):
435
+ The number of attention heads in the Local Facial Extractor (LFE). More heads can improve the model's
436
+ ability to capture diverse facial features, but at the cost of increased computational complexity.
437
+ LFE_num_id_token (`int`, optional, defaults to `5`):
438
+ The number of identity tokens used in the Local Facial Extractor (LFE). This defines how many
439
+ identity-related tokens the model will process to ensure face identity preservation during feature
440
+ extraction.
441
+ LFE_num_querie (`int`, optional, defaults to `32`):
442
+ The number of query tokens used in the Local Facial Extractor (LFE). These tokens are used to capture
443
+ high-frequency face-related information that aids in accurate facial feature extraction.
444
+ LFE_output_dim (`int`, optional, defaults to `2048`):
445
+ The output dimension of the Local Facial Extractor (LFE). This dimension determines the size of the feature
446
+ vectors produced by the LFE module, which will be used for subsequent tasks such as face recognition or
447
+ tracking.
448
+ LFE_ff_mult (`int`, optional, defaults to `4`):
449
+ The multiplication factor applied to the feed-forward network's hidden layer size in the Local Facial
450
+ Extractor (LFE). A higher value increases the model's capacity to learn more complex facial feature
451
+ transformations, but also increases the computation and memory requirements.
452
+ LFE_num_scale (`int`, optional, defaults to `5`):
453
+ The number of different scales of visual features. A higher value increases the model's capacity to learn more
454
+ complex facial feature transformations, but also increases the computation and memory requirements.
455
+ local_face_scale (`float`, defaults to `1.0`):
456
+ A scaling factor used to adjust the importance of local facial features in the model. This can influence
457
+ how strongly the model focuses on high frequency face-related content.
458
+ """
459
+
460
+ _supports_gradient_checkpointing = True
461
+
462
+ @register_to_config
463
+ def __init__(
464
+ self,
465
+ num_attention_heads: int = 30,
466
+ attention_head_dim: int = 64,
467
+ in_channels: int = 16,
468
+ out_channels: Optional[int] = 16,
469
+ flip_sin_to_cos: bool = True,
470
+ freq_shift: int = 0,
471
+ time_embed_dim: int = 512,
472
+ text_embed_dim: int = 4096,
473
+ num_layers: int = 30,
474
+ dropout: float = 0.0,
475
+ attention_bias: bool = True,
476
+ sample_width: int = 90,
477
+ sample_height: int = 60,
478
+ sample_frames: int = 49,
479
+ patch_size: int = 2,
480
+ temporal_compression_ratio: int = 4,
481
+ max_text_seq_length: int = 226,
482
+ activation_fn: str = "gelu-approximate",
483
+ timestep_activation_fn: str = "silu",
484
+ norm_elementwise_affine: bool = True,
485
+ norm_eps: float = 1e-5,
486
+ spatial_interpolation_scale: float = 1.875,
487
+ temporal_interpolation_scale: float = 1.0,
488
+ use_rotary_positional_embeddings: bool = False,
489
+ use_learned_positional_embeddings: bool = False,
490
+ is_train_face: bool = False,
491
+ is_kps: bool = False,
492
+ cross_attn_interval: int = 2,
493
+ cross_attn_dim_head: int = 128,
494
+ cross_attn_num_heads: int = 16,
495
+ LFE_id_dim: int = 1280,
496
+ LFE_vit_dim: int = 1024,
497
+ LFE_depth: int = 10,
498
+ LFE_dim_head: int = 64,
499
+ LFE_num_heads: int = 16,
500
+ LFE_num_id_token: int = 5,
501
+ LFE_num_querie: int = 32,
502
+ LFE_output_dim: int = 2048,
503
+ LFE_ff_mult: int = 4,
504
+ LFE_num_scale: int = 5,
505
+ local_face_scale: float = 1.0,
506
+ ):
507
+ super().__init__()
508
+ inner_dim = num_attention_heads * attention_head_dim
509
+
510
+ if not use_rotary_positional_embeddings and use_learned_positional_embeddings:
511
+ raise ValueError(
512
+ "There are no ConsisID checkpoints available with disable rotary embeddings and learned positional "
513
+ "embeddings. If you're using a custom model and/or believe this should be supported, please open an "
514
+ "issue at https://github.com/huggingface/diffusers/issues."
515
+ )
516
+
517
+ # 1. Patch embedding
518
+ self.patch_embed = CogVideoXPatchEmbed(
519
+ patch_size=patch_size,
520
+ in_channels=in_channels,
521
+ embed_dim=inner_dim,
522
+ text_embed_dim=text_embed_dim,
523
+ bias=True,
524
+ sample_width=sample_width,
525
+ sample_height=sample_height,
526
+ sample_frames=sample_frames,
527
+ temporal_compression_ratio=temporal_compression_ratio,
528
+ max_text_seq_length=max_text_seq_length,
529
+ spatial_interpolation_scale=spatial_interpolation_scale,
530
+ temporal_interpolation_scale=temporal_interpolation_scale,
531
+ use_positional_embeddings=not use_rotary_positional_embeddings,
532
+ use_learned_positional_embeddings=use_learned_positional_embeddings,
533
+ )
534
+ self.embedding_dropout = nn.Dropout(dropout)
535
+
536
+ # 2. Time embeddings
537
+ self.time_proj = Timesteps(inner_dim, flip_sin_to_cos, freq_shift)
538
+ self.time_embedding = TimestepEmbedding(inner_dim, time_embed_dim, timestep_activation_fn)
539
+
540
+ # 3. Define spatio-temporal transformers blocks
541
+ self.transformer_blocks = nn.ModuleList(
542
+ [
543
+ ConsisIDBlock(
544
+ dim=inner_dim,
545
+ num_attention_heads=num_attention_heads,
546
+ attention_head_dim=attention_head_dim,
547
+ time_embed_dim=time_embed_dim,
548
+ dropout=dropout,
549
+ activation_fn=activation_fn,
550
+ attention_bias=attention_bias,
551
+ norm_elementwise_affine=norm_elementwise_affine,
552
+ norm_eps=norm_eps,
553
+ )
554
+ for _ in range(num_layers)
555
+ ]
556
+ )
557
+ self.norm_final = nn.LayerNorm(inner_dim, norm_eps, norm_elementwise_affine)
558
+
559
+ # 4. Output blocks
560
+ self.norm_out = AdaLayerNorm(
561
+ embedding_dim=time_embed_dim,
562
+ output_dim=2 * inner_dim,
563
+ norm_elementwise_affine=norm_elementwise_affine,
564
+ norm_eps=norm_eps,
565
+ chunk_dim=1,
566
+ )
567
+ self.proj_out = nn.Linear(inner_dim, patch_size * patch_size * out_channels)
568
+
569
+ self.is_train_face = is_train_face
570
+ self.is_kps = is_kps
571
+
572
+ # 5. Define identity-preserving config
573
+ if is_train_face:
574
+ # LFE configs
575
+ self.LFE_id_dim = LFE_id_dim
576
+ self.LFE_vit_dim = LFE_vit_dim
577
+ self.LFE_depth = LFE_depth
578
+ self.LFE_dim_head = LFE_dim_head
579
+ self.LFE_num_heads = LFE_num_heads
580
+ self.LFE_num_id_token = LFE_num_id_token
581
+ self.LFE_num_querie = LFE_num_querie
582
+ self.LFE_output_dim = LFE_output_dim
583
+ self.LFE_ff_mult = LFE_ff_mult
584
+ self.LFE_num_scale = LFE_num_scale
585
+ # cross configs
586
+ self.inner_dim = inner_dim
587
+ self.cross_attn_interval = cross_attn_interval
588
+ self.num_cross_attn = num_layers // cross_attn_interval
589
+ self.cross_attn_dim_head = cross_attn_dim_head
590
+ self.cross_attn_num_heads = cross_attn_num_heads
591
+ self.cross_attn_kv_dim = int(self.inner_dim / 3 * 2)
592
+ self.local_face_scale = local_face_scale
593
+ # face modules
594
+ self._init_face_inputs()
595
+
596
+ self.gradient_checkpointing = False
597
+
598
+ def _init_face_inputs(self):
599
+ self.local_facial_extractor = LocalFacialExtractor(
600
+ id_dim=self.LFE_id_dim,
601
+ vit_dim=self.LFE_vit_dim,
602
+ depth=self.LFE_depth,
603
+ dim_head=self.LFE_dim_head,
604
+ heads=self.LFE_num_heads,
605
+ num_id_token=self.LFE_num_id_token,
606
+ num_queries=self.LFE_num_querie,
607
+ output_dim=self.LFE_output_dim,
608
+ ff_mult=self.LFE_ff_mult,
609
+ num_scale=self.LFE_num_scale,
610
+ )
611
+ self.perceiver_cross_attention = nn.ModuleList(
612
+ [
613
+ PerceiverCrossAttention(
614
+ dim=self.inner_dim,
615
+ dim_head=self.cross_attn_dim_head,
616
+ heads=self.cross_attn_num_heads,
617
+ kv_dim=self.cross_attn_kv_dim,
618
+ )
619
+ for _ in range(self.num_cross_attn)
620
+ ]
621
+ )
622
+
623
+ @property
624
+ # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.attn_processors
625
+ def attn_processors(self) -> Dict[str, AttentionProcessor]:
626
+ r"""
627
+ Returns:
628
+ `dict` of attention processors: A dictionary containing all attention processors used in the model,
629
+ indexed by their weight names.
630
+ """
631
+ # set recursively
632
+ processors = {}
633
+
634
+ def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
635
+ if hasattr(module, "get_processor"):
636
+ processors[f"{name}.processor"] = module.get_processor()
637
+
638
+ for sub_name, child in module.named_children():
639
+ fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)
640
+
641
+ return processors
642
+
643
+ for name, module in self.named_children():
644
+ fn_recursive_add_processors(name, module, processors)
645
+
646
+ return processors
647
+
648
+ # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_attn_processor
649
+ def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
650
+ r"""
651
+ Sets the attention processor to use to compute attention.
652
+
653
+ Parameters:
654
+ processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`):
655
+ The instantiated processor class or a dictionary of processor classes that will be set as the processor
656
+ for **all** `Attention` layers.
657
+
658
+ If `processor` is a dict, the key needs to define the path to the corresponding cross attention
659
+ processor. This is strongly recommended when setting trainable attention processors.
660
+
661
+ """
662
+ count = len(self.attn_processors.keys())
663
+
664
+ if isinstance(processor, dict) and len(processor) != count:
665
+ raise ValueError(
666
+ f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
667
+ f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
668
+ )
669
+
670
+ def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
671
+ if hasattr(module, "set_processor"):
672
+ if not isinstance(processor, dict):
673
+ module.set_processor(processor)
674
+ else:
675
+ module.set_processor(processor.pop(f"{name}.processor"))
676
+
677
+ for sub_name, child in module.named_children():
678
+ fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)
679
+
680
+ for name, module in self.named_children():
681
+ fn_recursive_attn_processor(name, module, processor)
682
+
683
+ def forward(
684
+ self,
685
+ hidden_states: torch.Tensor,
686
+ encoder_hidden_states: torch.Tensor,
687
+ timestep: Union[int, float, torch.LongTensor],
688
+ timestep_cond: Optional[torch.Tensor] = None,
689
+ image_rotary_emb: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
690
+ attention_kwargs: Optional[Dict[str, Any]] = None,
691
+ id_cond: Optional[torch.Tensor] = None,
692
+ id_vit_hidden: Optional[torch.Tensor] = None,
693
+ return_dict: bool = True,
694
+ ):
695
+ if attention_kwargs is not None:
696
+ attention_kwargs = attention_kwargs.copy()
697
+ lora_scale = attention_kwargs.pop("scale", 1.0)
698
+ else:
699
+ lora_scale = 1.0
700
+
701
+ if USE_PEFT_BACKEND:
702
+ # weight the lora layers by setting `lora_scale` for each PEFT layer
703
+ scale_lora_layers(self, lora_scale)
704
+ else:
705
+ if attention_kwargs is not None and attention_kwargs.get("scale", None) is not None:
706
+ logger.warning(
707
+ "Passing `scale` via `attention_kwargs` when not using the PEFT backend is ineffective."
708
+ )
709
+
710
+ # fuse clip and insightface
711
+ valid_face_emb = None
712
+ if self.is_train_face:
713
+ id_cond = id_cond.to(device=hidden_states.device, dtype=hidden_states.dtype)
714
+ id_vit_hidden = [
715
+ tensor.to(device=hidden_states.device, dtype=hidden_states.dtype) for tensor in id_vit_hidden
716
+ ]
717
+ valid_face_emb = self.local_facial_extractor(
718
+ id_cond, id_vit_hidden
719
+ ) # torch.Size([1, 1280]), list[5](torch.Size([1, 577, 1024])) -> torch.Size([1, 32, 2048])
720
+
721
+ batch_size, num_frames, channels, height, width = hidden_states.shape
722
+
723
+ # 1. Time embedding
724
+ timesteps = timestep
725
+ t_emb = self.time_proj(timesteps)
726
+
727
+ # timesteps does not contain any weights and will always return f32 tensors
728
+ # but time_embedding might actually be running in fp16. so we need to cast here.
729
+ # there might be better ways to encapsulate this.
730
+ t_emb = t_emb.to(dtype=hidden_states.dtype)
731
+ emb = self.time_embedding(t_emb, timestep_cond)
732
+
733
+ # 2. Patch embedding
734
+ # torch.Size([1, 226, 4096]) torch.Size([1, 13, 32, 60, 90])
735
+ hidden_states = self.patch_embed(encoder_hidden_states, hidden_states) # torch.Size([1, 17776, 3072])
736
+ hidden_states = self.embedding_dropout(hidden_states) # torch.Size([1, 17776, 3072])
737
+
738
+ text_seq_length = encoder_hidden_states.shape[1]
739
+ encoder_hidden_states = hidden_states[:, :text_seq_length] # torch.Size([1, 226, 3072])
740
+ hidden_states = hidden_states[:, text_seq_length:] # torch.Size([1, 17550, 3072])
741
+
742
+ # 3. Transformer blocks
743
+ ca_idx = 0
744
+ for i, block in enumerate(self.transformer_blocks):
745
+ if torch.is_grad_enabled() and self.gradient_checkpointing:
746
+ hidden_states, encoder_hidden_states = self._gradient_checkpointing_func(
747
+ block,
748
+ hidden_states,
749
+ encoder_hidden_states,
750
+ emb,
751
+ image_rotary_emb,
752
+ )
753
+ else:
754
+ hidden_states, encoder_hidden_states = block(
755
+ hidden_states=hidden_states,
756
+ encoder_hidden_states=encoder_hidden_states,
757
+ temb=emb,
758
+ image_rotary_emb=image_rotary_emb,
759
+ )
760
+
761
+ if self.is_train_face:
762
+ if i % self.cross_attn_interval == 0 and valid_face_emb is not None:
763
+ hidden_states = hidden_states + self.local_face_scale * self.perceiver_cross_attention[ca_idx](
764
+ valid_face_emb, hidden_states
765
+ ) # torch.Size([2, 32, 2048]) torch.Size([2, 17550, 3072])
766
+ ca_idx += 1
767
+
768
+ hidden_states = torch.cat([encoder_hidden_states, hidden_states], dim=1)
769
+ hidden_states = self.norm_final(hidden_states)
770
+ hidden_states = hidden_states[:, text_seq_length:]
771
+
772
+ # 4. Final block
773
+ hidden_states = self.norm_out(hidden_states, temb=emb)
774
+ hidden_states = self.proj_out(hidden_states)
775
+
776
+ # 5. Unpatchify
777
+ # Note: we use `-1` instead of `channels`:
778
+ # - It is okay to use `channels` for ConsisID (the number of input channels equals the number of output channels)
779
+ p = self.config.patch_size
780
+ output = hidden_states.reshape(batch_size, num_frames, height // p, width // p, -1, p, p)
781
+ output = output.permute(0, 1, 4, 2, 5, 3, 6).flatten(5, 6).flatten(3, 4)
782
+
783
+ if USE_PEFT_BACKEND:
784
+ # remove `lora_scale` from each PEFT layer
785
+ unscale_lora_layers(self, lora_scale)
786
+
787
+ if not return_dict:
788
+ return (output,)
789
+ return Transformer2DModelOutput(sample=output)
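
For orientation, the identity branch defined above reduces to a simple tensor contract: `LocalFacialExtractor` fuses one identity embedding with five multi-scale ViT hidden states into 32 face tokens, matching the shape comments inside its `forward`. A minimal sketch, assuming the module path from this diff and the default LFE_* values listed in the transformer config (not verified against a packaged release):

import torch
# Assumed import path, taken from the file added in this commit.
from diffusers.models.transformers.consisid_transformer_3d import LocalFacialExtractor

extractor = LocalFacialExtractor(
    id_dim=1280, vit_dim=1024, depth=10, dim_head=64, heads=16,
    num_id_token=5, num_queries=32, output_dim=2048, ff_mult=4, num_scale=5,
)
id_embeds = torch.randn(1, 1280)                                    # identity embedding
vit_hidden_states = [torch.randn(1, 577, 1024) for _ in range(5)]   # five ViT feature scales
face_tokens = extractor(id_embeds, vit_hidden_states)               # -> (1, 32, 2048)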
exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/models/transformers/dit_transformer_2d.py ADDED
@@ -0,0 +1,226 @@
1
+ # Copyright 2025 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import Any, Dict, Optional
15
+
16
+ import torch
17
+ import torch.nn.functional as F
18
+ from torch import nn
19
+
20
+ from ...configuration_utils import ConfigMixin, register_to_config
21
+ from ...utils import logging
22
+ from ..attention import BasicTransformerBlock
23
+ from ..embeddings import PatchEmbed
24
+ from ..modeling_outputs import Transformer2DModelOutput
25
+ from ..modeling_utils import ModelMixin
26
+
27
+
28
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
29
+
30
+
31
+ class DiTTransformer2DModel(ModelMixin, ConfigMixin):
32
+ r"""
33
+ A 2D Transformer model as introduced in DiT (https://huggingface.co/papers/2212.09748).
34
+
35
+ Parameters:
36
+ num_attention_heads (int, optional, defaults to 16): The number of heads to use for multi-head attention.
37
+ attention_head_dim (int, optional, defaults to 72): The number of channels in each head.
38
+ in_channels (int, defaults to 4): The number of channels in the input.
39
+ out_channels (int, optional):
40
+ The number of channels in the output. Specify this parameter if the output channel number differs from the
41
+ input.
42
+ num_layers (int, optional, defaults to 28): The number of layers of Transformer blocks to use.
43
+ dropout (float, optional, defaults to 0.0): The dropout probability to use within the Transformer blocks.
44
+ norm_num_groups (int, optional, defaults to 32):
45
+ Number of groups for group normalization within Transformer blocks.
46
+ attention_bias (bool, optional, defaults to True):
47
+ Configure if the Transformer blocks' attention should contain a bias parameter.
48
+ sample_size (int, defaults to 32):
49
+ The width of the latent images. This parameter is fixed during training.
50
+ patch_size (int, defaults to 2):
51
+ Size of the patches the model processes, relevant for architectures working on non-sequential data.
52
+ activation_fn (str, optional, defaults to "gelu-approximate"):
53
+ Activation function to use in feed-forward networks within Transformer blocks.
54
+ num_embeds_ada_norm (int, optional, defaults to 1000):
55
+ Number of embeddings for AdaLayerNorm, fixed during training and affects the maximum denoising steps during
56
+ inference.
57
+ upcast_attention (bool, optional, defaults to False):
58
+ If true, upcasts the attention mechanism dimensions for potentially improved performance.
59
+ norm_type (str, optional, defaults to "ada_norm_zero"):
60
+ Specifies the type of normalization used, can be 'ada_norm_zero'.
61
+ norm_elementwise_affine (bool, optional, defaults to False):
62
+ If true, enables element-wise affine parameters in the normalization layers.
63
+ norm_eps (float, optional, defaults to 1e-5):
64
+ A small constant added to the denominator in normalization layers to prevent division by zero.
65
+ """
66
+
67
+ _skip_layerwise_casting_patterns = ["pos_embed", "norm"]
68
+ _supports_gradient_checkpointing = True
69
+ _supports_group_offloading = False
70
+
71
+ @register_to_config
72
+ def __init__(
73
+ self,
74
+ num_attention_heads: int = 16,
75
+ attention_head_dim: int = 72,
76
+ in_channels: int = 4,
77
+ out_channels: Optional[int] = None,
78
+ num_layers: int = 28,
79
+ dropout: float = 0.0,
80
+ norm_num_groups: int = 32,
81
+ attention_bias: bool = True,
82
+ sample_size: int = 32,
83
+ patch_size: int = 2,
84
+ activation_fn: str = "gelu-approximate",
85
+ num_embeds_ada_norm: Optional[int] = 1000,
86
+ upcast_attention: bool = False,
87
+ norm_type: str = "ada_norm_zero",
88
+ norm_elementwise_affine: bool = False,
89
+ norm_eps: float = 1e-5,
90
+ ):
91
+ super().__init__()
92
+
93
+ # Validate inputs.
94
+ if norm_type != "ada_norm_zero":
95
+ raise NotImplementedError(
96
+ f"Forward pass is not implemented when `patch_size` is not None and `norm_type` is '{norm_type}'."
97
+ )
98
+ elif norm_type == "ada_norm_zero" and num_embeds_ada_norm is None:
99
+ raise ValueError(
100
+ f"When using a `patch_size` and this `norm_type` ({norm_type}), `num_embeds_ada_norm` cannot be None."
101
+ )
102
+
103
+ # Set some common variables used across the board.
104
+ self.attention_head_dim = attention_head_dim
105
+ self.inner_dim = self.config.num_attention_heads * self.config.attention_head_dim
106
+ self.out_channels = in_channels if out_channels is None else out_channels
107
+ self.gradient_checkpointing = False
108
+
109
+ # 2. Initialize the position embedding and transformer blocks.
110
+ self.height = self.config.sample_size
111
+ self.width = self.config.sample_size
112
+
113
+ self.patch_size = self.config.patch_size
114
+ self.pos_embed = PatchEmbed(
115
+ height=self.config.sample_size,
116
+ width=self.config.sample_size,
117
+ patch_size=self.config.patch_size,
118
+ in_channels=self.config.in_channels,
119
+ embed_dim=self.inner_dim,
120
+ )
121
+
122
+ self.transformer_blocks = nn.ModuleList(
123
+ [
124
+ BasicTransformerBlock(
125
+ self.inner_dim,
126
+ self.config.num_attention_heads,
127
+ self.config.attention_head_dim,
128
+ dropout=self.config.dropout,
129
+ activation_fn=self.config.activation_fn,
130
+ num_embeds_ada_norm=self.config.num_embeds_ada_norm,
131
+ attention_bias=self.config.attention_bias,
132
+ upcast_attention=self.config.upcast_attention,
133
+ norm_type=norm_type,
134
+ norm_elementwise_affine=self.config.norm_elementwise_affine,
135
+ norm_eps=self.config.norm_eps,
136
+ )
137
+ for _ in range(self.config.num_layers)
138
+ ]
139
+ )
140
+
141
+ # 3. Output blocks.
142
+ self.norm_out = nn.LayerNorm(self.inner_dim, elementwise_affine=False, eps=1e-6)
143
+ self.proj_out_1 = nn.Linear(self.inner_dim, 2 * self.inner_dim)
144
+ self.proj_out_2 = nn.Linear(
145
+ self.inner_dim, self.config.patch_size * self.config.patch_size * self.out_channels
146
+ )
147
+
148
+ def forward(
149
+ self,
150
+ hidden_states: torch.Tensor,
151
+ timestep: Optional[torch.LongTensor] = None,
152
+ class_labels: Optional[torch.LongTensor] = None,
153
+ cross_attention_kwargs: Dict[str, Any] = None,
154
+ return_dict: bool = True,
155
+ ):
156
+ """
157
+ The [`DiTTransformer2DModel`] forward method.
158
+
159
+ Args:
160
+ hidden_states (`torch.LongTensor` of shape `(batch size, num latent pixels)` if discrete, `torch.FloatTensor` of shape `(batch size, channel, height, width)` if continuous):
161
+ Input `hidden_states`.
162
+ timestep ( `torch.LongTensor`, *optional*):
163
+ Used to indicate denoising step. Optional timestep to be applied as an embedding in `AdaLayerNorm`.
164
+ class_labels ( `torch.LongTensor` of shape `(batch size, num classes)`, *optional*):
165
+ Used to indicate class labels conditioning. Optional class labels to be applied as an embedding in
166
+ `AdaLayerZeroNorm`.
167
+ cross_attention_kwargs ( `Dict[str, Any]`, *optional*):
168
+ A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
169
+ `self.processor` in
170
+ [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
171
+ return_dict (`bool`, *optional*, defaults to `True`):
172
+ Whether or not to return a [`~models.unets.unet_2d_condition.UNet2DConditionOutput`] instead of a plain
173
+ tuple.
174
+
175
+ Returns:
176
+ If `return_dict` is True, an [`~models.transformer_2d.Transformer2DModelOutput`] is returned, otherwise a
177
+ `tuple` where the first element is the sample tensor.
178
+ """
179
+ # 1. Input
180
+ height, width = hidden_states.shape[-2] // self.patch_size, hidden_states.shape[-1] // self.patch_size
181
+ hidden_states = self.pos_embed(hidden_states)
182
+
183
+ # 2. Blocks
184
+ for block in self.transformer_blocks:
185
+ if torch.is_grad_enabled() and self.gradient_checkpointing:
186
+ hidden_states = self._gradient_checkpointing_func(
187
+ block,
188
+ hidden_states,
189
+ None,
190
+ None,
191
+ None,
192
+ timestep,
193
+ cross_attention_kwargs,
194
+ class_labels,
195
+ )
196
+ else:
197
+ hidden_states = block(
198
+ hidden_states,
199
+ attention_mask=None,
200
+ encoder_hidden_states=None,
201
+ encoder_attention_mask=None,
202
+ timestep=timestep,
203
+ cross_attention_kwargs=cross_attention_kwargs,
204
+ class_labels=class_labels,
205
+ )
206
+
207
+ # 3. Output
208
+ conditioning = self.transformer_blocks[0].norm1.emb(timestep, class_labels, hidden_dtype=hidden_states.dtype)
209
+ shift, scale = self.proj_out_1(F.silu(conditioning)).chunk(2, dim=1)
210
+ hidden_states = self.norm_out(hidden_states) * (1 + scale[:, None]) + shift[:, None]
211
+ hidden_states = self.proj_out_2(hidden_states)
212
+
213
+ # unpatchify
214
+ height = width = int(hidden_states.shape[1] ** 0.5)
215
+ hidden_states = hidden_states.reshape(
216
+ shape=(-1, height, width, self.patch_size, self.patch_size, self.out_channels)
217
+ )
218
+ hidden_states = torch.einsum("nhwpqc->nchpwq", hidden_states)
219
+ output = hidden_states.reshape(
220
+ shape=(-1, self.out_channels, height * self.patch_size, width * self.patch_size)
221
+ )
222
+
223
+ if not return_dict:
224
+ return (output,)
225
+
226
+ return Transformer2DModelOutput(sample=output)
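
The `DiTTransformer2DModel` above is class-conditional and patch-based, so its forward pass needs latents, a timestep, and class labels. A minimal sketch with the default config and random inputs (the import path is an assumption based on this diff; shapes follow the unpatchify step above):

import torch
from diffusers.models.transformers.dit_transformer_2d import DiTTransformer2DModel  # assumed path

model = DiTTransformer2DModel()              # defaults: 16 heads x 72 dims, 28 layers, patch_size=2, sample_size=32
latents = torch.randn(2, 4, 32, 32)          # (batch, in_channels, height, width)
timestep = torch.randint(0, 1000, (2,))      # denoising step consumed by AdaLayerNormZero
class_labels = torch.randint(0, 1000, (2,))  # one of num_embeds_ada_norm classes per sample
out = model(latents, timestep=timestep, class_labels=class_labels).sample
print(out.shape)                             # (2, 4, 32, 32): out_channels defaults to in_channels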
exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/models/transformers/dual_transformer_2d.py ADDED
@@ -0,0 +1,156 @@
1
+ # Copyright 2025 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import Optional
15
+
16
+ from torch import nn
17
+
18
+ from ..modeling_outputs import Transformer2DModelOutput
19
+ from .transformer_2d import Transformer2DModel
20
+
21
+
22
+ class DualTransformer2DModel(nn.Module):
23
+ """
24
+ Dual transformer wrapper that combines two `Transformer2DModel`s for mixed inference.
25
+
26
+ Parameters:
27
+ num_attention_heads (`int`, *optional*, defaults to 16): The number of heads to use for multi-head attention.
28
+ attention_head_dim (`int`, *optional*, defaults to 88): The number of channels in each head.
29
+ in_channels (`int`, *optional*):
30
+ Pass if the input is continuous. The number of channels in the input and output.
31
+ num_layers (`int`, *optional*, defaults to 1): The number of layers of Transformer blocks to use.
32
+ dropout (`float`, *optional*, defaults to 0.1): The dropout probability to use.
33
+ cross_attention_dim (`int`, *optional*): The number of encoder_hidden_states dimensions to use.
34
+ sample_size (`int`, *optional*): Pass if the input is discrete. The width of the latent images.
35
+ Note that this is fixed at training time as it is used for learning a number of position embeddings. See
36
+ `ImagePositionalEmbeddings`.
37
+ num_vector_embeds (`int`, *optional*):
38
+ Pass if the input is discrete. The number of classes of the vector embeddings of the latent pixels.
39
+ Includes the class for the masked latent pixel.
40
+ activation_fn (`str`, *optional*, defaults to `"geglu"`): Activation function to be used in feed-forward.
41
+ num_embeds_ada_norm ( `int`, *optional*): Pass if at least one of the norm_layers is `AdaLayerNorm`.
42
+ The number of diffusion steps used during training. Note that this is fixed at training time as it is used
43
+ to learn a number of embeddings that are added to the hidden states. During inference, you can denoise for
44
+ up to but not more than steps than `num_embeds_ada_norm`.
45
+ attention_bias (`bool`, *optional*):
46
+ Configure if the TransformerBlocks' attention should contain a bias parameter.
47
+ """
48
+
49
+ def __init__(
50
+ self,
51
+ num_attention_heads: int = 16,
52
+ attention_head_dim: int = 88,
53
+ in_channels: Optional[int] = None,
54
+ num_layers: int = 1,
55
+ dropout: float = 0.0,
56
+ norm_num_groups: int = 32,
57
+ cross_attention_dim: Optional[int] = None,
58
+ attention_bias: bool = False,
59
+ sample_size: Optional[int] = None,
60
+ num_vector_embeds: Optional[int] = None,
61
+ activation_fn: str = "geglu",
62
+ num_embeds_ada_norm: Optional[int] = None,
63
+ ):
64
+ super().__init__()
65
+ self.transformers = nn.ModuleList(
66
+ [
67
+ Transformer2DModel(
68
+ num_attention_heads=num_attention_heads,
69
+ attention_head_dim=attention_head_dim,
70
+ in_channels=in_channels,
71
+ num_layers=num_layers,
72
+ dropout=dropout,
73
+ norm_num_groups=norm_num_groups,
74
+ cross_attention_dim=cross_attention_dim,
75
+ attention_bias=attention_bias,
76
+ sample_size=sample_size,
77
+ num_vector_embeds=num_vector_embeds,
78
+ activation_fn=activation_fn,
79
+ num_embeds_ada_norm=num_embeds_ada_norm,
80
+ )
81
+ for _ in range(2)
82
+ ]
83
+ )
84
+
85
+ # Variables that can be set by a pipeline:
86
+
87
+ # The ratio of transformer1 to transformer2's output states to be combined during inference
88
+ self.mix_ratio = 0.5
89
+
90
+ # The shape of `encoder_hidden_states` is expected to be
91
+ # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
92
+ self.condition_lengths = [77, 257]
93
+
94
+ # Which transformer to use to encode which condition.
95
+ # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
96
+ self.transformer_index_for_condition = [1, 0]
97
+
98
+ def forward(
99
+ self,
100
+ hidden_states,
101
+ encoder_hidden_states,
102
+ timestep=None,
103
+ attention_mask=None,
104
+ cross_attention_kwargs=None,
105
+ return_dict: bool = True,
106
+ ):
107
+ """
108
+ Args:
109
+ hidden_states ( When discrete, `torch.LongTensor` of shape `(batch size, num latent pixels)`.
110
+ When continuous, `torch.Tensor` of shape `(batch size, channel, height, width)`): Input hidden_states.
111
+ encoder_hidden_states ( `torch.LongTensor` of shape `(batch size, encoder_hidden_states dim)`, *optional*):
112
+ Conditional embeddings for cross attention layer. If not given, cross-attention defaults to
113
+ self-attention.
114
+ timestep ( `torch.long`, *optional*):
115
+ Optional timestep to be applied as an embedding in AdaLayerNorm's. Used to indicate denoising step.
116
+ attention_mask (`torch.Tensor`, *optional*):
117
+ Optional attention mask to be applied in Attention.
118
+ cross_attention_kwargs (`dict`, *optional*):
119
+ A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
120
+ `self.processor` in
121
+ [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
122
+ return_dict (`bool`, *optional*, defaults to `True`):
123
+ Whether or not to return a [`models.unets.unet_2d_condition.UNet2DConditionOutput`] instead of a plain
124
+ tuple.
125
+
126
+ Returns:
127
+ [`~models.transformers.transformer_2d.Transformer2DModelOutput`] or `tuple`:
128
+ [`~models.transformers.transformer_2d.Transformer2DModelOutput`] if `return_dict` is True, otherwise a
129
+ `tuple`. When returning a tuple, the first element is the sample tensor.
130
+ """
131
+ input_states = hidden_states
132
+
133
+ encoded_states = []
134
+ tokens_start = 0
135
+ # attention_mask is not used yet
136
+ for i in range(2):
137
+ # for each of the two transformers, pass the corresponding condition tokens
138
+ condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
139
+ transformer_index = self.transformer_index_for_condition[i]
140
+ encoded_state = self.transformers[transformer_index](
141
+ input_states,
142
+ encoder_hidden_states=condition_state,
143
+ timestep=timestep,
144
+ cross_attention_kwargs=cross_attention_kwargs,
145
+ return_dict=False,
146
+ )[0]
147
+ encoded_states.append(encoded_state - input_states)
148
+ tokens_start += self.condition_lengths[i]
149
+
150
+ output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
151
+ output_states = output_states + input_states
152
+
153
+ if not return_dict:
154
+ return (output_states,)
155
+
156
+ return Transformer2DModelOutput(sample=output_states)
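
Because the dual wrapper blends the two transformers' residuals rather than their raw outputs, the mixing step in `forward` is worth isolating. A small stand-alone sketch of just the condition split and blend (dummy tensors stand in for the two `Transformer2DModel` calls; the dimensions are illustrative, not prescribed by the class):

import torch

condition_lengths = [77, 257]     # e.g. CLIP text tokens, then image-context tokens
mix_ratio = 0.5

encoder_hidden_states = torch.randn(1, sum(condition_lengths), 768)
input_states = torch.randn(1, 4096, 320)      # flattened latent tokens

encoded_states, tokens_start = [], 0
for i in range(2):
    condition_state = encoder_hidden_states[:, tokens_start : tokens_start + condition_lengths[i]]
    # Placeholder for transformers[transformer_index_for_condition[i]](input_states, condition_state, ...)
    encoded_state = input_states + 0.1 * torch.randn_like(input_states)
    encoded_states.append(encoded_state - input_states)  # keep only each transformer's residual
    tokens_start += condition_lengths[i]

output_states = encoded_states[0] * mix_ratio + encoded_states[1] * (1 - mix_ratio)
output_states = output_states + input_states  # add the shared input back once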
exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/models/transformers/hunyuan_transformer_2d.py ADDED
@@ -0,0 +1,579 @@
1
+ # Copyright 2025 HunyuanDiT Authors, Qixun Wang and The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import Dict, Optional, Union
15
+
16
+ import torch
17
+ from torch import nn
18
+
19
+ from ...configuration_utils import ConfigMixin, register_to_config
20
+ from ...utils import logging
21
+ from ...utils.torch_utils import maybe_allow_in_graph
22
+ from ..attention import FeedForward
23
+ from ..attention_processor import Attention, AttentionProcessor, FusedHunyuanAttnProcessor2_0, HunyuanAttnProcessor2_0
24
+ from ..embeddings import (
25
+ HunyuanCombinedTimestepTextSizeStyleEmbedding,
26
+ PatchEmbed,
27
+ PixArtAlphaTextProjection,
28
+ )
29
+ from ..modeling_outputs import Transformer2DModelOutput
30
+ from ..modeling_utils import ModelMixin
31
+ from ..normalization import AdaLayerNormContinuous, FP32LayerNorm
32
+
33
+
34
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
35
+
36
+
37
+ class AdaLayerNormShift(nn.Module):
38
+ r"""
39
+ Norm layer modified to incorporate timestep embeddings.
40
+
41
+ Parameters:
42
+ embedding_dim (`int`): The size of each embedding vector.
43
+ num_embeddings (`int`): The size of the embeddings dictionary.
44
+ """
45
+
46
+ def __init__(self, embedding_dim: int, elementwise_affine=True, eps=1e-6):
47
+ super().__init__()
48
+ self.silu = nn.SiLU()
49
+ self.linear = nn.Linear(embedding_dim, embedding_dim)
50
+ self.norm = FP32LayerNorm(embedding_dim, elementwise_affine=elementwise_affine, eps=eps)
51
+
52
+ def forward(self, x: torch.Tensor, emb: torch.Tensor) -> torch.Tensor:
53
+ shift = self.linear(self.silu(emb.to(torch.float32)).to(emb.dtype))
54
+ x = self.norm(x) + shift.unsqueeze(dim=1)
55
+ return x
56
+
57
+
58
+ @maybe_allow_in_graph
59
+ class HunyuanDiTBlock(nn.Module):
60
+ r"""
61
+ Transformer block used in the Hunyuan-DiT model (https://github.com/Tencent/HunyuanDiT). Allows skip connections and
62
+ QK normalization.
63
+
64
+ Parameters:
65
+ dim (`int`):
66
+ The number of channels in the input and output.
67
+ num_attention_heads (`int`):
68
+ The number of headsto use for multi-head attention.
69
+ cross_attention_dim (`int`, *optional*):
70
+ The size of the encoder_hidden_states vector for cross attention.
71
+ dropout (`float`, *optional*, defaults to 0.0):
72
+ The dropout probability to use.
73
+ activation_fn (`str`, *optional*, defaults to `"geglu"`):
74
+ Activation function to be used in feed-forward.
75
+ norm_elementwise_affine (`bool`, *optional*, defaults to `True`):
76
+ Whether to use learnable elementwise affine parameters for normalization.
77
+ norm_eps (`float`, *optional*, defaults to 1e-6):
78
+ A small constant added to the denominator in normalization layers to prevent division by zero.
79
+ final_dropout (`bool` *optional*, defaults to False):
80
+ Whether to apply a final dropout after the last feed-forward layer.
81
+ ff_inner_dim (`int`, *optional*):
82
+ The size of the hidden layer in the feed-forward block. Defaults to `None`.
83
+ ff_bias (`bool`, *optional*, defaults to `True`):
84
+ Whether to use bias in the feed-forward block.
85
+ skip (`bool`, *optional*, defaults to `False`):
86
+ Whether to use skip connection. Defaults to `False` for down-blocks and mid-blocks.
87
+ qk_norm (`bool`, *optional*, defaults to `True`):
88
+ Whether to use normalization in QK calculation. Defaults to `True`.
89
+ """
90
+
91
+ def __init__(
92
+ self,
93
+ dim: int,
94
+ num_attention_heads: int,
95
+ cross_attention_dim: int = 1024,
96
+ dropout=0.0,
97
+ activation_fn: str = "geglu",
98
+ norm_elementwise_affine: bool = True,
99
+ norm_eps: float = 1e-6,
100
+ final_dropout: bool = False,
101
+ ff_inner_dim: Optional[int] = None,
102
+ ff_bias: bool = True,
103
+ skip: bool = False,
104
+ qk_norm: bool = True,
105
+ ):
106
+ super().__init__()
107
+
108
+ # Define 3 blocks. Each block has its own normalization layer.
109
+ # NOTE: when new version comes, check norm2 and norm 3
110
+ # 1. Self-Attn
111
+ self.norm1 = AdaLayerNormShift(dim, elementwise_affine=norm_elementwise_affine, eps=norm_eps)
112
+
113
+ self.attn1 = Attention(
114
+ query_dim=dim,
115
+ cross_attention_dim=None,
116
+ dim_head=dim // num_attention_heads,
117
+ heads=num_attention_heads,
118
+ qk_norm="layer_norm" if qk_norm else None,
119
+ eps=1e-6,
120
+ bias=True,
121
+ processor=HunyuanAttnProcessor2_0(),
122
+ )
123
+
124
+ # 2. Cross-Attn
125
+ self.norm2 = FP32LayerNorm(dim, norm_eps, norm_elementwise_affine)
126
+
127
+ self.attn2 = Attention(
128
+ query_dim=dim,
129
+ cross_attention_dim=cross_attention_dim,
130
+ dim_head=dim // num_attention_heads,
131
+ heads=num_attention_heads,
132
+ qk_norm="layer_norm" if qk_norm else None,
133
+ eps=1e-6,
134
+ bias=True,
135
+ processor=HunyuanAttnProcessor2_0(),
136
+ )
137
+ # 3. Feed-forward
138
+ self.norm3 = FP32LayerNorm(dim, norm_eps, norm_elementwise_affine)
139
+
140
+ self.ff = FeedForward(
141
+ dim,
142
+ dropout=dropout, ### 0.0
143
+ activation_fn=activation_fn, ### approx GeLU
144
+ final_dropout=final_dropout, ### 0.0
145
+ inner_dim=ff_inner_dim, ### int(dim * mlp_ratio)
146
+ bias=ff_bias,
147
+ )
148
+
149
+ # 4. Skip Connection
150
+ if skip:
151
+ self.skip_norm = FP32LayerNorm(2 * dim, norm_eps, elementwise_affine=True)
152
+ self.skip_linear = nn.Linear(2 * dim, dim)
153
+ else:
154
+ self.skip_linear = None
155
+
156
+ # let chunk size default to None
157
+ self._chunk_size = None
158
+ self._chunk_dim = 0
159
+
160
+ # Copied from diffusers.models.attention.BasicTransformerBlock.set_chunk_feed_forward
161
+ def set_chunk_feed_forward(self, chunk_size: Optional[int], dim: int = 0):
162
+ # Sets chunk feed-forward
163
+ self._chunk_size = chunk_size
164
+ self._chunk_dim = dim
165
+
166
+ def forward(
167
+ self,
168
+ hidden_states: torch.Tensor,
169
+ encoder_hidden_states: Optional[torch.Tensor] = None,
170
+ temb: Optional[torch.Tensor] = None,
171
+ image_rotary_emb=None,
172
+ skip=None,
173
+ ) -> torch.Tensor:
174
+ # Notice that normalization is always applied before the real computation in the following blocks.
175
+ # 0. Long Skip Connection
176
+ if self.skip_linear is not None:
177
+ cat = torch.cat([hidden_states, skip], dim=-1)
178
+ cat = self.skip_norm(cat)
179
+ hidden_states = self.skip_linear(cat)
180
+
181
+ # 1. Self-Attention
182
+ norm_hidden_states = self.norm1(hidden_states, temb) ### checked: self.norm1 is correct
183
+ attn_output = self.attn1(
184
+ norm_hidden_states,
185
+ image_rotary_emb=image_rotary_emb,
186
+ )
187
+ hidden_states = hidden_states + attn_output
188
+
189
+ # 2. Cross-Attention
190
+ hidden_states = hidden_states + self.attn2(
191
+ self.norm2(hidden_states),
192
+ encoder_hidden_states=encoder_hidden_states,
193
+ image_rotary_emb=image_rotary_emb,
194
+ )
195
+
196
+ # FFN Layer ### TODO: switch norm2 and norm3 in the state dict
197
+ mlp_inputs = self.norm3(hidden_states)
198
+ hidden_states = hidden_states + self.ff(mlp_inputs)
199
+
200
+ return hidden_states
201
+
202
+
203
+ class HunyuanDiT2DModel(ModelMixin, ConfigMixin):
204
+ """
205
+ HunYuanDiT: Diffusion model with a Transformer backbone.
206
+
207
+ Inherit ModelMixin and ConfigMixin to be compatible with the sampler StableDiffusionPipeline of diffusers.
208
+
209
+ Parameters:
210
+ num_attention_heads (`int`, *optional*, defaults to 16):
211
+ The number of heads to use for multi-head attention.
212
+ attention_head_dim (`int`, *optional*, defaults to 88):
213
+ The number of channels in each head.
214
+ in_channels (`int`, *optional*):
215
+ The number of channels in the input and output (specify if the input is **continuous**).
216
+ patch_size (`int`, *optional*):
217
+ The size of the patch to use for the input.
218
+ activation_fn (`str`, *optional*, defaults to `"geglu"`):
219
+ Activation function to use in feed-forward.
220
+ sample_size (`int`, *optional*):
221
+ The width of the latent images. This is fixed during training since it is used to learn a number of
222
+ position embeddings.
223
+ dropout (`float`, *optional*, defaults to 0.0):
224
+ The dropout probability to use.
225
+ cross_attention_dim (`int`, *optional*):
226
+ The number of dimensions in the CLIP text embedding.
227
+ hidden_size (`int`, *optional*):
228
+ The size of hidden layer in the conditioning embedding layers.
229
+ num_layers (`int`, *optional*, defaults to 1):
230
+ The number of layers of Transformer blocks to use.
231
+ mlp_ratio (`float`, *optional*, defaults to 4.0):
232
+ The ratio of the hidden layer size to the input size.
233
+ learn_sigma (`bool`, *optional*, defaults to `True`):
234
+ Whether to predict variance.
235
+ cross_attention_dim_t5 (`int`, *optional*):
236
+ The number of dimensions in the T5 text embedding.
237
+ pooled_projection_dim (`int`, *optional*):
238
+ The size of the pooled projection.
239
+ text_len (`int`, *optional*):
240
+ The length of the clip text embedding.
241
+ text_len_t5 (`int`, *optional*):
242
+ The length of the T5 text embedding.
243
+ use_style_cond_and_image_meta_size (`bool`, *optional*):
244
+ Whether or not to use style condition and image meta size. True for version <=1.1, False for version >= 1.2
245
+ """
246
+
247
+ _skip_layerwise_casting_patterns = ["pos_embed", "norm", "pooler"]
248
+ _supports_group_offloading = False
249
+
250
+ @register_to_config
251
+ def __init__(
252
+ self,
253
+ num_attention_heads: int = 16,
254
+ attention_head_dim: int = 88,
255
+ in_channels: Optional[int] = None,
256
+ patch_size: Optional[int] = None,
257
+ activation_fn: str = "gelu-approximate",
258
+ sample_size=32,
259
+ hidden_size=1152,
260
+ num_layers: int = 28,
261
+ mlp_ratio: float = 4.0,
262
+ learn_sigma: bool = True,
263
+ cross_attention_dim: int = 1024,
264
+ norm_type: str = "layer_norm",
265
+ cross_attention_dim_t5: int = 2048,
266
+ pooled_projection_dim: int = 1024,
267
+ text_len: int = 77,
268
+ text_len_t5: int = 256,
269
+ use_style_cond_and_image_meta_size: bool = True,
270
+ ):
271
+ super().__init__()
272
+ self.out_channels = in_channels * 2 if learn_sigma else in_channels
273
+ self.num_heads = num_attention_heads
274
+ self.inner_dim = num_attention_heads * attention_head_dim
275
+
276
+ self.text_embedder = PixArtAlphaTextProjection(
277
+ in_features=cross_attention_dim_t5,
278
+ hidden_size=cross_attention_dim_t5 * 4,
279
+ out_features=cross_attention_dim,
280
+ act_fn="silu_fp32",
281
+ )
282
+
283
+ self.text_embedding_padding = nn.Parameter(torch.randn(text_len + text_len_t5, cross_attention_dim))
284
+
285
+ self.pos_embed = PatchEmbed(
286
+ height=sample_size,
287
+ width=sample_size,
288
+ in_channels=in_channels,
289
+ embed_dim=hidden_size,
290
+ patch_size=patch_size,
291
+ pos_embed_type=None,
292
+ )
293
+
294
+ self.time_extra_emb = HunyuanCombinedTimestepTextSizeStyleEmbedding(
295
+ hidden_size,
296
+ pooled_projection_dim=pooled_projection_dim,
297
+ seq_len=text_len_t5,
298
+ cross_attention_dim=cross_attention_dim_t5,
299
+ use_style_cond_and_image_meta_size=use_style_cond_and_image_meta_size,
300
+ )
301
+
302
+ # HunyuanDiT Blocks
303
+ self.blocks = nn.ModuleList(
304
+ [
305
+ HunyuanDiTBlock(
306
+ dim=self.inner_dim,
307
+ num_attention_heads=self.config.num_attention_heads,
308
+ activation_fn=activation_fn,
309
+ ff_inner_dim=int(self.inner_dim * mlp_ratio),
310
+ cross_attention_dim=cross_attention_dim,
311
+ qk_norm=True, # See https://huggingface.co/papers/2302.05442 for details.
312
+ skip=layer > num_layers // 2,
313
+ )
314
+ for layer in range(num_layers)
315
+ ]
316
+ )
317
+
318
+ self.norm_out = AdaLayerNormContinuous(self.inner_dim, self.inner_dim, elementwise_affine=False, eps=1e-6)
319
+ self.proj_out = nn.Linear(self.inner_dim, patch_size * patch_size * self.out_channels, bias=True)
320
+
321
+ # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.fuse_qkv_projections with FusedAttnProcessor2_0->FusedHunyuanAttnProcessor2_0
322
+ def fuse_qkv_projections(self):
323
+ """
324
+ Enables fused QKV projections. For self-attention modules, all projection matrices (i.e., query, key, value)
325
+ are fused. For cross-attention modules, key and value projection matrices are fused.
326
+
327
+ <Tip warning={true}>
328
+
329
+ This API is 🧪 experimental.
330
+
331
+ </Tip>
332
+ """
333
+ self.original_attn_processors = None
334
+
335
+ for _, attn_processor in self.attn_processors.items():
336
+ if "Added" in str(attn_processor.__class__.__name__):
337
+ raise ValueError("`fuse_qkv_projections()` is not supported for models having added KV projections.")
338
+
339
+ self.original_attn_processors = self.attn_processors
340
+
341
+ for module in self.modules():
342
+ if isinstance(module, Attention):
343
+ module.fuse_projections(fuse=True)
344
+
345
+ self.set_attn_processor(FusedHunyuanAttnProcessor2_0())
346
+
347
+ # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.unfuse_qkv_projections
348
+ def unfuse_qkv_projections(self):
349
+ """Disables the fused QKV projection if enabled.
350
+
351
+ <Tip warning={true}>
352
+
353
+ This API is 🧪 experimental.
354
+
355
+ </Tip>
356
+
357
+ """
358
+ if self.original_attn_processors is not None:
359
+ self.set_attn_processor(self.original_attn_processors)
360
+
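A minimal usage sketch (not part of the diff itself) for the fused-QKV toggle above; the tiny configuration values are assumptions chosen only to keep the example light, and real checkpoints use much larger dimensions.

from diffusers import HunyuanDiT2DModel

# Illustrative, reduced configuration (assumed values).
model = HunyuanDiT2DModel(
    num_attention_heads=2, attention_head_dim=8, in_channels=4, patch_size=2,
    sample_size=16, hidden_size=16, num_layers=2,
)
model.fuse_qkv_projections()    # q/k/v (self-attn) and k/v (cross-attn) projections are fused
model.unfuse_qkv_projections()  # the processors saved in `original_attn_processors` are restored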
361
+ @property
362
+ # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.attn_processors
363
+ def attn_processors(self) -> Dict[str, AttentionProcessor]:
364
+ r"""
365
+ Returns:
366
+ `dict` of attention processors: A dictionary containing all attention processors used in the model,
367
+ indexed by its weight name.
368
+ """
369
+ # set recursively
370
+ processors = {}
371
+
372
+ def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
373
+ if hasattr(module, "get_processor"):
374
+ processors[f"{name}.processor"] = module.get_processor()
375
+
376
+ for sub_name, child in module.named_children():
377
+ fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)
378
+
379
+ return processors
380
+
381
+ for name, module in self.named_children():
382
+ fn_recursive_add_processors(name, module, processors)
383
+
384
+ return processors
385
+
386
+ # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_attn_processor
387
+ def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
388
+ r"""
389
+ Sets the attention processor to use to compute attention.
390
+
391
+ Parameters:
392
+ processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`):
393
+ The instantiated processor class or a dictionary of processor classes that will be set as the processor
394
+ for **all** `Attention` layers.
395
+
396
+ If `processor` is a dict, the key needs to define the path to the corresponding cross attention
397
+ processor. This is strongly recommended when setting trainable attention processors.
398
+
399
+ """
400
+ count = len(self.attn_processors.keys())
401
+
402
+ if isinstance(processor, dict) and len(processor) != count:
403
+ raise ValueError(
404
+ f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
405
+ f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
406
+ )
407
+
408
+ def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
409
+ if hasattr(module, "set_processor"):
410
+ if not isinstance(processor, dict):
411
+ module.set_processor(processor)
412
+ else:
413
+ module.set_processor(processor.pop(f"{name}.processor"))
414
+
415
+ for sub_name, child in module.named_children():
416
+ fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)
417
+
418
+ for name, module in self.named_children():
419
+ fn_recursive_attn_processor(name, module, processor)
420
+
421
+ def set_default_attn_processor(self):
422
+ """
423
+ Disables custom attention processors and sets the default attention implementation.
424
+ """
425
+ self.set_attn_processor(HunyuanAttnProcessor2_0())
426
+
427
+ def forward(
428
+ self,
429
+ hidden_states,
430
+ timestep,
431
+ encoder_hidden_states=None,
432
+ text_embedding_mask=None,
433
+ encoder_hidden_states_t5=None,
434
+ text_embedding_mask_t5=None,
435
+ image_meta_size=None,
436
+ style=None,
437
+ image_rotary_emb=None,
438
+ controlnet_block_samples=None,
439
+ return_dict=True,
440
+ ):
441
+ """
442
+ The [`HunyuanDiT2DModel`] forward method.
443
+
444
+ Args:
445
+ hidden_states (`torch.Tensor` of shape `(batch size, dim, height, width)`):
446
+ The input tensor.
447
+ timestep ( `torch.LongTensor`, *optional*):
448
+ Used to indicate denoising step.
449
+ encoder_hidden_states ( `torch.Tensor` of shape `(batch size, sequence len, embed dims)`, *optional*):
450
+ Conditional embeddings for cross attention layer. This is the output of `BertModel`.
451
+ text_embedding_mask: torch.Tensor
452
+ An attention mask of shape `(batch, key_tokens)` is applied to `encoder_hidden_states`. This is the output
453
+ of `BertModel`.
454
+ encoder_hidden_states_t5 ( `torch.Tensor` of shape `(batch size, sequence len, embed dims)`, *optional*):
455
+ Conditional embeddings for cross attention layer. This is the output of T5 Text Encoder.
456
+ text_embedding_mask_t5: torch.Tensor
457
+ An attention mask of shape `(batch, key_tokens)` is applied to `encoder_hidden_states`. This is the output
458
+ of T5 Text Encoder.
459
+ image_meta_size (torch.Tensor):
460
+ Conditional embedding indicating the image sizes.
461
+ style (torch.Tensor):
462
+ Conditional embedding indicating the style.
463
+ image_rotary_emb (`torch.Tensor`):
464
+ The image rotary embeddings to apply on query and key tensors during attention calculation.
465
+ return_dict: bool
466
+ Whether to return a dictionary.
467
+ """
468
+
469
+ height, width = hidden_states.shape[-2:]
470
+
471
+ hidden_states = self.pos_embed(hidden_states)
472
+
473
+ temb = self.time_extra_emb(
474
+ timestep, encoder_hidden_states_t5, image_meta_size, style, hidden_dtype=timestep.dtype
475
+ ) # [B, D]
476
+
477
+ # text projection
478
+ batch_size, sequence_length, _ = encoder_hidden_states_t5.shape
479
+ encoder_hidden_states_t5 = self.text_embedder(
480
+ encoder_hidden_states_t5.view(-1, encoder_hidden_states_t5.shape[-1])
481
+ )
482
+ encoder_hidden_states_t5 = encoder_hidden_states_t5.view(batch_size, sequence_length, -1)
483
+
484
+ encoder_hidden_states = torch.cat([encoder_hidden_states, encoder_hidden_states_t5], dim=1)
485
+ text_embedding_mask = torch.cat([text_embedding_mask, text_embedding_mask_t5], dim=-1)
486
+ text_embedding_mask = text_embedding_mask.unsqueeze(2).bool()
487
+
488
+ encoder_hidden_states = torch.where(text_embedding_mask, encoder_hidden_states, self.text_embedding_padding)
489
+
490
+ skips = []
491
+ for layer, block in enumerate(self.blocks):
492
+ if layer > self.config.num_layers // 2:
493
+ if controlnet_block_samples is not None:
494
+ skip = skips.pop() + controlnet_block_samples.pop()
495
+ else:
496
+ skip = skips.pop()
497
+ hidden_states = block(
498
+ hidden_states,
499
+ temb=temb,
500
+ encoder_hidden_states=encoder_hidden_states,
501
+ image_rotary_emb=image_rotary_emb,
502
+ skip=skip,
503
+ ) # (N, L, D)
504
+ else:
505
+ hidden_states = block(
506
+ hidden_states,
507
+ temb=temb,
508
+ encoder_hidden_states=encoder_hidden_states,
509
+ image_rotary_emb=image_rotary_emb,
510
+ ) # (N, L, D)
511
+
512
+ if layer < (self.config.num_layers // 2 - 1):
513
+ skips.append(hidden_states)
514
+
515
+ if controlnet_block_samples is not None and len(controlnet_block_samples) != 0:
516
+ raise ValueError("The number of controls is not equal to the number of skip connections.")
517
+
518
+ # final layer
519
+ hidden_states = self.norm_out(hidden_states, temb.to(torch.float32))
520
+ hidden_states = self.proj_out(hidden_states)
521
+ # (N, L, patch_size ** 2 * out_channels)
522
+
523
+ # unpatchify: (N, out_channels, H, W)
524
+ patch_size = self.pos_embed.patch_size
525
+ height = height // patch_size
526
+ width = width // patch_size
527
+
528
+ hidden_states = hidden_states.reshape(
529
+ shape=(hidden_states.shape[0], height, width, patch_size, patch_size, self.out_channels)
530
+ )
531
+ hidden_states = torch.einsum("nhwpqc->nchpwq", hidden_states)
532
+ output = hidden_states.reshape(
533
+ shape=(hidden_states.shape[0], self.out_channels, height * patch_size, width * patch_size)
534
+ )
535
+ if not return_dict:
536
+ return (output,)
537
+ return Transformer2DModelOutput(sample=output)
538
+
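A standalone, hedged illustration of the unpatchify einsum used at the end of `forward` above: patch-level predictions of shape (N, H/p, W/p, p, p, C) are rearranged back into an image-shaped tensor. The sizes are arbitrary example values.

import torch

n, c, p, hp, wp = 1, 4, 2, 3, 3            # batch, out channels, patch size, patches per side
patches = torch.randn(n, hp, wp, p, p, c)  # same layout as the reshape above produces
image = torch.einsum("nhwpqc->nchpwq", patches).reshape(n, c, hp * p, wp * p)
assert image.shape == (1, 4, 6, 6)         # (N, C, H, W)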
539
+ # Copied from diffusers.models.unets.unet_3d_condition.UNet3DConditionModel.enable_forward_chunking
540
+ def enable_forward_chunking(self, chunk_size: Optional[int] = None, dim: int = 0) -> None:
541
+ """
542
+ Sets the attention processor to use [feed forward
543
+ chunking](https://huggingface.co/blog/reformer#2-chunked-feed-forward-layers).
544
+
545
+ Parameters:
546
+ chunk_size (`int`, *optional*):
547
+ The chunk size of the feed-forward layers. If not specified, will run feed-forward layer individually
548
+ over each tensor of dim=`dim`.
549
+ dim (`int`, *optional*, defaults to `0`):
550
+ The dimension over which the feed-forward computation should be chunked. Choose between dim=0 (batch)
551
+ or dim=1 (sequence length).
552
+ """
553
+ if dim not in [0, 1]:
554
+ raise ValueError(f"Make sure to set `dim` to either 0 or 1, not {dim}")
555
+
556
+ # By default chunk size is 1
557
+ chunk_size = chunk_size or 1
558
+
559
+ def fn_recursive_feed_forward(module: torch.nn.Module, chunk_size: int, dim: int):
560
+ if hasattr(module, "set_chunk_feed_forward"):
561
+ module.set_chunk_feed_forward(chunk_size=chunk_size, dim=dim)
562
+
563
+ for child in module.children():
564
+ fn_recursive_feed_forward(child, chunk_size, dim)
565
+
566
+ for module in self.children():
567
+ fn_recursive_feed_forward(module, chunk_size, dim)
568
+
569
+ # Copied from diffusers.models.unets.unet_3d_condition.UNet3DConditionModel.disable_forward_chunking
570
+ def disable_forward_chunking(self):
571
+ def fn_recursive_feed_forward(module: torch.nn.Module, chunk_size: int, dim: int):
572
+ if hasattr(module, "set_chunk_feed_forward"):
573
+ module.set_chunk_feed_forward(chunk_size=chunk_size, dim=dim)
574
+
575
+ for child in module.children():
576
+ fn_recursive_feed_forward(child, chunk_size, dim)
577
+
578
+ for module in self.children():
579
+ fn_recursive_feed_forward(module, None, 0)
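A short usage sketch for the feed-forward chunking helpers above; `model` is assumed to be an already-constructed instance of this class (for example, the reduced configuration sketched earlier).

model.enable_forward_chunking(chunk_size=1, dim=1)  # chunk feed-forward layers over the sequence dimension
# ... run memory-constrained inference here ...
model.disable_forward_chunking()                    # restore unchunked feed-forward computation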
exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/models/transformers/latte_transformer_3d.py ADDED
@@ -0,0 +1,331 @@
1
+ # Copyright 2025 the Latte Team and The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import Optional
16
+
17
+ import torch
18
+ from torch import nn
19
+
20
+ from ...configuration_utils import ConfigMixin, register_to_config
21
+ from ..attention import BasicTransformerBlock
22
+ from ..cache_utils import CacheMixin
23
+ from ..embeddings import PatchEmbed, PixArtAlphaTextProjection, get_1d_sincos_pos_embed_from_grid
24
+ from ..modeling_outputs import Transformer2DModelOutput
25
+ from ..modeling_utils import ModelMixin
26
+ from ..normalization import AdaLayerNormSingle
27
+
28
+
29
+ class LatteTransformer3DModel(ModelMixin, ConfigMixin, CacheMixin):
30
+ _supports_gradient_checkpointing = True
31
+
32
+ """
33
+ A 3D Transformer model for video-like data, paper: https://huggingface.co/papers/2401.03048, official code:
34
+ https://github.com/Vchitect/Latte
35
+
36
+ Parameters:
37
+ num_attention_heads (`int`, *optional*, defaults to 16): The number of heads to use for multi-head attention.
38
+ attention_head_dim (`int`, *optional*, defaults to 88): The number of channels in each head.
39
+ in_channels (`int`, *optional*):
40
+ The number of channels in the input.
41
+ out_channels (`int`, *optional*):
42
+ The number of channels in the output.
43
+ num_layers (`int`, *optional*, defaults to 1): The number of layers of Transformer blocks to use.
44
+ dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use.
45
+ cross_attention_dim (`int`, *optional*): The number of `encoder_hidden_states` dimensions to use.
46
+ attention_bias (`bool`, *optional*):
47
+ Configure if the `TransformerBlocks` attention should contain a bias parameter.
48
+ sample_size (`int`, *optional*): The width of the latent images (specify if the input is **discrete**).
49
+ This is fixed during training since it is used to learn a number of position embeddings.
50
+ patch_size (`int`, *optional*):
51
+ The size of the patches to use in the patch embedding layer.
52
+ activation_fn (`str`, *optional*, defaults to `"geglu"`): Activation function to use in feed-forward.
53
+ num_embeds_ada_norm ( `int`, *optional*):
54
+ The number of diffusion steps used during training. Pass if at least one of the norm_layers is
55
+ `AdaLayerNorm`. This is fixed during training since it is used to learn a number of embeddings that are
56
+ added to the hidden states. During inference, you can denoise for up to but not more steps than
57
+ `num_embeds_ada_norm`.
58
+ norm_type (`str`, *optional*, defaults to `"layer_norm"`):
59
+ The type of normalization to use. Options are `"layer_norm"` or `"ada_layer_norm"`.
60
+ norm_elementwise_affine (`bool`, *optional*, defaults to `True`):
61
+ Whether or not to use elementwise affine in normalization layers.
62
+ norm_eps (`float`, *optional*, defaults to 1e-5): The epsilon value to use in normalization layers.
63
+ caption_channels (`int`, *optional*):
64
+ The number of channels in the caption embeddings.
65
+ video_length (`int`, *optional*):
66
+ The number of frames in the video-like data.
67
+ """
68
+
69
+ _skip_layerwise_casting_patterns = ["pos_embed", "norm"]
70
+
71
+ @register_to_config
72
+ def __init__(
73
+ self,
74
+ num_attention_heads: int = 16,
75
+ attention_head_dim: int = 88,
76
+ in_channels: Optional[int] = None,
77
+ out_channels: Optional[int] = None,
78
+ num_layers: int = 1,
79
+ dropout: float = 0.0,
80
+ cross_attention_dim: Optional[int] = None,
81
+ attention_bias: bool = False,
82
+ sample_size: int = 64,
83
+ patch_size: Optional[int] = None,
84
+ activation_fn: str = "geglu",
85
+ num_embeds_ada_norm: Optional[int] = None,
86
+ norm_type: str = "layer_norm",
87
+ norm_elementwise_affine: bool = True,
88
+ norm_eps: float = 1e-5,
89
+ caption_channels: int = None,
90
+ video_length: int = 16,
91
+ ):
92
+ super().__init__()
93
+ inner_dim = num_attention_heads * attention_head_dim
94
+
95
+ # 1. Define input layers
96
+ self.height = sample_size
97
+ self.width = sample_size
98
+
99
+ interpolation_scale = self.config.sample_size // 64
100
+ interpolation_scale = max(interpolation_scale, 1)
101
+ self.pos_embed = PatchEmbed(
102
+ height=sample_size,
103
+ width=sample_size,
104
+ patch_size=patch_size,
105
+ in_channels=in_channels,
106
+ embed_dim=inner_dim,
107
+ interpolation_scale=interpolation_scale,
108
+ )
109
+
110
+ # 2. Define spatial transformers blocks
111
+ self.transformer_blocks = nn.ModuleList(
112
+ [
113
+ BasicTransformerBlock(
114
+ inner_dim,
115
+ num_attention_heads,
116
+ attention_head_dim,
117
+ dropout=dropout,
118
+ cross_attention_dim=cross_attention_dim,
119
+ activation_fn=activation_fn,
120
+ num_embeds_ada_norm=num_embeds_ada_norm,
121
+ attention_bias=attention_bias,
122
+ norm_type=norm_type,
123
+ norm_elementwise_affine=norm_elementwise_affine,
124
+ norm_eps=norm_eps,
125
+ )
126
+ for d in range(num_layers)
127
+ ]
128
+ )
129
+
130
+ # 3. Define temporal transformers blocks
131
+ self.temporal_transformer_blocks = nn.ModuleList(
132
+ [
133
+ BasicTransformerBlock(
134
+ inner_dim,
135
+ num_attention_heads,
136
+ attention_head_dim,
137
+ dropout=dropout,
138
+ cross_attention_dim=None,
139
+ activation_fn=activation_fn,
140
+ num_embeds_ada_norm=num_embeds_ada_norm,
141
+ attention_bias=attention_bias,
142
+ norm_type=norm_type,
143
+ norm_elementwise_affine=norm_elementwise_affine,
144
+ norm_eps=norm_eps,
145
+ )
146
+ for d in range(num_layers)
147
+ ]
148
+ )
149
+
150
+ # 4. Define output layers
151
+ self.out_channels = in_channels if out_channels is None else out_channels
152
+ self.norm_out = nn.LayerNorm(inner_dim, elementwise_affine=False, eps=1e-6)
153
+ self.scale_shift_table = nn.Parameter(torch.randn(2, inner_dim) / inner_dim**0.5)
154
+ self.proj_out = nn.Linear(inner_dim, patch_size * patch_size * self.out_channels)
155
+
156
+ # 5. Latte other blocks.
157
+ self.adaln_single = AdaLayerNormSingle(inner_dim, use_additional_conditions=False)
158
+ self.caption_projection = PixArtAlphaTextProjection(in_features=caption_channels, hidden_size=inner_dim)
159
+
160
+ # define temporal positional embedding
161
+ temp_pos_embed = get_1d_sincos_pos_embed_from_grid(
162
+ inner_dim, torch.arange(0, video_length).unsqueeze(1), output_type="pt"
163
+ ) # 1152 hidden size
164
+ self.register_buffer("temp_pos_embed", temp_pos_embed.float().unsqueeze(0), persistent=False)
165
+
166
+ self.gradient_checkpointing = False
167
+
168
+ def forward(
169
+ self,
170
+ hidden_states: torch.Tensor,
171
+ timestep: Optional[torch.LongTensor] = None,
172
+ encoder_hidden_states: Optional[torch.Tensor] = None,
173
+ encoder_attention_mask: Optional[torch.Tensor] = None,
174
+ enable_temporal_attentions: bool = True,
175
+ return_dict: bool = True,
176
+ ):
177
+ """
178
+ The [`LatteTransformer3DModel`] forward method.
179
+
180
+ Args:
181
+ hidden_states (`torch.Tensor` of shape `(batch size, channel, num_frame, height, width)`):
182
+ Input `hidden_states`.
183
+ timestep ( `torch.LongTensor`, *optional*):
184
+ Used to indicate denoising step. Optional timestep to be applied as an embedding in `AdaLayerNorm`.
185
+ encoder_hidden_states ( `torch.FloatTensor` of shape `(batch size, sequence len, embed dims)`, *optional*):
186
+ Conditional embeddings for cross attention layer. If not given, cross-attention defaults to
187
+ self-attention.
188
+ encoder_attention_mask ( `torch.Tensor`, *optional*):
189
+ Cross-attention mask applied to `encoder_hidden_states`. Two formats supported:
190
+
191
+ * Mask `(batch, sequence_length)` True = keep, False = discard.
192
+ * Bias `(batch, 1, sequence_length)` 0 = keep, -10000 = discard.
193
+
194
+ If `ndim == 2`: will be interpreted as a mask, then converted into a bias consistent with the format
195
+ above. This bias will be added to the cross-attention scores.
196
+ enable_temporal_attentions:
197
+ (`bool`, *optional*, defaults to `True`): Whether to enable temporal attentions.
198
+ return_dict (`bool`, *optional*, defaults to `True`):
199
+ Whether or not to return a [`~models.unet_2d_condition.UNet2DConditionOutput`] instead of a plain
200
+ tuple.
201
+
202
+ Returns:
203
+ If `return_dict` is True, an [`~models.transformer_2d.Transformer2DModelOutput`] is returned, otherwise a
204
+ `tuple` where the first element is the sample tensor.
205
+ """
206
+
207
+ # Reshape hidden states
208
+ batch_size, channels, num_frame, height, width = hidden_states.shape
209
+ # batch_size channels num_frame height width -> (batch_size * num_frame) channels height width
210
+ hidden_states = hidden_states.permute(0, 2, 1, 3, 4).reshape(-1, channels, height, width)
211
+
212
+ # Input
213
+ height, width = (
214
+ hidden_states.shape[-2] // self.config.patch_size,
215
+ hidden_states.shape[-1] // self.config.patch_size,
216
+ )
217
+ num_patches = height * width
218
+
219
+ hidden_states = self.pos_embed(hidden_states) # already add positional embeddings
220
+
221
+ added_cond_kwargs = {"resolution": None, "aspect_ratio": None}
222
+ timestep, embedded_timestep = self.adaln_single(
223
+ timestep, added_cond_kwargs=added_cond_kwargs, batch_size=batch_size, hidden_dtype=hidden_states.dtype
224
+ )
225
+
226
+ # Prepare text embeddings for spatial block
227
+ # batch_size num_tokens hidden_size -> (batch_size * num_frame) num_tokens hidden_size
228
+ encoder_hidden_states = self.caption_projection(encoder_hidden_states) # 3 120 1152
229
+ encoder_hidden_states_spatial = encoder_hidden_states.repeat_interleave(
230
+ num_frame, dim=0, output_size=encoder_hidden_states.shape[0] * num_frame
231
+ ).view(-1, encoder_hidden_states.shape[-2], encoder_hidden_states.shape[-1])
232
+
233
+ # Prepare timesteps for spatial and temporal block
234
+ timestep_spatial = timestep.repeat_interleave(
235
+ num_frame, dim=0, output_size=timestep.shape[0] * num_frame
236
+ ).view(-1, timestep.shape[-1])
237
+ timestep_temp = timestep.repeat_interleave(
238
+ num_patches, dim=0, output_size=timestep.shape[0] * num_patches
239
+ ).view(-1, timestep.shape[-1])
240
+
241
+ # Spatial and temporal transformer blocks
242
+ for i, (spatial_block, temp_block) in enumerate(
243
+ zip(self.transformer_blocks, self.temporal_transformer_blocks)
244
+ ):
245
+ if torch.is_grad_enabled() and self.gradient_checkpointing:
246
+ hidden_states = self._gradient_checkpointing_func(
247
+ spatial_block,
248
+ hidden_states,
249
+ None, # attention_mask
250
+ encoder_hidden_states_spatial,
251
+ encoder_attention_mask,
252
+ timestep_spatial,
253
+ None, # cross_attention_kwargs
254
+ None, # class_labels
255
+ )
256
+ else:
257
+ hidden_states = spatial_block(
258
+ hidden_states,
259
+ None, # attention_mask
260
+ encoder_hidden_states_spatial,
261
+ encoder_attention_mask,
262
+ timestep_spatial,
263
+ None, # cross_attention_kwargs
264
+ None, # class_labels
265
+ )
266
+
267
+ if enable_temporal_attentions:
268
+ # (batch_size * num_frame) num_tokens hidden_size -> (batch_size * num_tokens) num_frame hidden_size
269
+ hidden_states = hidden_states.reshape(
270
+ batch_size, -1, hidden_states.shape[-2], hidden_states.shape[-1]
271
+ ).permute(0, 2, 1, 3)
272
+ hidden_states = hidden_states.reshape(-1, hidden_states.shape[-2], hidden_states.shape[-1])
273
+
274
+ if i == 0 and num_frame > 1:
275
+ hidden_states = hidden_states + self.temp_pos_embed.to(hidden_states.dtype)
276
+
277
+ if torch.is_grad_enabled() and self.gradient_checkpointing:
278
+ hidden_states = self._gradient_checkpointing_func(
279
+ temp_block,
280
+ hidden_states,
281
+ None, # attention_mask
282
+ None, # encoder_hidden_states
283
+ None, # encoder_attention_mask
284
+ timestep_temp,
285
+ None, # cross_attention_kwargs
286
+ None, # class_labels
287
+ )
288
+ else:
289
+ hidden_states = temp_block(
290
+ hidden_states,
291
+ None, # attention_mask
292
+ None, # encoder_hidden_states
293
+ None, # encoder_attention_mask
294
+ timestep_temp,
295
+ None, # cross_attention_kwargs
296
+ None, # class_labels
297
+ )
298
+
299
+ # (batch_size * num_tokens) num_frame hidden_size -> (batch_size * num_frame) num_tokens hidden_size
300
+ hidden_states = hidden_states.reshape(
301
+ batch_size, -1, hidden_states.shape[-2], hidden_states.shape[-1]
302
+ ).permute(0, 2, 1, 3)
303
+ hidden_states = hidden_states.reshape(-1, hidden_states.shape[-2], hidden_states.shape[-1])
304
+
305
+ embedded_timestep = embedded_timestep.repeat_interleave(
306
+ num_frame, dim=0, output_size=embedded_timestep.shape[0] * num_frame
307
+ ).view(-1, embedded_timestep.shape[-1])
308
+ shift, scale = (self.scale_shift_table[None] + embedded_timestep[:, None]).chunk(2, dim=1)
309
+ hidden_states = self.norm_out(hidden_states)
310
+ # Modulation
311
+ hidden_states = hidden_states * (1 + scale) + shift
312
+ hidden_states = self.proj_out(hidden_states)
313
+
314
+ # unpatchify
315
+ if self.adaln_single is None:
316
+ height = width = int(hidden_states.shape[1] ** 0.5)
317
+ hidden_states = hidden_states.reshape(
318
+ shape=(-1, height, width, self.config.patch_size, self.config.patch_size, self.out_channels)
319
+ )
320
+ hidden_states = torch.einsum("nhwpqc->nchpwq", hidden_states)
321
+ output = hidden_states.reshape(
322
+ shape=(-1, self.out_channels, height * self.config.patch_size, width * self.config.patch_size)
323
+ )
324
+ output = output.reshape(batch_size, -1, output.shape[-3], output.shape[-2], output.shape[-1]).permute(
325
+ 0, 2, 1, 3, 4
326
+ )
327
+
328
+ if not return_dict:
329
+ return (output,)
330
+
331
+ return Transformer2DModelOutput(sample=output)
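A hedged, standalone illustration of the reshapes used above to alternate spatial and temporal attention: tokens are batched per frame for the spatial blocks and per spatial position for the temporal blocks. All dimensions are arbitrary example values.

import torch

batch_size, num_frames, num_tokens, dim = 2, 4, 9, 8
x = torch.randn(batch_size * num_frames, num_tokens, dim)                    # input to a spatial block

x_t = x.reshape(batch_size, num_frames, num_tokens, dim).permute(0, 2, 1, 3)
x_t = x_t.reshape(batch_size * num_tokens, num_frames, dim)                  # input to a temporal block

x_back = x_t.reshape(batch_size, num_tokens, num_frames, dim).permute(0, 2, 1, 3)
x_back = x_back.reshape(batch_size * num_frames, num_tokens, dim)
assert torch.equal(x, x_back)                                                # the round trip is lossless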
exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/models/transformers/lumina_nextdit2d.py ADDED
@@ -0,0 +1,342 @@
1
+ # Copyright 2025 Alpha-VLLM Authors and The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import Any, Dict, Optional
16
+
17
+ import torch
18
+ import torch.nn as nn
19
+
20
+ from ...configuration_utils import ConfigMixin, register_to_config
21
+ from ...utils import logging
22
+ from ..attention import LuminaFeedForward
23
+ from ..attention_processor import Attention, LuminaAttnProcessor2_0
24
+ from ..embeddings import (
25
+ LuminaCombinedTimestepCaptionEmbedding,
26
+ LuminaPatchEmbed,
27
+ )
28
+ from ..modeling_outputs import Transformer2DModelOutput
29
+ from ..modeling_utils import ModelMixin
30
+ from ..normalization import LuminaLayerNormContinuous, LuminaRMSNormZero, RMSNorm
31
+
32
+
33
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
34
+
35
+
36
+ class LuminaNextDiTBlock(nn.Module):
37
+ """
38
+ A LuminaNextDiTBlock for LuminaNextDiT2DModel.
39
+
40
+ Parameters:
41
+ dim (`int`): Embedding dimension of the input features.
42
+ num_attention_heads (`int`): Number of attention heads.
43
+ num_kv_heads (`int`):
44
+ Number of attention heads in key and value features (if using GQA), or set to None for the same as query.
45
+ multiple_of (`int`): The number of multiple of ffn layer.
46
+ ffn_dim_multiplier (`float`): The multiplier factor of ffn layer dimension.
47
+ norm_eps (`float`): The eps for norm layer.
48
+ qk_norm (`bool`): normalization for query and key.
49
+ cross_attention_dim (`int`): Cross attention embedding dimension of the input text prompt hidden_states.
50
+ norm_elementwise_affine (`bool`, *optional*, defaults to True),
51
+ """
52
+
53
+ def __init__(
54
+ self,
55
+ dim: int,
56
+ num_attention_heads: int,
57
+ num_kv_heads: int,
58
+ multiple_of: int,
59
+ ffn_dim_multiplier: float,
60
+ norm_eps: float,
61
+ qk_norm: bool,
62
+ cross_attention_dim: int,
63
+ norm_elementwise_affine: bool = True,
64
+ ) -> None:
65
+ super().__init__()
66
+ self.head_dim = dim // num_attention_heads
67
+
68
+ self.gate = nn.Parameter(torch.zeros([num_attention_heads]))
69
+
70
+ # Self-attention
71
+ self.attn1 = Attention(
72
+ query_dim=dim,
73
+ cross_attention_dim=None,
74
+ dim_head=dim // num_attention_heads,
75
+ qk_norm="layer_norm_across_heads" if qk_norm else None,
76
+ heads=num_attention_heads,
77
+ kv_heads=num_kv_heads,
78
+ eps=1e-5,
79
+ bias=False,
80
+ out_bias=False,
81
+ processor=LuminaAttnProcessor2_0(),
82
+ )
83
+ self.attn1.to_out = nn.Identity()
84
+
85
+ # Cross-attention
86
+ self.attn2 = Attention(
87
+ query_dim=dim,
88
+ cross_attention_dim=cross_attention_dim,
89
+ dim_head=dim // num_attention_heads,
90
+ qk_norm="layer_norm_across_heads" if qk_norm else None,
91
+ heads=num_attention_heads,
92
+ kv_heads=num_kv_heads,
93
+ eps=1e-5,
94
+ bias=False,
95
+ out_bias=False,
96
+ processor=LuminaAttnProcessor2_0(),
97
+ )
98
+
99
+ self.feed_forward = LuminaFeedForward(
100
+ dim=dim,
101
+ inner_dim=int(4 * 2 * dim / 3),
102
+ multiple_of=multiple_of,
103
+ ffn_dim_multiplier=ffn_dim_multiplier,
104
+ )
105
+
106
+ self.norm1 = LuminaRMSNormZero(
107
+ embedding_dim=dim,
108
+ norm_eps=norm_eps,
109
+ norm_elementwise_affine=norm_elementwise_affine,
110
+ )
111
+ self.ffn_norm1 = RMSNorm(dim, eps=norm_eps, elementwise_affine=norm_elementwise_affine)
112
+
113
+ self.norm2 = RMSNorm(dim, eps=norm_eps, elementwise_affine=norm_elementwise_affine)
114
+ self.ffn_norm2 = RMSNorm(dim, eps=norm_eps, elementwise_affine=norm_elementwise_affine)
115
+
116
+ self.norm1_context = RMSNorm(cross_attention_dim, eps=norm_eps, elementwise_affine=norm_elementwise_affine)
117
+
118
+ def forward(
119
+ self,
120
+ hidden_states: torch.Tensor,
121
+ attention_mask: torch.Tensor,
122
+ image_rotary_emb: torch.Tensor,
123
+ encoder_hidden_states: torch.Tensor,
124
+ encoder_mask: torch.Tensor,
125
+ temb: torch.Tensor,
126
+ cross_attention_kwargs: Optional[Dict[str, Any]] = None,
127
+ ):
128
+ """
129
+ Perform a forward pass through the LuminaNextDiTBlock.
130
+
131
+ Parameters:
132
+ hidden_states (`torch.Tensor`): The input of hidden_states for LuminaNextDiTBlock.
133
+ attention_mask (`torch.Tensor): The input of hidden_states corresponse attention mask.
134
+ image_rotary_emb (`torch.Tensor`): Precomputed cosine and sine frequencies.
135
+ encoder_hidden_states: (`torch.Tensor`): The hidden_states of text prompt are processed by Gemma encoder.
136
+ encoder_mask (`torch.Tensor`): The hidden_states of text prompt attention mask.
137
+ temb (`torch.Tensor`): Timestep embedding with text prompt embedding.
138
+ cross_attention_kwargs (`Dict[str, Any]`): kwargs for cross attention.
139
+ """
140
+ residual = hidden_states
141
+
142
+ # Self-attention
143
+ norm_hidden_states, gate_msa, scale_mlp, gate_mlp = self.norm1(hidden_states, temb)
144
+ self_attn_output = self.attn1(
145
+ hidden_states=norm_hidden_states,
146
+ encoder_hidden_states=norm_hidden_states,
147
+ attention_mask=attention_mask,
148
+ query_rotary_emb=image_rotary_emb,
149
+ key_rotary_emb=image_rotary_emb,
150
+ **cross_attention_kwargs,
151
+ )
152
+
153
+ # Cross-attention
154
+ norm_encoder_hidden_states = self.norm1_context(encoder_hidden_states)
155
+ cross_attn_output = self.attn2(
156
+ hidden_states=norm_hidden_states,
157
+ encoder_hidden_states=norm_encoder_hidden_states,
158
+ attention_mask=encoder_mask,
159
+ query_rotary_emb=image_rotary_emb,
160
+ key_rotary_emb=None,
161
+ **cross_attention_kwargs,
162
+ )
163
+ cross_attn_output = cross_attn_output * self.gate.tanh().view(1, 1, -1, 1)
164
+ mixed_attn_output = self_attn_output + cross_attn_output
165
+ mixed_attn_output = mixed_attn_output.flatten(-2)
166
+ # linear proj
167
+ hidden_states = self.attn2.to_out[0](mixed_attn_output)
168
+
169
+ hidden_states = residual + gate_msa.unsqueeze(1).tanh() * self.norm2(hidden_states)
170
+
171
+ mlp_output = self.feed_forward(self.ffn_norm1(hidden_states) * (1 + scale_mlp.unsqueeze(1)))
172
+
173
+ hidden_states = hidden_states + gate_mlp.unsqueeze(1).tanh() * self.ffn_norm2(mlp_output)
174
+
175
+ return hidden_states
176
+
177
+
178
+ class LuminaNextDiT2DModel(ModelMixin, ConfigMixin):
179
+ """
180
+ LuminaNextDiT: Diffusion model with a Transformer backbone.
181
+
182
+ Inherit ModelMixin and ConfigMixin to be compatible with the sampler StableDiffusionPipeline of diffusers.
183
+
184
+ Parameters:
185
+ sample_size (`int`): The width of the latent images. This is fixed during training since
186
+ it is used to learn a number of position embeddings.
187
+ patch_size (`int`, *optional*, (`int`, *optional*, defaults to 2):
188
+ The size of each patch in the image. This parameter defines the resolution of patches fed into the model.
189
+ in_channels (`int`, *optional*, defaults to 4):
190
+ The number of input channels for the model. Typically, this matches the number of channels in the input
191
+ images.
192
+ hidden_size (`int`, *optional*, defaults to 4096):
193
+ The dimensionality of the hidden layers in the model. This parameter determines the width of the model's
194
+ hidden representations.
195
+ num_layers (`int`, *optional*, default to 32):
196
+ The number of layers in the model. This defines the depth of the neural network.
197
+ num_attention_heads (`int`, *optional*, defaults to 32):
198
+ The number of attention heads in each attention layer. This parameter specifies how many separate attention
199
+ mechanisms are used.
200
+ num_kv_heads (`int`, *optional*, defaults to 8):
201
+ The number of key-value heads in the attention mechanism, if different from the number of attention heads.
202
+ If None, it defaults to num_attention_heads.
203
+ multiple_of (`int`, *optional*, defaults to 256):
204
+ A factor that the hidden size should be a multiple of. This can help optimize certain hardware
205
+ configurations.
206
+ ffn_dim_multiplier (`float`, *optional*):
207
+ A multiplier for the dimensionality of the feed-forward network. If None, it uses a default value based on
208
+ the model configuration.
209
+ norm_eps (`float`, *optional*, defaults to 1e-5):
210
+ A small value added to the denominator for numerical stability in normalization layers.
211
+ learn_sigma (`bool`, *optional*, defaults to True):
212
+ Whether the model should learn the sigma parameter, which might be related to uncertainty or variance in
213
+ predictions.
214
+ qk_norm (`bool`, *optional*, defaults to True):
215
+ Indicates if the queries and keys in the attention mechanism should be normalized.
216
+ cross_attention_dim (`int`, *optional*, defaults to 2048):
217
+ The dimensionality of the text embeddings. This parameter defines the size of the text representations used
218
+ in the model.
219
+ scaling_factor (`float`, *optional*, defaults to 1.0):
220
+ A scaling factor applied to certain parameters or layers in the model. This can be used for adjusting the
221
+ overall scale of the model's operations.
222
+ """
223
+
224
+ _skip_layerwise_casting_patterns = ["patch_embedder", "norm", "ffn_norm"]
225
+
226
+ @register_to_config
227
+ def __init__(
228
+ self,
229
+ sample_size: int = 128,
230
+ patch_size: Optional[int] = 2,
231
+ in_channels: Optional[int] = 4,
232
+ hidden_size: Optional[int] = 2304,
233
+ num_layers: Optional[int] = 32,
234
+ num_attention_heads: Optional[int] = 32,
235
+ num_kv_heads: Optional[int] = None,
236
+ multiple_of: Optional[int] = 256,
237
+ ffn_dim_multiplier: Optional[float] = None,
238
+ norm_eps: Optional[float] = 1e-5,
239
+ learn_sigma: Optional[bool] = True,
240
+ qk_norm: Optional[bool] = True,
241
+ cross_attention_dim: Optional[int] = 2048,
242
+ scaling_factor: Optional[float] = 1.0,
243
+ ) -> None:
244
+ super().__init__()
245
+ self.sample_size = sample_size
246
+ self.patch_size = patch_size
247
+ self.in_channels = in_channels
248
+ self.out_channels = in_channels * 2 if learn_sigma else in_channels
249
+ self.hidden_size = hidden_size
250
+ self.num_attention_heads = num_attention_heads
251
+ self.head_dim = hidden_size // num_attention_heads
252
+ self.scaling_factor = scaling_factor
253
+
254
+ self.patch_embedder = LuminaPatchEmbed(
255
+ patch_size=patch_size, in_channels=in_channels, embed_dim=hidden_size, bias=True
256
+ )
257
+
258
+ self.pad_token = nn.Parameter(torch.empty(hidden_size))
259
+
260
+ self.time_caption_embed = LuminaCombinedTimestepCaptionEmbedding(
261
+ hidden_size=min(hidden_size, 1024), cross_attention_dim=cross_attention_dim
262
+ )
263
+
264
+ self.layers = nn.ModuleList(
265
+ [
266
+ LuminaNextDiTBlock(
267
+ hidden_size,
268
+ num_attention_heads,
269
+ num_kv_heads,
270
+ multiple_of,
271
+ ffn_dim_multiplier,
272
+ norm_eps,
273
+ qk_norm,
274
+ cross_attention_dim,
275
+ )
276
+ for _ in range(num_layers)
277
+ ]
278
+ )
279
+ self.norm_out = LuminaLayerNormContinuous(
280
+ embedding_dim=hidden_size,
281
+ conditioning_embedding_dim=min(hidden_size, 1024),
282
+ elementwise_affine=False,
283
+ eps=1e-6,
284
+ bias=True,
285
+ out_dim=patch_size * patch_size * self.out_channels,
286
+ )
287
+ # self.final_layer = LuminaFinalLayer(hidden_size, patch_size, self.out_channels)
288
+
289
+ assert (hidden_size // num_attention_heads) % 4 == 0, "2d rope needs head dim to be divisible by 4"
290
+
291
+ def forward(
292
+ self,
293
+ hidden_states: torch.Tensor,
294
+ timestep: torch.Tensor,
295
+ encoder_hidden_states: torch.Tensor,
296
+ encoder_mask: torch.Tensor,
297
+ image_rotary_emb: torch.Tensor,
298
+ cross_attention_kwargs: Dict[str, Any] = None,
299
+ return_dict=True,
300
+ ) -> torch.Tensor:
301
+ """
302
+ Forward pass of LuminaNextDiT.
303
+
304
+ Parameters:
305
+ hidden_states (torch.Tensor): Input tensor of shape (N, C, H, W).
306
+ timestep (torch.Tensor): Tensor of diffusion timesteps of shape (N,).
307
+ encoder_hidden_states (torch.Tensor): Tensor of caption features of shape (N, D).
308
+ encoder_mask (torch.Tensor): Tensor of caption masks of shape (N, L).
309
+ """
310
+ hidden_states, mask, img_size, image_rotary_emb = self.patch_embedder(hidden_states, image_rotary_emb)
311
+ image_rotary_emb = image_rotary_emb.to(hidden_states.device)
312
+
313
+ temb = self.time_caption_embed(timestep, encoder_hidden_states, encoder_mask)
314
+
315
+ encoder_mask = encoder_mask.bool()
316
+ for layer in self.layers:
317
+ hidden_states = layer(
318
+ hidden_states,
319
+ mask,
320
+ image_rotary_emb,
321
+ encoder_hidden_states,
322
+ encoder_mask,
323
+ temb=temb,
324
+ cross_attention_kwargs=cross_attention_kwargs,
325
+ )
326
+
327
+ hidden_states = self.norm_out(hidden_states, temb)
328
+
329
+ # unpatchify
330
+ height_tokens = width_tokens = self.patch_size
331
+ height, width = img_size[0]
332
+ batch_size = hidden_states.size(0)
333
+ sequence_length = (height // height_tokens) * (width // width_tokens)
334
+ hidden_states = hidden_states[:, :sequence_length].view(
335
+ batch_size, height // height_tokens, width // width_tokens, height_tokens, width_tokens, self.out_channels
336
+ )
337
+ output = hidden_states.permute(0, 5, 1, 3, 2, 4).flatten(4, 5).flatten(2, 3)
338
+
339
+ if not return_dict:
340
+ return (output,)
341
+
342
+ return Transformer2DModelOutput(sample=output)
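A hedged, standalone sketch of the unpatchify step at the end of `forward` above: per-patch predictions of shape (B, (H/p)*(W/p), p*p*C) are rearranged into an image-shaped (B, C, H, W) tensor. The sizes are arbitrary example values.

import torch

batch_size, channels, patch, height, width = 1, 8, 2, 8, 8
tokens = (height // patch) * (width // patch)
x = torch.randn(batch_size, tokens, patch * patch * channels)

x = x.view(batch_size, height // patch, width // patch, patch, patch, channels)
image = x.permute(0, 5, 1, 3, 2, 4).flatten(4, 5).flatten(2, 3)
assert image.shape == (batch_size, channels, height, width)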
exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/models/transformers/pixart_transformer_2d.py ADDED
@@ -0,0 +1,430 @@
1
+ # Copyright 2025 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import Any, Dict, Optional, Union
15
+
16
+ import torch
17
+ from torch import nn
18
+
19
+ from ...configuration_utils import ConfigMixin, register_to_config
20
+ from ...utils import logging
21
+ from ..attention import BasicTransformerBlock
22
+ from ..attention_processor import Attention, AttentionProcessor, AttnProcessor, FusedAttnProcessor2_0
23
+ from ..embeddings import PatchEmbed, PixArtAlphaTextProjection
24
+ from ..modeling_outputs import Transformer2DModelOutput
25
+ from ..modeling_utils import ModelMixin
26
+ from ..normalization import AdaLayerNormSingle
27
+
28
+
29
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
30
+
31
+
32
+ class PixArtTransformer2DModel(ModelMixin, ConfigMixin):
33
+ r"""
34
+ A 2D Transformer model as introduced in PixArt family of models (https://huggingface.co/papers/2310.00426,
35
+ https://huggingface.co/papers/2403.04692).
36
+
37
+ Parameters:
38
+ num_attention_heads (int, optional, defaults to 16): The number of heads to use for multi-head attention.
39
+ attention_head_dim (int, optional, defaults to 72): The number of channels in each head.
40
+ in_channels (int, defaults to 4): The number of channels in the input.
41
+ out_channels (int, optional):
42
+ The number of channels in the output. Specify this parameter if the output channel number differs from the
43
+ input.
44
+ num_layers (int, optional, defaults to 28): The number of layers of Transformer blocks to use.
45
+ dropout (float, optional, defaults to 0.0): The dropout probability to use within the Transformer blocks.
46
+ norm_num_groups (int, optional, defaults to 32):
47
+ Number of groups for group normalization within Transformer blocks.
48
+ cross_attention_dim (int, optional):
49
+ The dimensionality for cross-attention layers, typically matching the encoder's hidden dimension.
50
+ attention_bias (bool, optional, defaults to True):
51
+ Configure if the Transformer blocks' attention should contain a bias parameter.
52
+ sample_size (int, defaults to 128):
53
+ The width of the latent images. This parameter is fixed during training.
54
+ patch_size (int, defaults to 2):
55
+ Size of the patches the model processes, relevant for architectures working on non-sequential data.
56
+ activation_fn (str, optional, defaults to "gelu-approximate"):
57
+ Activation function to use in feed-forward networks within Transformer blocks.
58
+ num_embeds_ada_norm (int, optional, defaults to 1000):
59
+ Number of embeddings for AdaLayerNorm, fixed during training and affects the maximum denoising steps during
60
+ inference.
61
+ upcast_attention (bool, optional, defaults to False):
62
+ If true, upcasts the attention mechanism dimensions for potentially improved performance.
63
+ norm_type (str, optional, defaults to "ada_norm_zero"):
64
+ Specifies the type of normalization used, can be 'ada_norm_zero'.
65
+ norm_elementwise_affine (bool, optional, defaults to False):
66
+ If true, enables element-wise affine parameters in the normalization layers.
67
+ norm_eps (float, optional, defaults to 1e-6):
68
+ A small constant added to the denominator in normalization layers to prevent division by zero.
69
+ interpolation_scale (int, optional): Scale factor to use when interpolating the position embeddings.
70
+ use_additional_conditions (bool, optional): Whether additional conditions are used as inputs.
71
+ attention_type (str, optional, defaults to "default"): Kind of attention mechanism to be used.
72
+ caption_channels (int, optional, defaults to None):
73
+ Number of channels to use for projecting the caption embeddings.
74
+ use_linear_projection (bool, optional, defaults to False):
75
+ Deprecated argument. Will be removed in a future version.
76
+ num_vector_embeds (bool, optional, defaults to False):
77
+ Deprecated argument. Will be removed in a future version.
78
+ """
79
+
80
+ _supports_gradient_checkpointing = True
81
+ _no_split_modules = ["BasicTransformerBlock", "PatchEmbed"]
82
+ _skip_layerwise_casting_patterns = ["pos_embed", "norm", "adaln_single"]
83
+
84
+ @register_to_config
85
+ def __init__(
86
+ self,
87
+ num_attention_heads: int = 16,
88
+ attention_head_dim: int = 72,
89
+ in_channels: int = 4,
90
+ out_channels: Optional[int] = 8,
91
+ num_layers: int = 28,
92
+ dropout: float = 0.0,
93
+ norm_num_groups: int = 32,
94
+ cross_attention_dim: Optional[int] = 1152,
95
+ attention_bias: bool = True,
96
+ sample_size: int = 128,
97
+ patch_size: int = 2,
98
+ activation_fn: str = "gelu-approximate",
99
+ num_embeds_ada_norm: Optional[int] = 1000,
100
+ upcast_attention: bool = False,
101
+ norm_type: str = "ada_norm_single",
102
+ norm_elementwise_affine: bool = False,
103
+ norm_eps: float = 1e-6,
104
+ interpolation_scale: Optional[int] = None,
105
+ use_additional_conditions: Optional[bool] = None,
106
+ caption_channels: Optional[int] = None,
107
+ attention_type: Optional[str] = "default",
108
+ ):
109
+ super().__init__()
110
+
111
+ # Validate inputs.
112
+ if norm_type != "ada_norm_single":
113
+ raise NotImplementedError(
114
+ f"Forward pass is not implemented when `patch_size` is not None and `norm_type` is '{norm_type}'."
115
+ )
116
+ elif norm_type == "ada_norm_single" and num_embeds_ada_norm is None:
117
+ raise ValueError(
118
+ f"When using a `patch_size` and this `norm_type` ({norm_type}), `num_embeds_ada_norm` cannot be None."
119
+ )
120
+
121
+ # Set some common variables used across the board.
122
+ self.attention_head_dim = attention_head_dim
123
+ self.inner_dim = self.config.num_attention_heads * self.config.attention_head_dim
124
+ self.out_channels = in_channels if out_channels is None else out_channels
125
+ if use_additional_conditions is None:
126
+ if sample_size == 128:
127
+ use_additional_conditions = True
128
+ else:
129
+ use_additional_conditions = False
130
+ self.use_additional_conditions = use_additional_conditions
131
+
132
+ self.gradient_checkpointing = False
133
+
134
+ # 2. Initialize the position embedding and transformer blocks.
135
+ self.height = self.config.sample_size
136
+ self.width = self.config.sample_size
137
+
138
+ interpolation_scale = (
139
+ self.config.interpolation_scale
140
+ if self.config.interpolation_scale is not None
141
+ else max(self.config.sample_size // 64, 1)
142
+ )
143
+ self.pos_embed = PatchEmbed(
144
+ height=self.config.sample_size,
145
+ width=self.config.sample_size,
146
+ patch_size=self.config.patch_size,
147
+ in_channels=self.config.in_channels,
148
+ embed_dim=self.inner_dim,
149
+ interpolation_scale=interpolation_scale,
150
+ )
151
+
152
+ self.transformer_blocks = nn.ModuleList(
153
+ [
154
+ BasicTransformerBlock(
155
+ self.inner_dim,
156
+ self.config.num_attention_heads,
157
+ self.config.attention_head_dim,
158
+ dropout=self.config.dropout,
159
+ cross_attention_dim=self.config.cross_attention_dim,
160
+ activation_fn=self.config.activation_fn,
161
+ num_embeds_ada_norm=self.config.num_embeds_ada_norm,
162
+ attention_bias=self.config.attention_bias,
163
+ upcast_attention=self.config.upcast_attention,
164
+ norm_type=norm_type,
165
+ norm_elementwise_affine=self.config.norm_elementwise_affine,
166
+ norm_eps=self.config.norm_eps,
167
+ attention_type=self.config.attention_type,
168
+ )
169
+ for _ in range(self.config.num_layers)
170
+ ]
171
+ )
172
+
173
+ # 3. Output blocks.
174
+ self.norm_out = nn.LayerNorm(self.inner_dim, elementwise_affine=False, eps=1e-6)
175
+ self.scale_shift_table = nn.Parameter(torch.randn(2, self.inner_dim) / self.inner_dim**0.5)
176
+ self.proj_out = nn.Linear(self.inner_dim, self.config.patch_size * self.config.patch_size * self.out_channels)
177
+
178
+ self.adaln_single = AdaLayerNormSingle(
179
+ self.inner_dim, use_additional_conditions=self.use_additional_conditions
180
+ )
181
+ self.caption_projection = None
182
+ if self.config.caption_channels is not None:
183
+ self.caption_projection = PixArtAlphaTextProjection(
184
+ in_features=self.config.caption_channels, hidden_size=self.inner_dim
185
+ )
186
+
187
+ @property
188
+ # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.attn_processors
189
+ def attn_processors(self) -> Dict[str, AttentionProcessor]:
190
+ r"""
191
+ Returns:
192
+ `dict` of attention processors: A dictionary containing all attention processors used in the model,
193
+ indexed by its weight name.
194
+ """
195
+ # set recursively
196
+ processors = {}
197
+
198
+ def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
199
+ if hasattr(module, "get_processor"):
200
+ processors[f"{name}.processor"] = module.get_processor()
201
+
202
+ for sub_name, child in module.named_children():
203
+ fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)
204
+
205
+ return processors
206
+
207
+ for name, module in self.named_children():
208
+ fn_recursive_add_processors(name, module, processors)
209
+
210
+ return processors
211
+
212
+ # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_attn_processor
213
+ def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
214
+ r"""
215
+ Sets the attention processor to use to compute attention.
216
+
217
+ Parameters:
218
+ processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`):
219
+ The instantiated processor class or a dictionary of processor classes that will be set as the processor
220
+ for **all** `Attention` layers.
221
+
222
+ If `processor` is a dict, the key needs to define the path to the corresponding cross attention
223
+ processor. This is strongly recommended when setting trainable attention processors.
224
+
225
+ """
226
+ count = len(self.attn_processors.keys())
227
+
228
+ if isinstance(processor, dict) and len(processor) != count:
229
+ raise ValueError(
230
+ f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
231
+ f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
232
+ )
233
+
234
+ def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
235
+ if hasattr(module, "set_processor"):
236
+ if not isinstance(processor, dict):
237
+ module.set_processor(processor)
238
+ else:
239
+ module.set_processor(processor.pop(f"{name}.processor"))
240
+
241
+ for sub_name, child in module.named_children():
242
+ fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)
243
+
244
+ for name, module in self.named_children():
245
+ fn_recursive_attn_processor(name, module, processor)
246
+
247
+ def set_default_attn_processor(self):
248
+ """
249
+ Disables custom attention processors and sets the default attention implementation.
250
+
251
+ Safe to just use `AttnProcessor()` as PixArt doesn't have any exotic attention processors in default model.
252
+ """
253
+ self.set_attn_processor(AttnProcessor())
254
+
255
+ # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.fuse_qkv_projections
256
+ def fuse_qkv_projections(self):
257
+ """
258
+ Enables fused QKV projections. For self-attention modules, all projection matrices (i.e., query, key, value)
259
+ are fused. For cross-attention modules, key and value projection matrices are fused.
260
+
261
+ <Tip warning={true}>
262
+
263
+ This API is 🧪 experimental.
264
+
265
+ </Tip>
266
+ """
267
+ self.original_attn_processors = None
268
+
269
+ for _, attn_processor in self.attn_processors.items():
270
+ if "Added" in str(attn_processor.__class__.__name__):
271
+ raise ValueError("`fuse_qkv_projections()` is not supported for models having added KV projections.")
272
+
273
+ self.original_attn_processors = self.attn_processors
274
+
275
+ for module in self.modules():
276
+ if isinstance(module, Attention):
277
+ module.fuse_projections(fuse=True)
278
+
279
+ self.set_attn_processor(FusedAttnProcessor2_0())
280
+
281
+ # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.unfuse_qkv_projections
282
+ def unfuse_qkv_projections(self):
283
+ """Disables the fused QKV projection if enabled.
284
+
285
+ <Tip warning={true}>
286
+
287
+ This API is 🧪 experimental.
288
+
289
+ </Tip>
290
+
291
+ """
292
+ if self.original_attn_processors is not None:
293
+ self.set_attn_processor(self.original_attn_processors)
294
+
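A hedged, standalone sketch of the 2D mask-to-bias conversion performed inside `forward` below: keep tokens (1) map to 0.0, discard tokens (0) map to -10000.0, and a singleton query dimension is added for broadcasting over the attention scores.

import torch

encoder_attention_mask = torch.tensor([[1, 1, 0, 0]])            # (batch, key_tokens), 1 = keep
bias = (1 - encoder_attention_mask.to(torch.float32)) * -10000.0
bias = bias.unsqueeze(1)                                          # (batch, 1, key_tokens)
assert bias.shape == (1, 1, 4) and bias[0, 0, -1].item() == -10000.0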
295
+ def forward(
296
+ self,
297
+ hidden_states: torch.Tensor,
298
+ encoder_hidden_states: Optional[torch.Tensor] = None,
299
+ timestep: Optional[torch.LongTensor] = None,
300
+ added_cond_kwargs: Dict[str, torch.Tensor] = None,
301
+ cross_attention_kwargs: Dict[str, Any] = None,
302
+ attention_mask: Optional[torch.Tensor] = None,
303
+ encoder_attention_mask: Optional[torch.Tensor] = None,
304
+ return_dict: bool = True,
305
+ ):
306
+ """
307
+ The [`PixArtTransformer2DModel`] forward method.
308
+
309
+ Args:
310
+ hidden_states (`torch.FloatTensor` of shape `(batch size, channel, height, width)`):
311
+ Input `hidden_states`.
312
+ encoder_hidden_states (`torch.FloatTensor` of shape `(batch size, sequence len, embed dims)`, *optional*):
313
+ Conditional embeddings for cross attention layer. If not given, cross-attention defaults to
314
+ self-attention.
315
+ timestep (`torch.LongTensor`, *optional*):
316
+ Used to indicate denoising step. Optional timestep to be applied as an embedding in `AdaLayerNorm`.
317
+ added_cond_kwargs: (`Dict[str, Any]`, *optional*): Additional conditions to be used as inputs.
318
+ cross_attention_kwargs ( `Dict[str, Any]`, *optional*):
319
+ A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
320
+ `self.processor` in
321
+ [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
322
+ attention_mask ( `torch.Tensor`, *optional*):
323
+ An attention mask of shape `(batch, key_tokens)` is applied to `encoder_hidden_states`. If `1` the mask
324
+ is kept, otherwise if `0` it is discarded. Mask will be converted into a bias, which adds large
325
+ negative values to the attention scores corresponding to "discard" tokens.
326
+ encoder_attention_mask ( `torch.Tensor`, *optional*):
327
+ Cross-attention mask applied to `encoder_hidden_states`. Two formats supported:
328
+
329
+ * Mask `(batch, sequence_length)` True = keep, False = discard.
330
+ * Bias `(batch, 1, sequence_length)` 0 = keep, -10000 = discard.
331
+
332
+ If `ndim == 2`: will be interpreted as a mask, then converted into a bias consistent with the format
333
+ above. This bias will be added to the cross-attention scores.
334
+ return_dict (`bool`, *optional*, defaults to `True`):
335
+ Whether or not to return a [`~models.unets.unet_2d_condition.UNet2DConditionOutput`] instead of a plain
336
+ tuple.
337
+
338
+ Returns:
339
+ If `return_dict` is True, an [`~models.transformer_2d.Transformer2DModelOutput`] is returned, otherwise a
340
+ `tuple` where the first element is the sample tensor.
341
+ """
342
+ if self.use_additional_conditions and added_cond_kwargs is None:
343
+ raise ValueError("`added_cond_kwargs` cannot be None when using additional conditions for `adaln_single`.")
344
+
345
+ # ensure attention_mask is a bias, and give it a singleton query_tokens dimension.
346
+ # we may have done this conversion already, e.g. if we came here via UNet2DConditionModel#forward.
347
+ # we can tell by counting dims; if ndim == 2: it's a mask rather than a bias.
348
+ # expects mask of shape:
349
+ # [batch, key_tokens]
350
+ # adds singleton query_tokens dimension:
351
+ # [batch, 1, key_tokens]
352
+ # this helps to broadcast it as a bias over attention scores, which will be in one of the following shapes:
353
+ # [batch, heads, query_tokens, key_tokens] (e.g. torch sdp attn)
354
+ # [batch * heads, query_tokens, key_tokens] (e.g. xformers or classic attn)
355
+ if attention_mask is not None and attention_mask.ndim == 2:
356
+ # assume that mask is expressed as:
357
+ # (1 = keep, 0 = discard)
358
+ # convert mask into a bias that can be added to attention scores:
359
+ # (keep = +0, discard = -10000.0)
360
+ attention_mask = (1 - attention_mask.to(hidden_states.dtype)) * -10000.0
361
+ attention_mask = attention_mask.unsqueeze(1)
362
+
363
+ # convert encoder_attention_mask to a bias the same way we do for attention_mask
364
+ if encoder_attention_mask is not None and encoder_attention_mask.ndim == 2:
365
+ encoder_attention_mask = (1 - encoder_attention_mask.to(hidden_states.dtype)) * -10000.0
366
+ encoder_attention_mask = encoder_attention_mask.unsqueeze(1)
367
+
368
+ # 1. Input
369
+ batch_size = hidden_states.shape[0]
370
+ height, width = (
371
+ hidden_states.shape[-2] // self.config.patch_size,
372
+ hidden_states.shape[-1] // self.config.patch_size,
373
+ )
374
+ hidden_states = self.pos_embed(hidden_states)
375
+
376
+ timestep, embedded_timestep = self.adaln_single(
377
+ timestep, added_cond_kwargs, batch_size=batch_size, hidden_dtype=hidden_states.dtype
378
+ )
379
+
380
+ if self.caption_projection is not None:
381
+ encoder_hidden_states = self.caption_projection(encoder_hidden_states)
382
+ encoder_hidden_states = encoder_hidden_states.view(batch_size, -1, hidden_states.shape[-1])
383
+
384
+ # 2. Blocks
385
+ for block in self.transformer_blocks:
386
+ if torch.is_grad_enabled() and self.gradient_checkpointing:
387
+ hidden_states = self._gradient_checkpointing_func(
388
+ block,
389
+ hidden_states,
390
+ attention_mask,
391
+ encoder_hidden_states,
392
+ encoder_attention_mask,
393
+ timestep,
394
+ cross_attention_kwargs,
395
+ None,
396
+ )
397
+ else:
398
+ hidden_states = block(
399
+ hidden_states,
400
+ attention_mask=attention_mask,
401
+ encoder_hidden_states=encoder_hidden_states,
402
+ encoder_attention_mask=encoder_attention_mask,
403
+ timestep=timestep,
404
+ cross_attention_kwargs=cross_attention_kwargs,
405
+ class_labels=None,
406
+ )
407
+
408
+ # 3. Output
409
+ shift, scale = (
410
+ self.scale_shift_table[None] + embedded_timestep[:, None].to(self.scale_shift_table.device)
411
+ ).chunk(2, dim=1)
412
+ hidden_states = self.norm_out(hidden_states)
413
+ # Modulation
414
+ hidden_states = hidden_states * (1 + scale.to(hidden_states.device)) + shift.to(hidden_states.device)
415
+ hidden_states = self.proj_out(hidden_states)
416
+ hidden_states = hidden_states.squeeze(1)
417
+
418
+ # unpatchify
419
+ hidden_states = hidden_states.reshape(
420
+ shape=(-1, height, width, self.config.patch_size, self.config.patch_size, self.out_channels)
421
+ )
422
+ hidden_states = torch.einsum("nhwpqc->nchpwq", hidden_states)
423
+ output = hidden_states.reshape(
424
+ shape=(-1, self.out_channels, height * self.config.patch_size, width * self.config.patch_size)
425
+ )
426
+
427
+ if not return_dict:
428
+ return (output,)
429
+
430
+ return Transformer2DModelOutput(sample=output)
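The comments inside `PixArtTransformer2DModel.forward` above describe how a 2D keep/discard attention mask is turned into an additive bias before it reaches the attention scores. A minimal standalone sketch of that conversion, using illustrative tensor values (nothing here is taken from a checkpoint):

```python
import torch

# Key-padding mask: 1 = keep, 0 = discard, shape (batch, key_tokens).
attention_mask = torch.tensor([[1, 1, 1, 0], [1, 1, 0, 0]])
hidden_dtype = torch.float32  # stands in for hidden_states.dtype

# Kept tokens contribute 0 to the attention scores, discarded tokens -10000.0;
# the singleton query_tokens dimension lets the bias broadcast over
# (batch, heads, query_tokens, key_tokens) score tensors.
attention_bias = (1 - attention_mask.to(hidden_dtype)) * -10000.0
attention_bias = attention_bias.unsqueeze(1)

print(attention_bias.shape)  # torch.Size([2, 1, 4])
print(attention_bias[1])     # the two padded key positions carry -10000.0
```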
exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/models/transformers/prior_transformer.py ADDED
@@ -0,0 +1,384 @@
1
+ from dataclasses import dataclass
2
+ from typing import Dict, Optional, Union
3
+
4
+ import torch
5
+ import torch.nn.functional as F
6
+ from torch import nn
7
+
8
+ from ...configuration_utils import ConfigMixin, register_to_config
9
+ from ...loaders import PeftAdapterMixin, UNet2DConditionLoadersMixin
10
+ from ...utils import BaseOutput
11
+ from ..attention import BasicTransformerBlock
12
+ from ..attention_processor import (
13
+ ADDED_KV_ATTENTION_PROCESSORS,
14
+ CROSS_ATTENTION_PROCESSORS,
15
+ AttentionProcessor,
16
+ AttnAddedKVProcessor,
17
+ AttnProcessor,
18
+ )
19
+ from ..embeddings import TimestepEmbedding, Timesteps
20
+ from ..modeling_utils import ModelMixin
21
+
22
+
23
+ @dataclass
24
+ class PriorTransformerOutput(BaseOutput):
25
+ """
26
+ The output of [`PriorTransformer`].
27
+
28
+ Args:
29
+ predicted_image_embedding (`torch.Tensor` of shape `(batch_size, embedding_dim)`):
30
+ The predicted CLIP image embedding conditioned on the CLIP text embedding input.
31
+ """
32
+
33
+ predicted_image_embedding: torch.Tensor
34
+
35
+
36
+ class PriorTransformer(ModelMixin, ConfigMixin, UNet2DConditionLoadersMixin, PeftAdapterMixin):
37
+ """
38
+ A Prior Transformer model.
39
+
40
+ Parameters:
41
+ num_attention_heads (`int`, *optional*, defaults to 32): The number of heads to use for multi-head attention.
42
+ attention_head_dim (`int`, *optional*, defaults to 64): The number of channels in each head.
43
+ num_layers (`int`, *optional*, defaults to 20): The number of layers of Transformer blocks to use.
44
+ embedding_dim (`int`, *optional*, defaults to 768): The dimension of the model input `hidden_states`
45
+ num_embeddings (`int`, *optional*, defaults to 77):
46
+ The number of embeddings of the model input `hidden_states`
47
+ additional_embeddings (`int`, *optional*, defaults to 4): The number of additional tokens appended to the
48
+ projected `hidden_states`. The actual length of the used `hidden_states` is `num_embeddings +
49
+ additional_embeddings`.
50
+ dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use.
51
+ time_embed_act_fn (`str`, *optional*, defaults to 'silu'):
52
+ The activation function to use to create timestep embeddings.
53
+ norm_in_type (`str`, *optional*, defaults to None): The normalization layer to apply on hidden states before
54
+ passing to Transformer blocks. Set it to `None` if normalization is not needed.
55
+ embedding_proj_norm_type (`str`, *optional*, defaults to None):
56
+ The normalization layer to apply on the input `proj_embedding`. Set it to `None` if normalization is not
57
+ needed.
58
+ encoder_hid_proj_type (`str`, *optional*, defaults to `linear`):
59
+ The projection layer to apply on the input `encoder_hidden_states`. Set it to `None` if
60
+ `encoder_hidden_states` is `None`.
61
+ added_emb_type (`str`, *optional*, defaults to `prd`): Additional embeddings to condition the model.
62
+ Choose from `prd` or `None`. If `prd` is chosen, it will prepend a token indicating the (quantized) dot
63
+ product between the text embedding and image embedding, as proposed in the unCLIP paper
64
+ (https://huggingface.co/papers/2204.06125). If it is `None`, no additional embeddings will be prepended.
65
+ time_embed_dim (`int`, *optional*, defaults to None): The dimension of timestep embeddings.
66
+ If None, will be set to `num_attention_heads * attention_head_dim`.
67
+ embedding_proj_dim (`int`, *optional*, defaults to None):
68
+ The dimension of `proj_embedding`. If None, will be set to `embedding_dim`.
69
+ clip_embed_dim (`int`, *optional*, defaults to None):
70
+ The dimension of the output. If None, will be set to `embedding_dim`.
71
+ """
72
+
73
+ @register_to_config
74
+ def __init__(
75
+ self,
76
+ num_attention_heads: int = 32,
77
+ attention_head_dim: int = 64,
78
+ num_layers: int = 20,
79
+ embedding_dim: int = 768,
80
+ num_embeddings=77,
81
+ additional_embeddings=4,
82
+ dropout: float = 0.0,
83
+ time_embed_act_fn: str = "silu",
84
+ norm_in_type: Optional[str] = None, # layer
85
+ embedding_proj_norm_type: Optional[str] = None, # layer
86
+ encoder_hid_proj_type: Optional[str] = "linear", # linear
87
+ added_emb_type: Optional[str] = "prd", # prd
88
+ time_embed_dim: Optional[int] = None,
89
+ embedding_proj_dim: Optional[int] = None,
90
+ clip_embed_dim: Optional[int] = None,
91
+ ):
92
+ super().__init__()
93
+ self.num_attention_heads = num_attention_heads
94
+ self.attention_head_dim = attention_head_dim
95
+ inner_dim = num_attention_heads * attention_head_dim
96
+ self.additional_embeddings = additional_embeddings
97
+
98
+ time_embed_dim = time_embed_dim or inner_dim
99
+ embedding_proj_dim = embedding_proj_dim or embedding_dim
100
+ clip_embed_dim = clip_embed_dim or embedding_dim
101
+
102
+ self.time_proj = Timesteps(inner_dim, True, 0)
103
+ self.time_embedding = TimestepEmbedding(inner_dim, time_embed_dim, out_dim=inner_dim, act_fn=time_embed_act_fn)
104
+
105
+ self.proj_in = nn.Linear(embedding_dim, inner_dim)
106
+
107
+ if embedding_proj_norm_type is None:
108
+ self.embedding_proj_norm = None
109
+ elif embedding_proj_norm_type == "layer":
110
+ self.embedding_proj_norm = nn.LayerNorm(embedding_proj_dim)
111
+ else:
112
+ raise ValueError(f"unsupported embedding_proj_norm_type: {embedding_proj_norm_type}")
113
+
114
+ self.embedding_proj = nn.Linear(embedding_proj_dim, inner_dim)
115
+
116
+ if encoder_hid_proj_type is None:
117
+ self.encoder_hidden_states_proj = None
118
+ elif encoder_hid_proj_type == "linear":
119
+ self.encoder_hidden_states_proj = nn.Linear(embedding_dim, inner_dim)
120
+ else:
121
+ raise ValueError(f"unsupported encoder_hid_proj_type: {encoder_hid_proj_type}")
122
+
123
+ self.positional_embedding = nn.Parameter(torch.zeros(1, num_embeddings + additional_embeddings, inner_dim))
124
+
125
+ if added_emb_type == "prd":
126
+ self.prd_embedding = nn.Parameter(torch.zeros(1, 1, inner_dim))
127
+ elif added_emb_type is None:
128
+ self.prd_embedding = None
129
+ else:
130
+ raise ValueError(
131
+ f"`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`."
132
+ )
133
+
134
+ self.transformer_blocks = nn.ModuleList(
135
+ [
136
+ BasicTransformerBlock(
137
+ inner_dim,
138
+ num_attention_heads,
139
+ attention_head_dim,
140
+ dropout=dropout,
141
+ activation_fn="gelu",
142
+ attention_bias=True,
143
+ )
144
+ for d in range(num_layers)
145
+ ]
146
+ )
147
+
148
+ if norm_in_type == "layer":
149
+ self.norm_in = nn.LayerNorm(inner_dim)
150
+ elif norm_in_type is None:
151
+ self.norm_in = None
152
+ else:
153
+ raise ValueError(f"Unsupported norm_in_type: {norm_in_type}.")
154
+
155
+ self.norm_out = nn.LayerNorm(inner_dim)
156
+
157
+ self.proj_to_clip_embeddings = nn.Linear(inner_dim, clip_embed_dim)
158
+
159
+ causal_attention_mask = torch.full(
160
+ [num_embeddings + additional_embeddings, num_embeddings + additional_embeddings], -10000.0
161
+ )
162
+ causal_attention_mask.triu_(1)
163
+ causal_attention_mask = causal_attention_mask[None, ...]
164
+ self.register_buffer("causal_attention_mask", causal_attention_mask, persistent=False)
165
+
166
+ self.clip_mean = nn.Parameter(torch.zeros(1, clip_embed_dim))
167
+ self.clip_std = nn.Parameter(torch.zeros(1, clip_embed_dim))
168
+
169
+ @property
170
+ # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.attn_processors
171
+ def attn_processors(self) -> Dict[str, AttentionProcessor]:
172
+ r"""
173
+ Returns:
174
+ `dict` of attention processors: A dictionary containing all attention processors used in the model,
175
+ indexed by their weight names.
176
+ """
177
+ # set recursively
178
+ processors = {}
179
+
180
+ def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
181
+ if hasattr(module, "get_processor"):
182
+ processors[f"{name}.processor"] = module.get_processor()
183
+
184
+ for sub_name, child in module.named_children():
185
+ fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)
186
+
187
+ return processors
188
+
189
+ for name, module in self.named_children():
190
+ fn_recursive_add_processors(name, module, processors)
191
+
192
+ return processors
193
+
194
+ # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_attn_processor
195
+ def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
196
+ r"""
197
+ Sets the attention processor to use to compute attention.
198
+
199
+ Parameters:
200
+ processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`):
201
+ The instantiated processor class or a dictionary of processor classes that will be set as the processor
202
+ for **all** `Attention` layers.
203
+
204
+ If `processor` is a dict, the key needs to define the path to the corresponding cross attention
205
+ processor. This is strongly recommended when setting trainable attention processors.
206
+
207
+ """
208
+ count = len(self.attn_processors.keys())
209
+
210
+ if isinstance(processor, dict) and len(processor) != count:
211
+ raise ValueError(
212
+ f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
213
+ f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
214
+ )
215
+
216
+ def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
217
+ if hasattr(module, "set_processor"):
218
+ if not isinstance(processor, dict):
219
+ module.set_processor(processor)
220
+ else:
221
+ module.set_processor(processor.pop(f"{name}.processor"))
222
+
223
+ for sub_name, child in module.named_children():
224
+ fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)
225
+
226
+ for name, module in self.named_children():
227
+ fn_recursive_attn_processor(name, module, processor)
228
+
229
+ # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_default_attn_processor
230
+ def set_default_attn_processor(self):
231
+ """
232
+ Disables custom attention processors and sets the default attention implementation.
233
+ """
234
+ if all(proc.__class__ in ADDED_KV_ATTENTION_PROCESSORS for proc in self.attn_processors.values()):
235
+ processor = AttnAddedKVProcessor()
236
+ elif all(proc.__class__ in CROSS_ATTENTION_PROCESSORS for proc in self.attn_processors.values()):
237
+ processor = AttnProcessor()
238
+ else:
239
+ raise ValueError(
240
+ f"Cannot call `set_default_attn_processor` when attention processors are of type {next(iter(self.attn_processors.values()))}"
241
+ )
242
+
243
+ self.set_attn_processor(processor)
244
+
245
+ def forward(
246
+ self,
247
+ hidden_states,
248
+ timestep: Union[torch.Tensor, float, int],
249
+ proj_embedding: torch.Tensor,
250
+ encoder_hidden_states: Optional[torch.Tensor] = None,
251
+ attention_mask: Optional[torch.BoolTensor] = None,
252
+ return_dict: bool = True,
253
+ ):
254
+ """
255
+ The [`PriorTransformer`] forward method.
256
+
257
+ Args:
258
+ hidden_states (`torch.Tensor` of shape `(batch_size, embedding_dim)`):
259
+ The currently predicted image embeddings.
260
+ timestep (`torch.LongTensor`):
261
+ Current denoising step.
262
+ proj_embedding (`torch.Tensor` of shape `(batch_size, embedding_dim)`):
263
+ Projected embedding vector the denoising process is conditioned on.
264
+ encoder_hidden_states (`torch.Tensor` of shape `(batch_size, num_embeddings, embedding_dim)`):
265
+ Hidden states of the text embeddings the denoising process is conditioned on.
266
+ attention_mask (`torch.BoolTensor` of shape `(batch_size, num_embeddings)`):
267
+ Text mask for the text embeddings.
268
+ return_dict (`bool`, *optional*, defaults to `True`):
269
+ Whether or not to return a [`~models.transformers.prior_transformer.PriorTransformerOutput`] instead of
270
+ a plain tuple.
271
+
272
+ Returns:
273
+ [`~models.transformers.prior_transformer.PriorTransformerOutput`] or `tuple`:
274
+ If return_dict is True, a [`~models.transformers.prior_transformer.PriorTransformerOutput`] is
275
+ returned, otherwise a tuple is returned where the first element is the sample tensor.
276
+ """
277
+ batch_size = hidden_states.shape[0]
278
+
279
+ timesteps = timestep
280
+ if not torch.is_tensor(timesteps):
281
+ timesteps = torch.tensor([timesteps], dtype=torch.long, device=hidden_states.device)
282
+ elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
283
+ timesteps = timesteps[None].to(hidden_states.device)
284
+
285
+ # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
286
+ timesteps = timesteps * torch.ones(batch_size, dtype=timesteps.dtype, device=timesteps.device)
287
+
288
+ timesteps_projected = self.time_proj(timesteps)
289
+
290
+ # timesteps does not contain any weights and will always return f32 tensors
291
+ # but time_embedding might be fp16, so we need to cast here.
292
+ timesteps_projected = timesteps_projected.to(dtype=self.dtype)
293
+ time_embeddings = self.time_embedding(timesteps_projected)
294
+
295
+ if self.embedding_proj_norm is not None:
296
+ proj_embedding = self.embedding_proj_norm(proj_embedding)
297
+
298
+ proj_embeddings = self.embedding_proj(proj_embedding)
299
+ if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
300
+ encoder_hidden_states = self.encoder_hidden_states_proj(encoder_hidden_states)
301
+ elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
302
+ raise ValueError("`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set")
303
+
304
+ hidden_states = self.proj_in(hidden_states)
305
+
306
+ positional_embeddings = self.positional_embedding.to(hidden_states.dtype)
307
+
308
+ additional_embeds = []
309
+ additional_embeddings_len = 0
310
+
311
+ if encoder_hidden_states is not None:
312
+ additional_embeds.append(encoder_hidden_states)
313
+ additional_embeddings_len += encoder_hidden_states.shape[1]
314
+
315
+ if len(proj_embeddings.shape) == 2:
316
+ proj_embeddings = proj_embeddings[:, None, :]
317
+
318
+ if len(hidden_states.shape) == 2:
319
+ hidden_states = hidden_states[:, None, :]
320
+
321
+ additional_embeds = additional_embeds + [
322
+ proj_embeddings,
323
+ time_embeddings[:, None, :],
324
+ hidden_states,
325
+ ]
326
+
327
+ if self.prd_embedding is not None:
328
+ prd_embedding = self.prd_embedding.to(hidden_states.dtype).expand(batch_size, -1, -1)
329
+ additional_embeds.append(prd_embedding)
330
+
331
+ hidden_states = torch.cat(
332
+ additional_embeds,
333
+ dim=1,
334
+ )
335
+
336
+ # Allow positional_embedding to not include the `additional_embeddings` and instead pad it with zeros for these additional tokens
337
+ additional_embeddings_len = additional_embeddings_len + proj_embeddings.shape[1] + 1
338
+ if positional_embeddings.shape[1] < hidden_states.shape[1]:
339
+ positional_embeddings = F.pad(
340
+ positional_embeddings,
341
+ (
342
+ 0,
343
+ 0,
344
+ additional_embeddings_len,
345
+ self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
346
+ ),
347
+ value=0.0,
348
+ )
349
+
350
+ hidden_states = hidden_states + positional_embeddings
351
+
352
+ if attention_mask is not None:
353
+ attention_mask = (1 - attention_mask.to(hidden_states.dtype)) * -10000.0
354
+ attention_mask = F.pad(attention_mask, (0, self.additional_embeddings), value=0.0)
355
+ attention_mask = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype)
356
+ attention_mask = attention_mask.repeat_interleave(
357
+ self.config.num_attention_heads,
358
+ dim=0,
359
+ output_size=attention_mask.shape[0] * self.config.num_attention_heads,
360
+ )
361
+
362
+ if self.norm_in is not None:
363
+ hidden_states = self.norm_in(hidden_states)
364
+
365
+ for block in self.transformer_blocks:
366
+ hidden_states = block(hidden_states, attention_mask=attention_mask)
367
+
368
+ hidden_states = self.norm_out(hidden_states)
369
+
370
+ if self.prd_embedding is not None:
371
+ hidden_states = hidden_states[:, -1]
372
+ else:
373
+ hidden_states = hidden_states[:, additional_embeddings_len:]
374
+
375
+ predicted_image_embedding = self.proj_to_clip_embeddings(hidden_states)
376
+
377
+ if not return_dict:
378
+ return (predicted_image_embedding,)
379
+
380
+ return PriorTransformerOutput(predicted_image_embedding=predicted_image_embedding)
381
+
382
+ def post_process_latents(self, prior_latents):
383
+ prior_latents = (prior_latents * self.clip_std) + self.clip_mean
384
+ return prior_latents
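The `causal_attention_mask` buffer built in `PriorTransformer.__init__` above is an additive bias rather than a boolean mask: strictly-future positions carry -10000 so they are suppressed once the bias is added to the attention scores. A small sketch of the same construction for an illustrative 4-token sequence:

```python
import torch

seq_len = 4  # illustrative; the model uses num_embeddings + additional_embeddings

# Fill with -10000, keep only the strict upper triangle: entry (i, j) stays
# -10000 whenever j > i, so a token cannot attend to positions after it.
causal_attention_mask = torch.full([seq_len, seq_len], -10000.0)
causal_attention_mask.triu_(1)
causal_attention_mask = causal_attention_mask[None, ...]  # broadcastable batch dim

print(causal_attention_mask[0])
# tensor([[     0., -10000., -10000., -10000.],
#         [     0.,      0., -10000., -10000.],
#         [     0.,      0.,      0., -10000.],
#         [     0.,      0.,      0.,      0.]])
```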
exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/models/transformers/sana_transformer.py ADDED
@@ -0,0 +1,597 @@
1
+ # Copyright 2025 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import Any, Dict, Optional, Tuple, Union
16
+
17
+ import torch
18
+ import torch.nn.functional as F
19
+ from torch import nn
20
+
21
+ from ...configuration_utils import ConfigMixin, register_to_config
22
+ from ...loaders import FromOriginalModelMixin, PeftAdapterMixin
23
+ from ...utils import USE_PEFT_BACKEND, logging, scale_lora_layers, unscale_lora_layers
24
+ from ..attention_processor import (
25
+ Attention,
26
+ AttentionProcessor,
27
+ SanaLinearAttnProcessor2_0,
28
+ )
29
+ from ..embeddings import PatchEmbed, PixArtAlphaTextProjection, TimestepEmbedding, Timesteps
30
+ from ..modeling_outputs import Transformer2DModelOutput
31
+ from ..modeling_utils import ModelMixin
32
+ from ..normalization import AdaLayerNormSingle, RMSNorm
33
+
34
+
35
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
36
+
37
+
38
+ class GLUMBConv(nn.Module):
39
+ def __init__(
40
+ self,
41
+ in_channels: int,
42
+ out_channels: int,
43
+ expand_ratio: float = 4,
44
+ norm_type: Optional[str] = None,
45
+ residual_connection: bool = True,
46
+ ) -> None:
47
+ super().__init__()
48
+
49
+ hidden_channels = int(expand_ratio * in_channels)
50
+ self.norm_type = norm_type
51
+ self.residual_connection = residual_connection
52
+
53
+ self.nonlinearity = nn.SiLU()
54
+ self.conv_inverted = nn.Conv2d(in_channels, hidden_channels * 2, 1, 1, 0)
55
+ self.conv_depth = nn.Conv2d(hidden_channels * 2, hidden_channels * 2, 3, 1, 1, groups=hidden_channels * 2)
56
+ self.conv_point = nn.Conv2d(hidden_channels, out_channels, 1, 1, 0, bias=False)
57
+
58
+ self.norm = None
59
+ if norm_type == "rms_norm":
60
+ self.norm = RMSNorm(out_channels, eps=1e-5, elementwise_affine=True, bias=True)
61
+
62
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
63
+ if self.residual_connection:
64
+ residual = hidden_states
65
+
66
+ hidden_states = self.conv_inverted(hidden_states)
67
+ hidden_states = self.nonlinearity(hidden_states)
68
+
69
+ hidden_states = self.conv_depth(hidden_states)
70
+ hidden_states, gate = torch.chunk(hidden_states, 2, dim=1)
71
+ hidden_states = hidden_states * self.nonlinearity(gate)
72
+
73
+ hidden_states = self.conv_point(hidden_states)
74
+
75
+ if self.norm_type == "rms_norm":
76
+ # move channel to the last dimension so we apply RMSnorm across channel dimension
77
+ hidden_states = self.norm(hidden_states.movedim(1, -1)).movedim(-1, 1)
78
+
79
+ if self.residual_connection:
80
+ hidden_states = hidden_states + residual
81
+
82
+ return hidden_states
83
+
84
+
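A minimal standalone sketch of the GLU-style gating inside `GLUMBConv.forward` above: the depthwise convolution output is split into two halves along the channel dimension and one half gates the other through SiLU (sizes below are illustrative):

```python
import torch
import torch.nn.functional as F

hidden_channels, height, width = 8, 4, 4
conv_depth_out = torch.randn(1, hidden_channels * 2, height, width)  # depthwise conv output

# Split channels into a value half and a gate half; SiLU(gate) modulates the value.
value, gate = torch.chunk(conv_depth_out, 2, dim=1)
gated = value * F.silu(gate)

print(gated.shape)  # torch.Size([1, 8, 4, 4])
```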
85
+ class SanaModulatedNorm(nn.Module):
86
+ def __init__(self, dim: int, elementwise_affine: bool = False, eps: float = 1e-6):
87
+ super().__init__()
88
+ self.norm = nn.LayerNorm(dim, elementwise_affine=elementwise_affine, eps=eps)
89
+
90
+ def forward(
91
+ self, hidden_states: torch.Tensor, temb: torch.Tensor, scale_shift_table: torch.Tensor
92
+ ) -> torch.Tensor:
93
+ hidden_states = self.norm(hidden_states)
94
+ shift, scale = (scale_shift_table[None] + temb[:, None].to(scale_shift_table.device)).chunk(2, dim=1)
95
+ hidden_states = hidden_states * (1 + scale) + shift
96
+ return hidden_states
97
+
98
+
99
+ class SanaCombinedTimestepGuidanceEmbeddings(nn.Module):
100
+ def __init__(self, embedding_dim):
101
+ super().__init__()
102
+ self.time_proj = Timesteps(num_channels=256, flip_sin_to_cos=True, downscale_freq_shift=0)
103
+ self.timestep_embedder = TimestepEmbedding(in_channels=256, time_embed_dim=embedding_dim)
104
+
105
+ self.guidance_condition_proj = Timesteps(num_channels=256, flip_sin_to_cos=True, downscale_freq_shift=0)
106
+ self.guidance_embedder = TimestepEmbedding(in_channels=256, time_embed_dim=embedding_dim)
107
+
108
+ self.silu = nn.SiLU()
109
+ self.linear = nn.Linear(embedding_dim, 6 * embedding_dim, bias=True)
110
+
111
+ def forward(self, timestep: torch.Tensor, guidance: torch.Tensor = None, hidden_dtype: torch.dtype = None):
112
+ timesteps_proj = self.time_proj(timestep)
113
+ timesteps_emb = self.timestep_embedder(timesteps_proj.to(dtype=hidden_dtype)) # (N, D)
114
+
115
+ guidance_proj = self.guidance_condition_proj(guidance)
116
+ guidance_emb = self.guidance_embedder(guidance_proj.to(dtype=hidden_dtype))
117
+ conditioning = timesteps_emb + guidance_emb
118
+
119
+ return self.linear(self.silu(conditioning)), conditioning
120
+
121
+
122
+ class SanaAttnProcessor2_0:
123
+ r"""
124
+ Processor for implementing scaled dot-product attention (enabled by default if you're using PyTorch 2.0).
125
+ """
126
+
127
+ def __init__(self):
128
+ if not hasattr(F, "scaled_dot_product_attention"):
129
+ raise ImportError("SanaAttnProcessor2_0 requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0.")
130
+
131
+ def __call__(
132
+ self,
133
+ attn: Attention,
134
+ hidden_states: torch.Tensor,
135
+ encoder_hidden_states: Optional[torch.Tensor] = None,
136
+ attention_mask: Optional[torch.Tensor] = None,
137
+ ) -> torch.Tensor:
138
+ batch_size, sequence_length, _ = (
139
+ hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape
140
+ )
141
+
142
+ if attention_mask is not None:
143
+ attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)
144
+ # scaled_dot_product_attention expects attention_mask shape to be
145
+ # (batch, heads, source_length, target_length)
146
+ attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1])
147
+
148
+ query = attn.to_q(hidden_states)
149
+
150
+ if encoder_hidden_states is None:
151
+ encoder_hidden_states = hidden_states
152
+
153
+ key = attn.to_k(encoder_hidden_states)
154
+ value = attn.to_v(encoder_hidden_states)
155
+
156
+ if attn.norm_q is not None:
157
+ query = attn.norm_q(query)
158
+ if attn.norm_k is not None:
159
+ key = attn.norm_k(key)
160
+
161
+ inner_dim = key.shape[-1]
162
+ head_dim = inner_dim // attn.heads
163
+
164
+ query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
165
+
166
+ key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
167
+ value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
168
+
169
+ # the output of sdp = (batch, num_heads, seq_len, head_dim)
170
+ # TODO: add support for attn.scale when we move to Torch 2.1
171
+ hidden_states = F.scaled_dot_product_attention(
172
+ query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False
173
+ )
174
+
175
+ hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim)
176
+ hidden_states = hidden_states.to(query.dtype)
177
+
178
+ # linear proj
179
+ hidden_states = attn.to_out[0](hidden_states)
180
+ # dropout
181
+ hidden_states = attn.to_out[1](hidden_states)
182
+
183
+ hidden_states = hidden_states / attn.rescale_output_factor
184
+
185
+ return hidden_states
186
+
187
+
188
+ class SanaTransformerBlock(nn.Module):
189
+ r"""
190
+ Transformer block introduced in [Sana](https://huggingface.co/papers/2410.10629).
191
+ """
192
+
193
+ def __init__(
194
+ self,
195
+ dim: int = 2240,
196
+ num_attention_heads: int = 70,
197
+ attention_head_dim: int = 32,
198
+ dropout: float = 0.0,
199
+ num_cross_attention_heads: Optional[int] = 20,
200
+ cross_attention_head_dim: Optional[int] = 112,
201
+ cross_attention_dim: Optional[int] = 2240,
202
+ attention_bias: bool = True,
203
+ norm_elementwise_affine: bool = False,
204
+ norm_eps: float = 1e-6,
205
+ attention_out_bias: bool = True,
206
+ mlp_ratio: float = 2.5,
207
+ qk_norm: Optional[str] = None,
208
+ ) -> None:
209
+ super().__init__()
210
+
211
+ # 1. Self Attention
212
+ self.norm1 = nn.LayerNorm(dim, elementwise_affine=False, eps=norm_eps)
213
+ self.attn1 = Attention(
214
+ query_dim=dim,
215
+ heads=num_attention_heads,
216
+ dim_head=attention_head_dim,
217
+ kv_heads=num_attention_heads if qk_norm is not None else None,
218
+ qk_norm=qk_norm,
219
+ dropout=dropout,
220
+ bias=attention_bias,
221
+ cross_attention_dim=None,
222
+ processor=SanaLinearAttnProcessor2_0(),
223
+ )
224
+
225
+ # 2. Cross Attention
226
+ if cross_attention_dim is not None:
227
+ self.norm2 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine, eps=norm_eps)
228
+ self.attn2 = Attention(
229
+ query_dim=dim,
230
+ qk_norm=qk_norm,
231
+ kv_heads=num_cross_attention_heads if qk_norm is not None else None,
232
+ cross_attention_dim=cross_attention_dim,
233
+ heads=num_cross_attention_heads,
234
+ dim_head=cross_attention_head_dim,
235
+ dropout=dropout,
236
+ bias=True,
237
+ out_bias=attention_out_bias,
238
+ processor=SanaAttnProcessor2_0(),
239
+ )
240
+
241
+ # 3. Feed-forward
242
+ self.ff = GLUMBConv(dim, dim, mlp_ratio, norm_type=None, residual_connection=False)
243
+
244
+ self.scale_shift_table = nn.Parameter(torch.randn(6, dim) / dim**0.5)
245
+
246
+ def forward(
247
+ self,
248
+ hidden_states: torch.Tensor,
249
+ attention_mask: Optional[torch.Tensor] = None,
250
+ encoder_hidden_states: Optional[torch.Tensor] = None,
251
+ encoder_attention_mask: Optional[torch.Tensor] = None,
252
+ timestep: Optional[torch.LongTensor] = None,
253
+ height: int = None,
254
+ width: int = None,
255
+ ) -> torch.Tensor:
256
+ batch_size = hidden_states.shape[0]
257
+
258
+ # 1. Modulation
259
+ shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = (
260
+ self.scale_shift_table[None] + timestep.reshape(batch_size, 6, -1)
261
+ ).chunk(6, dim=1)
262
+
263
+ # 2. Self Attention
264
+ norm_hidden_states = self.norm1(hidden_states)
265
+ norm_hidden_states = norm_hidden_states * (1 + scale_msa) + shift_msa
266
+ norm_hidden_states = norm_hidden_states.to(hidden_states.dtype)
267
+
268
+ attn_output = self.attn1(norm_hidden_states)
269
+ hidden_states = hidden_states + gate_msa * attn_output
270
+
271
+ # 3. Cross Attention
272
+ if self.attn2 is not None:
273
+ attn_output = self.attn2(
274
+ hidden_states,
275
+ encoder_hidden_states=encoder_hidden_states,
276
+ attention_mask=encoder_attention_mask,
277
+ )
278
+ hidden_states = attn_output + hidden_states
279
+
280
+ # 4. Feed-forward
281
+ norm_hidden_states = self.norm2(hidden_states)
282
+ norm_hidden_states = norm_hidden_states * (1 + scale_mlp) + shift_mlp
283
+
284
+ norm_hidden_states = norm_hidden_states.unflatten(1, (height, width)).permute(0, 3, 1, 2)
285
+ ff_output = self.ff(norm_hidden_states)
286
+ ff_output = ff_output.flatten(2, 3).permute(0, 2, 1)
287
+ hidden_states = hidden_states + gate_mlp * ff_output
288
+
289
+ return hidden_states
290
+
291
+
292
+ class SanaTransformer2DModel(ModelMixin, ConfigMixin, PeftAdapterMixin, FromOriginalModelMixin):
293
+ r"""
294
+ A 2D Transformer model introduced in the [Sana](https://huggingface.co/papers/2410.10629) family of models.
295
+
296
+ Args:
297
+ in_channels (`int`, defaults to `32`):
298
+ The number of channels in the input.
299
+ out_channels (`int`, *optional*, defaults to `32`):
300
+ The number of channels in the output.
301
+ num_attention_heads (`int`, defaults to `70`):
302
+ The number of heads to use for multi-head attention.
303
+ attention_head_dim (`int`, defaults to `32`):
304
+ The number of channels in each head.
305
+ num_layers (`int`, defaults to `20`):
306
+ The number of layers of Transformer blocks to use.
307
+ num_cross_attention_heads (`int`, *optional*, defaults to `20`):
308
+ The number of heads to use for cross-attention.
309
+ cross_attention_head_dim (`int`, *optional*, defaults to `112`):
310
+ The number of channels in each head for cross-attention.
311
+ cross_attention_dim (`int`, *optional*, defaults to `2240`):
312
+ The number of channels in the cross-attention output.
313
+ caption_channels (`int`, defaults to `2304`):
314
+ The number of channels in the caption embeddings.
315
+ mlp_ratio (`float`, defaults to `2.5`):
316
+ The expansion ratio to use in the GLUMBConv layer.
317
+ dropout (`float`, defaults to `0.0`):
318
+ The dropout probability.
319
+ attention_bias (`bool`, defaults to `False`):
320
+ Whether to use bias in the attention layer.
321
+ sample_size (`int`, defaults to `32`):
322
+ The base size of the input latent.
323
+ patch_size (`int`, defaults to `1`):
324
+ The size of the patches to use in the patch embedding layer.
325
+ norm_elementwise_affine (`bool`, defaults to `False`):
326
+ Whether to use learnable elementwise affine parameters in the normalization layer.
327
+ norm_eps (`float`, defaults to `1e-6`):
328
+ The epsilon value for the normalization layer.
329
+ qk_norm (`str`, *optional*, defaults to `None`):
330
+ The normalization to use for the query and key.
331
+ timestep_scale (`float`, defaults to `1.0`):
332
+ The scale to use for the timesteps.
333
+ """
334
+
335
+ _supports_gradient_checkpointing = True
336
+ _no_split_modules = ["SanaTransformerBlock", "PatchEmbed", "SanaModulatedNorm"]
337
+ _skip_layerwise_casting_patterns = ["patch_embed", "norm"]
338
+
339
+ @register_to_config
340
+ def __init__(
341
+ self,
342
+ in_channels: int = 32,
343
+ out_channels: Optional[int] = 32,
344
+ num_attention_heads: int = 70,
345
+ attention_head_dim: int = 32,
346
+ num_layers: int = 20,
347
+ num_cross_attention_heads: Optional[int] = 20,
348
+ cross_attention_head_dim: Optional[int] = 112,
349
+ cross_attention_dim: Optional[int] = 2240,
350
+ caption_channels: int = 2304,
351
+ mlp_ratio: float = 2.5,
352
+ dropout: float = 0.0,
353
+ attention_bias: bool = False,
354
+ sample_size: int = 32,
355
+ patch_size: int = 1,
356
+ norm_elementwise_affine: bool = False,
357
+ norm_eps: float = 1e-6,
358
+ interpolation_scale: Optional[int] = None,
359
+ guidance_embeds: bool = False,
360
+ guidance_embeds_scale: float = 0.1,
361
+ qk_norm: Optional[str] = None,
362
+ timestep_scale: float = 1.0,
363
+ ) -> None:
364
+ super().__init__()
365
+
366
+ out_channels = out_channels or in_channels
367
+ inner_dim = num_attention_heads * attention_head_dim
368
+
369
+ # 1. Patch Embedding
370
+ self.patch_embed = PatchEmbed(
371
+ height=sample_size,
372
+ width=sample_size,
373
+ patch_size=patch_size,
374
+ in_channels=in_channels,
375
+ embed_dim=inner_dim,
376
+ interpolation_scale=interpolation_scale,
377
+ pos_embed_type="sincos" if interpolation_scale is not None else None,
378
+ )
379
+
380
+ # 2. Additional condition embeddings
381
+ if guidance_embeds:
382
+ self.time_embed = SanaCombinedTimestepGuidanceEmbeddings(inner_dim)
383
+ else:
384
+ self.time_embed = AdaLayerNormSingle(inner_dim)
385
+
386
+ self.caption_projection = PixArtAlphaTextProjection(in_features=caption_channels, hidden_size=inner_dim)
387
+ self.caption_norm = RMSNorm(inner_dim, eps=1e-5, elementwise_affine=True)
388
+
389
+ # 3. Transformer blocks
390
+ self.transformer_blocks = nn.ModuleList(
391
+ [
392
+ SanaTransformerBlock(
393
+ inner_dim,
394
+ num_attention_heads,
395
+ attention_head_dim,
396
+ dropout=dropout,
397
+ num_cross_attention_heads=num_cross_attention_heads,
398
+ cross_attention_head_dim=cross_attention_head_dim,
399
+ cross_attention_dim=cross_attention_dim,
400
+ attention_bias=attention_bias,
401
+ norm_elementwise_affine=norm_elementwise_affine,
402
+ norm_eps=norm_eps,
403
+ mlp_ratio=mlp_ratio,
404
+ qk_norm=qk_norm,
405
+ )
406
+ for _ in range(num_layers)
407
+ ]
408
+ )
409
+
410
+ # 4. Output blocks
411
+ self.scale_shift_table = nn.Parameter(torch.randn(2, inner_dim) / inner_dim**0.5)
412
+ self.norm_out = SanaModulatedNorm(inner_dim, elementwise_affine=False, eps=1e-6)
413
+ self.proj_out = nn.Linear(inner_dim, patch_size * patch_size * out_channels)
414
+
415
+ self.gradient_checkpointing = False
416
+
417
+ @property
418
+ # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.attn_processors
419
+ def attn_processors(self) -> Dict[str, AttentionProcessor]:
420
+ r"""
421
+ Returns:
422
+ `dict` of attention processors: A dictionary containing all attention processors used in the model,
423
+ indexed by their weight names.
424
+ """
425
+ # set recursively
426
+ processors = {}
427
+
428
+ def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
429
+ if hasattr(module, "get_processor"):
430
+ processors[f"{name}.processor"] = module.get_processor()
431
+
432
+ for sub_name, child in module.named_children():
433
+ fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)
434
+
435
+ return processors
436
+
437
+ for name, module in self.named_children():
438
+ fn_recursive_add_processors(name, module, processors)
439
+
440
+ return processors
441
+
442
+ # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_attn_processor
443
+ def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
444
+ r"""
445
+ Sets the attention processor to use to compute attention.
446
+
447
+ Parameters:
448
+ processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`):
449
+ The instantiated processor class or a dictionary of processor classes that will be set as the processor
450
+ for **all** `Attention` layers.
451
+
452
+ If `processor` is a dict, the key needs to define the path to the corresponding cross attention
453
+ processor. This is strongly recommended when setting trainable attention processors.
454
+
455
+ """
456
+ count = len(self.attn_processors.keys())
457
+
458
+ if isinstance(processor, dict) and len(processor) != count:
459
+ raise ValueError(
460
+ f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
461
+ f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
462
+ )
463
+
464
+ def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
465
+ if hasattr(module, "set_processor"):
466
+ if not isinstance(processor, dict):
467
+ module.set_processor(processor)
468
+ else:
469
+ module.set_processor(processor.pop(f"{name}.processor"))
470
+
471
+ for sub_name, child in module.named_children():
472
+ fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)
473
+
474
+ for name, module in self.named_children():
475
+ fn_recursive_attn_processor(name, module, processor)
476
+
477
+ def forward(
478
+ self,
479
+ hidden_states: torch.Tensor,
480
+ encoder_hidden_states: torch.Tensor,
481
+ timestep: torch.Tensor,
482
+ guidance: Optional[torch.Tensor] = None,
483
+ encoder_attention_mask: Optional[torch.Tensor] = None,
484
+ attention_mask: Optional[torch.Tensor] = None,
485
+ attention_kwargs: Optional[Dict[str, Any]] = None,
486
+ controlnet_block_samples: Optional[Tuple[torch.Tensor]] = None,
487
+ return_dict: bool = True,
488
+ ) -> Union[Tuple[torch.Tensor, ...], Transformer2DModelOutput]:
489
+ if attention_kwargs is not None:
490
+ attention_kwargs = attention_kwargs.copy()
491
+ lora_scale = attention_kwargs.pop("scale", 1.0)
492
+ else:
493
+ lora_scale = 1.0
494
+
495
+ if USE_PEFT_BACKEND:
496
+ # weight the lora layers by setting `lora_scale` for each PEFT layer
497
+ scale_lora_layers(self, lora_scale)
498
+ else:
499
+ if attention_kwargs is not None and attention_kwargs.get("scale", None) is not None:
500
+ logger.warning(
501
+ "Passing `scale` via `attention_kwargs` when not using the PEFT backend is ineffective."
502
+ )
503
+
504
+ # ensure attention_mask is a bias, and give it a singleton query_tokens dimension.
505
+ # we may have done this conversion already, e.g. if we came here via UNet2DConditionModel#forward.
506
+ # we can tell by counting dims; if ndim == 2: it's a mask rather than a bias.
507
+ # expects mask of shape:
508
+ # [batch, key_tokens]
509
+ # adds singleton query_tokens dimension:
510
+ # [batch, 1, key_tokens]
511
+ # this helps to broadcast it as a bias over attention scores, which will be in one of the following shapes:
512
+ # [batch, heads, query_tokens, key_tokens] (e.g. torch sdp attn)
513
+ # [batch * heads, query_tokens, key_tokens] (e.g. xformers or classic attn)
514
+ if attention_mask is not None and attention_mask.ndim == 2:
515
+ # assume that mask is expressed as:
516
+ # (1 = keep, 0 = discard)
517
+ # convert mask into a bias that can be added to attention scores:
518
+ # (keep = +0, discard = -10000.0)
519
+ attention_mask = (1 - attention_mask.to(hidden_states.dtype)) * -10000.0
520
+ attention_mask = attention_mask.unsqueeze(1)
521
+
522
+ # convert encoder_attention_mask to a bias the same way we do for attention_mask
523
+ if encoder_attention_mask is not None and encoder_attention_mask.ndim == 2:
524
+ encoder_attention_mask = (1 - encoder_attention_mask.to(hidden_states.dtype)) * -10000.0
525
+ encoder_attention_mask = encoder_attention_mask.unsqueeze(1)
526
+
527
+ # 1. Input
528
+ batch_size, num_channels, height, width = hidden_states.shape
529
+ p = self.config.patch_size
530
+ post_patch_height, post_patch_width = height // p, width // p
531
+
532
+ hidden_states = self.patch_embed(hidden_states)
533
+
534
+ if guidance is not None:
535
+ timestep, embedded_timestep = self.time_embed(
536
+ timestep, guidance=guidance, hidden_dtype=hidden_states.dtype
537
+ )
538
+ else:
539
+ timestep, embedded_timestep = self.time_embed(
540
+ timestep, batch_size=batch_size, hidden_dtype=hidden_states.dtype
541
+ )
542
+
543
+ encoder_hidden_states = self.caption_projection(encoder_hidden_states)
544
+ encoder_hidden_states = encoder_hidden_states.view(batch_size, -1, hidden_states.shape[-1])
545
+
546
+ encoder_hidden_states = self.caption_norm(encoder_hidden_states)
547
+
548
+ # 2. Transformer blocks
549
+ if torch.is_grad_enabled() and self.gradient_checkpointing:
550
+ for index_block, block in enumerate(self.transformer_blocks):
551
+ hidden_states = self._gradient_checkpointing_func(
552
+ block,
553
+ hidden_states,
554
+ attention_mask,
555
+ encoder_hidden_states,
556
+ encoder_attention_mask,
557
+ timestep,
558
+ post_patch_height,
559
+ post_patch_width,
560
+ )
561
+ if controlnet_block_samples is not None and 0 < index_block <= len(controlnet_block_samples):
562
+ hidden_states = hidden_states + controlnet_block_samples[index_block - 1]
563
+
564
+ else:
565
+ for index_block, block in enumerate(self.transformer_blocks):
566
+ hidden_states = block(
567
+ hidden_states,
568
+ attention_mask,
569
+ encoder_hidden_states,
570
+ encoder_attention_mask,
571
+ timestep,
572
+ post_patch_height,
573
+ post_patch_width,
574
+ )
575
+ if controlnet_block_samples is not None and 0 < index_block <= len(controlnet_block_samples):
576
+ hidden_states = hidden_states + controlnet_block_samples[index_block - 1]
577
+
578
+ # 3. Normalization
579
+ hidden_states = self.norm_out(hidden_states, embedded_timestep, self.scale_shift_table)
580
+
581
+ hidden_states = self.proj_out(hidden_states)
582
+
583
+ # 5. Unpatchify
584
+ hidden_states = hidden_states.reshape(
585
+ batch_size, post_patch_height, post_patch_width, self.config.patch_size, self.config.patch_size, -1
586
+ )
587
+ hidden_states = hidden_states.permute(0, 5, 1, 3, 2, 4)
588
+ output = hidden_states.reshape(batch_size, -1, post_patch_height * p, post_patch_width * p)
589
+
590
+ if USE_PEFT_BACKEND:
591
+ # remove `lora_scale` from each PEFT layer
592
+ unscale_lora_layers(self, lora_scale)
593
+
594
+ if not return_dict:
595
+ return (output,)
596
+
597
+ return Transformer2DModelOutput(sample=output)
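The unpatchify step at the end of `SanaTransformer2DModel.forward` above folds each token's `p * p * out_channels` features back into a spatial latent. A shape-only sketch of the same reshape/permute sequence with arbitrarily chosen sizes:

```python
import torch

batch_size, out_channels, p = 2, 32, 2        # illustrative values
post_patch_height, post_patch_width = 8, 8    # latent height/width divided by p

# proj_out yields one token per patch with p * p * out_channels features.
hidden_states = torch.randn(
    batch_size, post_patch_height * post_patch_width, p * p * out_channels
)

hidden_states = hidden_states.reshape(
    batch_size, post_patch_height, post_patch_width, p, p, -1
)
hidden_states = hidden_states.permute(0, 5, 1, 3, 2, 4)  # (B, C, H, p, W, p)
output = hidden_states.reshape(batch_size, -1, post_patch_height * p, post_patch_width * p)

print(output.shape)  # torch.Size([2, 32, 16, 16])
```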
exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/models/transformers/stable_audio_transformer.py ADDED
@@ -0,0 +1,439 @@
1
+ # Copyright 2025 Stability AI and The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+
16
+ from typing import Dict, Optional, Union
17
+
18
+ import numpy as np
19
+ import torch
20
+ import torch.nn as nn
21
+ import torch.utils.checkpoint
22
+
23
+ from ...configuration_utils import ConfigMixin, register_to_config
24
+ from ...utils import logging
25
+ from ...utils.torch_utils import maybe_allow_in_graph
26
+ from ..attention import FeedForward
27
+ from ..attention_processor import Attention, AttentionProcessor, StableAudioAttnProcessor2_0
28
+ from ..modeling_utils import ModelMixin
29
+ from ..transformers.transformer_2d import Transformer2DModelOutput
30
+
31
+
32
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
33
+
34
+
35
+ class StableAudioGaussianFourierProjection(nn.Module):
36
+ """Gaussian Fourier embeddings for noise levels."""
37
+
38
+ # Copied from diffusers.models.embeddings.GaussianFourierProjection.__init__
39
+ def __init__(
40
+ self, embedding_size: int = 256, scale: float = 1.0, set_W_to_weight=True, log=True, flip_sin_to_cos=False
41
+ ):
42
+ super().__init__()
43
+ self.weight = nn.Parameter(torch.randn(embedding_size) * scale, requires_grad=False)
44
+ self.log = log
45
+ self.flip_sin_to_cos = flip_sin_to_cos
46
+
47
+ if set_W_to_weight:
48
+ # to delete later
49
+ del self.weight
50
+ self.W = nn.Parameter(torch.randn(embedding_size) * scale, requires_grad=False)
51
+ self.weight = self.W
52
+ del self.W
53
+
54
+ def forward(self, x):
55
+ if self.log:
56
+ x = torch.log(x)
57
+
58
+ x_proj = 2 * np.pi * x[:, None] @ self.weight[None, :]
59
+
60
+ if self.flip_sin_to_cos:
61
+ out = torch.cat([torch.cos(x_proj), torch.sin(x_proj)], dim=-1)
62
+ else:
63
+ out = torch.cat([torch.sin(x_proj), torch.cos(x_proj)], dim=-1)
64
+ return out
65
+
66
+
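A minimal standalone sketch of the Fourier feature computation in `StableAudioGaussianFourierProjection.forward` above, for the configuration `StableAudioDiTModel` uses (`log=False`, `flip_sin_to_cos=True`); sizes are illustrative:

```python
import torch

embedding_size, batch_size = 256, 2
weight = torch.randn(embedding_size)  # frozen random frequencies (requires_grad=False in the module)
timesteps = torch.rand(batch_size)    # (batch,) noise levels

# Project each scalar onto the random frequencies, then embed as [cos, sin] features.
x_proj = 2 * torch.pi * timesteps[:, None] @ weight[None, :]         # (batch, embedding_size)
fourier = torch.cat([torch.cos(x_proj), torch.sin(x_proj)], dim=-1)  # flip_sin_to_cos=True ordering

print(fourier.shape)  # torch.Size([2, 512])
```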
67
+ @maybe_allow_in_graph
68
+ class StableAudioDiTBlock(nn.Module):
69
+ r"""
70
+ Transformer block used in the Stable Audio model (https://github.com/Stability-AI/stable-audio-tools). Allows skip
71
+ connections and QK normalization.
72
+
73
+ Parameters:
74
+ dim (`int`): The number of channels in the input and output.
75
+ num_attention_heads (`int`): The number of heads to use for the query states.
76
+ num_key_value_attention_heads (`int`): The number of heads to use for the key and value states.
77
+ attention_head_dim (`int`): The number of channels in each head.
78
+ dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use.
79
+ cross_attention_dim (`int`, *optional*): The size of the encoder_hidden_states vector for cross attention.
80
+ upcast_attention (`bool`, *optional*):
81
+ Whether to upcast the attention computation to float32. This is useful for mixed precision training.
82
+ """
83
+
84
+ def __init__(
85
+ self,
86
+ dim: int,
87
+ num_attention_heads: int,
88
+ num_key_value_attention_heads: int,
89
+ attention_head_dim: int,
90
+ dropout=0.0,
91
+ cross_attention_dim: Optional[int] = None,
92
+ upcast_attention: bool = False,
93
+ norm_eps: float = 1e-5,
94
+ ff_inner_dim: Optional[int] = None,
95
+ ):
96
+ super().__init__()
97
+ # Define 3 blocks. Each block has its own normalization layer.
98
+ # 1. Self-Attn
99
+ self.norm1 = nn.LayerNorm(dim, elementwise_affine=True, eps=norm_eps)
100
+ self.attn1 = Attention(
101
+ query_dim=dim,
102
+ heads=num_attention_heads,
103
+ dim_head=attention_head_dim,
104
+ dropout=dropout,
105
+ bias=False,
106
+ upcast_attention=upcast_attention,
107
+ out_bias=False,
108
+ processor=StableAudioAttnProcessor2_0(),
109
+ )
110
+
111
+ # 2. Cross-Attn
112
+ self.norm2 = nn.LayerNorm(dim, norm_eps, True)
113
+
114
+ self.attn2 = Attention(
115
+ query_dim=dim,
116
+ cross_attention_dim=cross_attention_dim,
117
+ heads=num_attention_heads,
118
+ dim_head=attention_head_dim,
119
+ kv_heads=num_key_value_attention_heads,
120
+ dropout=dropout,
121
+ bias=False,
122
+ upcast_attention=upcast_attention,
123
+ out_bias=False,
124
+ processor=StableAudioAttnProcessor2_0(),
125
+ ) # is self-attn if encoder_hidden_states is none
126
+
127
+ # 3. Feed-forward
128
+ self.norm3 = nn.LayerNorm(dim, norm_eps, True)
129
+ self.ff = FeedForward(
130
+ dim,
131
+ dropout=dropout,
132
+ activation_fn="swiglu",
133
+ final_dropout=False,
134
+ inner_dim=ff_inner_dim,
135
+ bias=True,
136
+ )
137
+
138
+ # let chunk size default to None
139
+ self._chunk_size = None
140
+ self._chunk_dim = 0
141
+
142
+ def set_chunk_feed_forward(self, chunk_size: Optional[int], dim: int = 0):
143
+ # Sets chunk feed-forward
144
+ self._chunk_size = chunk_size
145
+ self._chunk_dim = dim
146
+
147
+ def forward(
148
+ self,
149
+ hidden_states: torch.Tensor,
150
+ attention_mask: Optional[torch.Tensor] = None,
151
+ encoder_hidden_states: Optional[torch.Tensor] = None,
152
+ encoder_attention_mask: Optional[torch.Tensor] = None,
153
+ rotary_embedding: Optional[torch.FloatTensor] = None,
154
+ ) -> torch.Tensor:
155
+ # Notice that normalization is always applied before the real computation in the following blocks.
156
+ # 0. Self-Attention
157
+ norm_hidden_states = self.norm1(hidden_states)
158
+
159
+ attn_output = self.attn1(
160
+ norm_hidden_states,
161
+ attention_mask=attention_mask,
162
+ rotary_emb=rotary_embedding,
163
+ )
164
+
165
+ hidden_states = attn_output + hidden_states
166
+
167
+ # 2. Cross-Attention
168
+ norm_hidden_states = self.norm2(hidden_states)
169
+
170
+ attn_output = self.attn2(
171
+ norm_hidden_states,
172
+ encoder_hidden_states=encoder_hidden_states,
173
+ attention_mask=encoder_attention_mask,
174
+ )
175
+ hidden_states = attn_output + hidden_states
176
+
177
+ # 3. Feed-forward
178
+ norm_hidden_states = self.norm3(hidden_states)
179
+ ff_output = self.ff(norm_hidden_states)
180
+
181
+ hidden_states = ff_output + hidden_states
182
+
183
+ return hidden_states
184
+
185
+
186
+ class StableAudioDiTModel(ModelMixin, ConfigMixin):
187
+ """
188
+ The Diffusion Transformer model introduced in Stable Audio.
189
+
190
+ Reference: https://github.com/Stability-AI/stable-audio-tools
191
+
192
+ Parameters:
193
+ sample_size ( `int`, *optional*, defaults to 1024): The size of the input sample.
194
+ in_channels (`int`, *optional*, defaults to 64): The number of channels in the input.
195
+ num_layers (`int`, *optional*, defaults to 24): The number of layers of Transformer blocks to use.
196
+ attention_head_dim (`int`, *optional*, defaults to 64): The number of channels in each head.
197
+ num_attention_heads (`int`, *optional*, defaults to 24): The number of heads to use for the query states.
198
+ num_key_value_attention_heads (`int`, *optional*, defaults to 12):
199
+ The number of heads to use for the key and value states.
200
+ out_channels (`int`, defaults to 64): Number of output channels.
201
+ cross_attention_dim ( `int`, *optional*, defaults to 768): Dimension of the cross-attention projection.
202
+ time_proj_dim ( `int`, *optional*, defaults to 256): Dimension of the timestep inner projection.
203
+ global_states_input_dim ( `int`, *optional*, defaults to 1536):
204
+ Input dimension of the global hidden states projection.
205
+ cross_attention_input_dim ( `int`, *optional*, defaults to 768):
206
+ Input dimension of the cross-attention projection
207
+ """
208
+
209
+ _supports_gradient_checkpointing = True
210
+ _skip_layerwise_casting_patterns = ["preprocess_conv", "postprocess_conv", "^proj_in$", "^proj_out$", "norm"]
211
+
212
+ @register_to_config
213
+ def __init__(
214
+ self,
215
+ sample_size: int = 1024,
216
+ in_channels: int = 64,
217
+ num_layers: int = 24,
218
+ attention_head_dim: int = 64,
219
+ num_attention_heads: int = 24,
220
+ num_key_value_attention_heads: int = 12,
221
+ out_channels: int = 64,
222
+ cross_attention_dim: int = 768,
223
+ time_proj_dim: int = 256,
224
+ global_states_input_dim: int = 1536,
225
+ cross_attention_input_dim: int = 768,
226
+ ):
227
+ super().__init__()
228
+ self.sample_size = sample_size
229
+ self.out_channels = out_channels
230
+ self.inner_dim = num_attention_heads * attention_head_dim
231
+
232
+ self.time_proj = StableAudioGaussianFourierProjection(
233
+ embedding_size=time_proj_dim // 2,
234
+ flip_sin_to_cos=True,
235
+ log=False,
236
+ set_W_to_weight=False,
237
+ )
238
+
239
+ self.timestep_proj = nn.Sequential(
240
+ nn.Linear(time_proj_dim, self.inner_dim, bias=True),
241
+ nn.SiLU(),
242
+ nn.Linear(self.inner_dim, self.inner_dim, bias=True),
243
+ )
244
+
245
+ self.global_proj = nn.Sequential(
246
+ nn.Linear(global_states_input_dim, self.inner_dim, bias=False),
247
+ nn.SiLU(),
248
+ nn.Linear(self.inner_dim, self.inner_dim, bias=False),
249
+ )
250
+
251
+ self.cross_attention_proj = nn.Sequential(
252
+ nn.Linear(cross_attention_input_dim, cross_attention_dim, bias=False),
253
+ nn.SiLU(),
254
+ nn.Linear(cross_attention_dim, cross_attention_dim, bias=False),
255
+ )
256
+
257
+ self.preprocess_conv = nn.Conv1d(in_channels, in_channels, 1, bias=False)
258
+ self.proj_in = nn.Linear(in_channels, self.inner_dim, bias=False)
259
+
260
+ self.transformer_blocks = nn.ModuleList(
261
+ [
262
+ StableAudioDiTBlock(
263
+ dim=self.inner_dim,
264
+ num_attention_heads=num_attention_heads,
265
+ num_key_value_attention_heads=num_key_value_attention_heads,
266
+ attention_head_dim=attention_head_dim,
267
+ cross_attention_dim=cross_attention_dim,
268
+ )
269
+ for _ in range(num_layers)
270
+ ]
271
+ )
272
+
273
+ self.proj_out = nn.Linear(self.inner_dim, self.out_channels, bias=False)
274
+ self.postprocess_conv = nn.Conv1d(self.out_channels, self.out_channels, 1, bias=False)
275
+
276
+ self.gradient_checkpointing = False
277
+
278
+ @property
279
+ # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.attn_processors
280
+ def attn_processors(self) -> Dict[str, AttentionProcessor]:
281
+ r"""
282
+ Returns:
283
+ `dict` of attention processors: A dictionary containing all attention processors used in the model,
284
+ indexed by its weight name.
285
+ """
286
+ # set recursively
287
+ processors = {}
288
+
289
+ def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
290
+ if hasattr(module, "get_processor"):
291
+ processors[f"{name}.processor"] = module.get_processor()
292
+
293
+ for sub_name, child in module.named_children():
294
+ fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)
295
+
296
+ return processors
297
+
298
+ for name, module in self.named_children():
299
+ fn_recursive_add_processors(name, module, processors)
300
+
301
+ return processors
302
+
303
+ # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_attn_processor
304
+ def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
305
+ r"""
306
+ Sets the attention processor to use to compute attention.
307
+
308
+ Parameters:
309
+ processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`):
310
+ The instantiated processor class or a dictionary of processor classes that will be set as the processor
311
+ for **all** `Attention` layers.
312
+
313
+ If `processor` is a dict, the key needs to define the path to the corresponding cross attention
314
+ processor. This is strongly recommended when setting trainable attention processors.
315
+
316
+ """
317
+ count = len(self.attn_processors.keys())
318
+
319
+ if isinstance(processor, dict) and len(processor) != count:
320
+ raise ValueError(
321
+ f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
322
+ f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
323
+ )
324
+
325
+ def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
326
+ if hasattr(module, "set_processor"):
327
+ if not isinstance(processor, dict):
328
+ module.set_processor(processor)
329
+ else:
330
+ module.set_processor(processor.pop(f"{name}.processor"))
331
+
332
+ for sub_name, child in module.named_children():
333
+ fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)
334
+
335
+ for name, module in self.named_children():
336
+ fn_recursive_attn_processor(name, module, processor)
337
+
338
+ # Copied from diffusers.models.transformers.hunyuan_transformer_2d.HunyuanDiT2DModel.set_default_attn_processor with Hunyuan->StableAudio
339
+ def set_default_attn_processor(self):
340
+ """
341
+ Disables custom attention processors and sets the default attention implementation.
342
+ """
343
+ self.set_attn_processor(StableAudioAttnProcessor2_0())
344
+
345
+ def forward(
346
+ self,
347
+ hidden_states: torch.FloatTensor,
348
+ timestep: torch.LongTensor = None,
349
+ encoder_hidden_states: torch.FloatTensor = None,
350
+ global_hidden_states: torch.FloatTensor = None,
351
+ rotary_embedding: torch.FloatTensor = None,
352
+ return_dict: bool = True,
353
+ attention_mask: Optional[torch.LongTensor] = None,
354
+ encoder_attention_mask: Optional[torch.LongTensor] = None,
355
+ ) -> Union[torch.FloatTensor, Transformer2DModelOutput]:
356
+ """
357
+ The [`StableAudioDiTModel`] forward method.
358
+
359
+ Args:
360
+ hidden_states (`torch.FloatTensor` of shape `(batch size, in_channels, sequence_len)`):
361
+ Input `hidden_states`.
362
+ timestep ( `torch.LongTensor`):
363
+ Used to indicate denoising step.
364
+ encoder_hidden_states (`torch.FloatTensor` of shape `(batch size, encoder_sequence_len, cross_attention_input_dim)`):
365
+ Conditional embeddings (embeddings computed from the input conditions such as prompts) to use.
366
+ global_hidden_states (`torch.FloatTensor` of shape `(batch size, global_sequence_len, global_states_input_dim)`):
367
+ Global embeddings that will be prepended to the hidden states.
368
+ rotary_embedding (`torch.Tensor`):
369
+ The rotary embeddings to apply on query and key tensors during attention calculation.
370
+ return_dict (`bool`, *optional*, defaults to `True`):
371
+ Whether or not to return a [`~models.transformer_2d.Transformer2DModelOutput`] instead of a plain
372
+ tuple.
373
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_len)`, *optional*):
374
+ Mask to avoid performing attention on padding token indices, formed by concatenating the attention
375
+ masks
376
+ for the two text encoders together. Mask values selected in `[0, 1]`:
377
+
378
+ - 1 for tokens that are **not masked**,
379
+ - 0 for tokens that are **masked**.
380
+ encoder_attention_mask (`torch.Tensor` of shape `(batch_size, sequence_len)`, *optional*):
381
+ Mask to avoid performing attention on padding token cross-attention indices, formed by concatenating
382
+ the attention masks
383
+ for the two text encoders together. Mask values selected in `[0, 1]`:
384
+
385
+ - 1 for tokens that are **not masked**,
386
+ - 0 for tokens that are **masked**.
387
+ Returns:
388
+ If `return_dict` is True, an [`~models.transformer_2d.Transformer2DModelOutput`] is returned, otherwise a
389
+ `tuple` where the first element is the sample tensor.
390
+ """
391
+ cross_attention_hidden_states = self.cross_attention_proj(encoder_hidden_states)
392
+ global_hidden_states = self.global_proj(global_hidden_states)
393
+ time_hidden_states = self.timestep_proj(self.time_proj(timestep.to(self.dtype)))
394
+
395
+ global_hidden_states = global_hidden_states + time_hidden_states.unsqueeze(1)
396
+
397
+ hidden_states = self.preprocess_conv(hidden_states) + hidden_states
398
+ # (batch_size, dim, sequence_length) -> (batch_size, sequence_length, dim)
399
+ hidden_states = hidden_states.transpose(1, 2)
400
+
401
+ hidden_states = self.proj_in(hidden_states)
402
+
403
+ # prepend global states to hidden states
404
+ hidden_states = torch.cat([global_hidden_states, hidden_states], dim=-2)
405
+ if attention_mask is not None:
406
+ prepend_mask = torch.ones((hidden_states.shape[0], 1), device=hidden_states.device, dtype=torch.bool)
407
+ attention_mask = torch.cat([prepend_mask, attention_mask], dim=-1)
408
+
409
+ for block in self.transformer_blocks:
410
+ if torch.is_grad_enabled() and self.gradient_checkpointing:
411
+ hidden_states = self._gradient_checkpointing_func(
412
+ block,
413
+ hidden_states,
414
+ attention_mask,
415
+ cross_attention_hidden_states,
416
+ encoder_attention_mask,
417
+ rotary_embedding,
418
+ )
419
+
420
+ else:
421
+ hidden_states = block(
422
+ hidden_states=hidden_states,
423
+ attention_mask=attention_mask,
424
+ encoder_hidden_states=cross_attention_hidden_states,
425
+ encoder_attention_mask=encoder_attention_mask,
426
+ rotary_embedding=rotary_embedding,
427
+ )
428
+
429
+ hidden_states = self.proj_out(hidden_states)
430
+
431
+ # (batch_size, sequence_length, dim) -> (batch_size, dim, sequence_length)
432
+ # remove prepend length that has been added by global hidden states
433
+ hidden_states = hidden_states.transpose(1, 2)[:, :, 1:]
434
+ hidden_states = self.postprocess_conv(hidden_states) + hidden_states
435
+
436
+ if not return_dict:
437
+ return (hidden_states,)
438
+
439
+ return Transformer2DModelOutput(sample=hidden_states)
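+
+ # Shape walk-through (a sketch of the forward pass above): for latents of shape
+ # (batch, 64, 1024), `proj_in` yields (batch, 1024, inner_dim); prepending the projected
+ # global conditioning token gives (batch, 1025, inner_dim), and `attention_mask` (if given)
+ # is padded with one extra "keep" entry to match. After `proj_out` and the transpose, the
+ # slice `[:, :, 1:]` drops that token again, so the output keeps the input latent length.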
exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/models/transformers/t5_film_transformer.py ADDED
@@ -0,0 +1,436 @@
1
+ # Copyright 2025 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ import math
15
+ from typing import Optional, Tuple
16
+
17
+ import torch
18
+ from torch import nn
19
+
20
+ from ...configuration_utils import ConfigMixin, register_to_config
21
+ from ..attention_processor import Attention
22
+ from ..embeddings import get_timestep_embedding
23
+ from ..modeling_utils import ModelMixin
24
+
25
+
26
+ class T5FilmDecoder(ModelMixin, ConfigMixin):
27
+ r"""
28
+ T5 style decoder with FiLM conditioning.
29
+
30
+ Args:
31
+ input_dims (`int`, *optional*, defaults to `128`):
32
+ The number of input dimensions.
33
+ targets_length (`int`, *optional*, defaults to `256`):
34
+ The length of the targets.
35
+ d_model (`int`, *optional*, defaults to `768`):
36
+ Size of the input hidden states.
37
+ num_layers (`int`, *optional*, defaults to `12`):
38
+ The number of `DecoderLayer`'s to use.
39
+ num_heads (`int`, *optional*, defaults to `12`):
40
+ The number of attention heads to use.
41
+ d_kv (`int`, *optional*, defaults to `64`):
42
+ Size of the key-value projection vectors.
43
+ d_ff (`int`, *optional*, defaults to `2048`):
44
+ The number of dimensions in the intermediate feed-forward layer of `DecoderLayer`'s.
45
+ dropout_rate (`float`, *optional*, defaults to `0.1`):
46
+ Dropout probability.
47
+ """
48
+
49
+ @register_to_config
50
+ def __init__(
51
+ self,
52
+ input_dims: int = 128,
53
+ targets_length: int = 256,
54
+ max_decoder_noise_time: float = 2000.0,
55
+ d_model: int = 768,
56
+ num_layers: int = 12,
57
+ num_heads: int = 12,
58
+ d_kv: int = 64,
59
+ d_ff: int = 2048,
60
+ dropout_rate: float = 0.1,
61
+ ):
62
+ super().__init__()
63
+
64
+ self.conditioning_emb = nn.Sequential(
65
+ nn.Linear(d_model, d_model * 4, bias=False),
66
+ nn.SiLU(),
67
+ nn.Linear(d_model * 4, d_model * 4, bias=False),
68
+ nn.SiLU(),
69
+ )
70
+
71
+ self.position_encoding = nn.Embedding(targets_length, d_model)
72
+ self.position_encoding.weight.requires_grad = False
73
+
74
+ self.continuous_inputs_projection = nn.Linear(input_dims, d_model, bias=False)
75
+
76
+ self.dropout = nn.Dropout(p=dropout_rate)
77
+
78
+ self.decoders = nn.ModuleList()
79
+ for lyr_num in range(num_layers):
80
+ # FiLM conditional T5 decoder
81
+ lyr = DecoderLayer(d_model=d_model, d_kv=d_kv, num_heads=num_heads, d_ff=d_ff, dropout_rate=dropout_rate)
82
+ self.decoders.append(lyr)
83
+
84
+ self.decoder_norm = T5LayerNorm(d_model)
85
+
86
+ self.post_dropout = nn.Dropout(p=dropout_rate)
87
+ self.spec_out = nn.Linear(d_model, input_dims, bias=False)
88
+
89
+ def encoder_decoder_mask(self, query_input: torch.Tensor, key_input: torch.Tensor) -> torch.Tensor:
90
+ mask = torch.mul(query_input.unsqueeze(-1), key_input.unsqueeze(-2))
91
+ return mask.unsqueeze(-3)
92
+
93
+ def forward(self, encodings_and_masks, decoder_input_tokens, decoder_noise_time):
94
+ batch, _, _ = decoder_input_tokens.shape
95
+ assert decoder_noise_time.shape == (batch,)
96
+
97
+ # decoder_noise_time is in [0, 1), so rescale to expected timing range.
98
+ time_steps = get_timestep_embedding(
99
+ decoder_noise_time * self.config.max_decoder_noise_time,
100
+ embedding_dim=self.config.d_model,
101
+ max_period=self.config.max_decoder_noise_time,
102
+ ).to(dtype=self.dtype)
103
+
104
+ conditioning_emb = self.conditioning_emb(time_steps).unsqueeze(1)
105
+
106
+ assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
107
+
108
+ seq_length = decoder_input_tokens.shape[1]
109
+
110
+ # If we want to use relative positions for audio context, we can just offset
111
+ # this sequence by the length of encodings_and_masks.
112
+ decoder_positions = torch.broadcast_to(
113
+ torch.arange(seq_length, device=decoder_input_tokens.device),
114
+ (batch, seq_length),
115
+ )
116
+
117
+ position_encodings = self.position_encoding(decoder_positions)
118
+
119
+ inputs = self.continuous_inputs_projection(decoder_input_tokens)
120
+ inputs += position_encodings
121
+ y = self.dropout(inputs)
122
+
123
+ # decoder: No padding present.
124
+ decoder_mask = torch.ones(
125
+ decoder_input_tokens.shape[:2], device=decoder_input_tokens.device, dtype=inputs.dtype
126
+ )
127
+
128
+ # Translate encoding masks to encoder-decoder masks.
129
+ encodings_and_encdec_masks = [(x, self.encoder_decoder_mask(decoder_mask, y)) for x, y in encodings_and_masks]
130
+
131
+ # cross attend style: concat encodings
132
+ encoded = torch.cat([x[0] for x in encodings_and_encdec_masks], dim=1)
133
+ encoder_decoder_mask = torch.cat([x[1] for x in encodings_and_encdec_masks], dim=-1)
134
+
135
+ for lyr in self.decoders:
136
+ y = lyr(
137
+ y,
138
+ conditioning_emb=conditioning_emb,
139
+ encoder_hidden_states=encoded,
140
+ encoder_attention_mask=encoder_decoder_mask,
141
+ )[0]
142
+
143
+ y = self.decoder_norm(y)
144
+ y = self.post_dropout(y)
145
+
146
+ spec_out = self.spec_out(y)
147
+ return spec_out
148
+
149
+
150
+ class DecoderLayer(nn.Module):
151
+ r"""
152
+ T5 decoder layer.
153
+
154
+ Args:
155
+ d_model (`int`):
156
+ Size of the input hidden states.
157
+ d_kv (`int`):
158
+ Size of the key-value projection vectors.
159
+ num_heads (`int`):
160
+ Number of attention heads.
161
+ d_ff (`int`):
162
+ Size of the intermediate feed-forward layer.
163
+ dropout_rate (`float`):
164
+ Dropout probability.
165
+ layer_norm_epsilon (`float`, *optional*, defaults to `1e-6`):
166
+ A small value used for numerical stability to avoid dividing by zero.
167
+ """
168
+
169
+ def __init__(
170
+ self, d_model: int, d_kv: int, num_heads: int, d_ff: int, dropout_rate: float, layer_norm_epsilon: float = 1e-6
171
+ ):
172
+ super().__init__()
173
+ self.layer = nn.ModuleList()
174
+
175
+ # cond self attention: layer 0
176
+ self.layer.append(
177
+ T5LayerSelfAttentionCond(d_model=d_model, d_kv=d_kv, num_heads=num_heads, dropout_rate=dropout_rate)
178
+ )
179
+
180
+ # cross attention: layer 1
181
+ self.layer.append(
182
+ T5LayerCrossAttention(
183
+ d_model=d_model,
184
+ d_kv=d_kv,
185
+ num_heads=num_heads,
186
+ dropout_rate=dropout_rate,
187
+ layer_norm_epsilon=layer_norm_epsilon,
188
+ )
189
+ )
190
+
191
+ # Film Cond MLP + dropout: last layer
192
+ self.layer.append(
193
+ T5LayerFFCond(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate, layer_norm_epsilon=layer_norm_epsilon)
194
+ )
195
+
196
+ def forward(
197
+ self,
198
+ hidden_states: torch.Tensor,
199
+ conditioning_emb: Optional[torch.Tensor] = None,
200
+ attention_mask: Optional[torch.Tensor] = None,
201
+ encoder_hidden_states: Optional[torch.Tensor] = None,
202
+ encoder_attention_mask: Optional[torch.Tensor] = None,
203
+ encoder_decoder_position_bias=None,
204
+ ) -> Tuple[torch.Tensor]:
205
+ hidden_states = self.layer[0](
206
+ hidden_states,
207
+ conditioning_emb=conditioning_emb,
208
+ attention_mask=attention_mask,
209
+ )
210
+
211
+ if encoder_hidden_states is not None:
212
+ encoder_extended_attention_mask = torch.where(encoder_attention_mask > 0, 0, -1e10).to(
213
+ encoder_hidden_states.dtype
214
+ )
215
+
216
+ hidden_states = self.layer[1](
217
+ hidden_states,
218
+ key_value_states=encoder_hidden_states,
219
+ attention_mask=encoder_extended_attention_mask,
220
+ )
221
+
222
+ # Apply Film Conditional Feed Forward layer
223
+ hidden_states = self.layer[-1](hidden_states, conditioning_emb)
224
+
225
+ return (hidden_states,)
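+
+ # Mask-to-bias sketch (illustrative): an `encoder_attention_mask` row such as [1, 1, 0]
+ # is mapped by the `torch.where` above to the additive bias [0, 0, -1e10], so masked key
+ # positions receive (effectively) zero attention weight after the softmax.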
226
+
227
+
228
+ class T5LayerSelfAttentionCond(nn.Module):
229
+ r"""
230
+ T5 style self-attention layer with conditioning.
231
+
232
+ Args:
233
+ d_model (`int`):
234
+ Size of the input hidden states.
235
+ d_kv (`int`):
236
+ Size of the key-value projection vectors.
237
+ num_heads (`int`):
238
+ Number of attention heads.
239
+ dropout_rate (`float`):
240
+ Dropout probability.
241
+ """
242
+
243
+ def __init__(self, d_model: int, d_kv: int, num_heads: int, dropout_rate: float):
244
+ super().__init__()
245
+ self.layer_norm = T5LayerNorm(d_model)
246
+ self.FiLMLayer = T5FiLMLayer(in_features=d_model * 4, out_features=d_model)
247
+ self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
248
+ self.dropout = nn.Dropout(dropout_rate)
249
+
250
+ def forward(
251
+ self,
252
+ hidden_states: torch.Tensor,
253
+ conditioning_emb: Optional[torch.Tensor] = None,
254
+ attention_mask: Optional[torch.Tensor] = None,
255
+ ) -> torch.Tensor:
256
+ # pre_self_attention_layer_norm
257
+ normed_hidden_states = self.layer_norm(hidden_states)
258
+
259
+ if conditioning_emb is not None:
260
+ normed_hidden_states = self.FiLMLayer(normed_hidden_states, conditioning_emb)
261
+
262
+ # Self-attention block
263
+ attention_output = self.attention(normed_hidden_states)
264
+
265
+ hidden_states = hidden_states + self.dropout(attention_output)
266
+
267
+ return hidden_states
268
+
269
+
270
+ class T5LayerCrossAttention(nn.Module):
271
+ r"""
272
+ T5 style cross-attention layer.
273
+
274
+ Args:
275
+ d_model (`int`):
276
+ Size of the input hidden states.
277
+ d_kv (`int`):
278
+ Size of the key-value projection vectors.
279
+ num_heads (`int`):
280
+ Number of attention heads.
281
+ dropout_rate (`float`):
282
+ Dropout probability.
283
+ layer_norm_epsilon (`float`):
284
+ A small value used for numerical stability to avoid dividing by zero.
285
+ """
286
+
287
+ def __init__(self, d_model: int, d_kv: int, num_heads: int, dropout_rate: float, layer_norm_epsilon: float):
288
+ super().__init__()
289
+ self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
290
+ self.layer_norm = T5LayerNorm(d_model, eps=layer_norm_epsilon)
291
+ self.dropout = nn.Dropout(dropout_rate)
292
+
293
+ def forward(
294
+ self,
295
+ hidden_states: torch.Tensor,
296
+ key_value_states: Optional[torch.Tensor] = None,
297
+ attention_mask: Optional[torch.Tensor] = None,
298
+ ) -> torch.Tensor:
299
+ normed_hidden_states = self.layer_norm(hidden_states)
300
+ attention_output = self.attention(
301
+ normed_hidden_states,
302
+ encoder_hidden_states=key_value_states,
303
+ attention_mask=attention_mask.squeeze(1),
304
+ )
305
+ layer_output = hidden_states + self.dropout(attention_output)
306
+ return layer_output
307
+
308
+
309
+ class T5LayerFFCond(nn.Module):
310
+ r"""
311
+ T5 style feed-forward conditional layer.
312
+
313
+ Args:
314
+ d_model (`int`):
315
+ Size of the input hidden states.
316
+ d_ff (`int`):
317
+ Size of the intermediate feed-forward layer.
318
+ dropout_rate (`float`):
319
+ Dropout probability.
320
+ layer_norm_epsilon (`float`):
321
+ A small value used for numerical stability to avoid dividing by zero.
322
+ """
323
+
324
+ def __init__(self, d_model: int, d_ff: int, dropout_rate: float, layer_norm_epsilon: float):
325
+ super().__init__()
326
+ self.DenseReluDense = T5DenseGatedActDense(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate)
327
+ self.film = T5FiLMLayer(in_features=d_model * 4, out_features=d_model)
328
+ self.layer_norm = T5LayerNorm(d_model, eps=layer_norm_epsilon)
329
+ self.dropout = nn.Dropout(dropout_rate)
330
+
331
+ def forward(self, hidden_states: torch.Tensor, conditioning_emb: Optional[torch.Tensor] = None) -> torch.Tensor:
332
+ forwarded_states = self.layer_norm(hidden_states)
333
+ if conditioning_emb is not None:
334
+ forwarded_states = self.film(forwarded_states, conditioning_emb)
335
+
336
+ forwarded_states = self.DenseReluDense(forwarded_states)
337
+ hidden_states = hidden_states + self.dropout(forwarded_states)
338
+ return hidden_states
339
+
340
+
341
+ class T5DenseGatedActDense(nn.Module):
342
+ r"""
343
+ T5 style feed-forward layer with gated activations and dropout.
344
+
345
+ Args:
346
+ d_model (`int`):
347
+ Size of the input hidden states.
348
+ d_ff (`int`):
349
+ Size of the intermediate feed-forward layer.
350
+ dropout_rate (`float`):
351
+ Dropout probability.
352
+ """
353
+
354
+ def __init__(self, d_model: int, d_ff: int, dropout_rate: float):
355
+ super().__init__()
356
+ self.wi_0 = nn.Linear(d_model, d_ff, bias=False)
357
+ self.wi_1 = nn.Linear(d_model, d_ff, bias=False)
358
+ self.wo = nn.Linear(d_ff, d_model, bias=False)
359
+ self.dropout = nn.Dropout(dropout_rate)
360
+ self.act = NewGELUActivation()
361
+
362
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
363
+ hidden_gelu = self.act(self.wi_0(hidden_states))
364
+ hidden_linear = self.wi_1(hidden_states)
365
+ hidden_states = hidden_gelu * hidden_linear
366
+ hidden_states = self.dropout(hidden_states)
367
+
368
+ hidden_states = self.wo(hidden_states)
369
+ return hidden_states
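+
+ # Gating sketch (illustrative): the two parallel projections implement a GEGLU-style gate,
+ # `wo(dropout(gelu(wi_0(x)) * wi_1(x)))`, the gated feed-forward variant used by T5 v1.1.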
370
+
371
+
372
+ class T5LayerNorm(nn.Module):
373
+ r"""
374
+ T5 style layer normalization module.
375
+
376
+ Args:
377
+ hidden_size (`int`):
378
+ Size of the input hidden states.
379
+ eps (`float`, `optional`, defaults to `1e-6`):
380
+ A small value used for numerical stability to avoid dividing by zero.
381
+ """
382
+
383
+ def __init__(self, hidden_size: int, eps: float = 1e-6):
384
+ """
385
+ Construct a layernorm module in the T5 style. No bias and no subtraction of mean.
386
+ """
387
+ super().__init__()
388
+ self.weight = nn.Parameter(torch.ones(hidden_size))
389
+ self.variance_epsilon = eps
390
+
391
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
392
+ # T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
393
+ # Square Layer Normalization https://huggingface.co/papers/1910.07467 thus variance is calculated
394
+ # w/o mean and there is no bias. Additionally we want to make sure that the accumulation for
395
+ # half-precision inputs is done in fp32
396
+
397
+ variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
398
+ hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
399
+
400
+ # convert into half-precision if necessary
401
+ if self.weight.dtype in [torch.float16, torch.bfloat16]:
402
+ hidden_states = hidden_states.to(self.weight.dtype)
403
+
404
+ return self.weight * hidden_states
405
+
406
+
407
+ class NewGELUActivation(nn.Module):
408
+ """
409
+ Implementation of the GELU activation function currently in Google BERT repo (identical to OpenAI GPT). Also see
410
+ the Gaussian Error Linear Units paper: https://huggingface.co/papers/1606.08415
411
+ """
412
+
413
+ def forward(self, input: torch.Tensor) -> torch.Tensor:
414
+ return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (input + 0.044715 * torch.pow(input, 3.0))))
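+
+ # This is the tanh approximation of GELU; with a recent PyTorch it should agree with
+ # torch.nn.functional.gelu(input, approximate="tanh") up to floating-point precision.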
415
+
416
+
417
+ class T5FiLMLayer(nn.Module):
418
+ """
419
+ T5 style FiLM Layer.
420
+
421
+ Args:
422
+ in_features (`int`):
423
+ Number of input features.
424
+ out_features (`int`):
425
+ Number of output features.
426
+ """
427
+
428
+ def __init__(self, in_features: int, out_features: int):
429
+ super().__init__()
430
+ self.scale_bias = nn.Linear(in_features, out_features * 2, bias=False)
431
+
432
+ def forward(self, x: torch.Tensor, conditioning_emb: torch.Tensor) -> torch.Tensor:
433
+ emb = self.scale_bias(conditioning_emb)
434
+ scale, shift = torch.chunk(emb, 2, -1)
435
+ x = x * (1 + scale) + shift
436
+ return x
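+
+
+ if __name__ == "__main__":
+     # Small, self-contained smoke test (illustrative only; the sizes are arbitrary).
+     # It exercises the FiLM layer and the T5-style RMS norm defined above.
+     hidden = torch.randn(2, 5, 16)                    # (batch, seq, d_model)
+     conditioning = torch.randn(2, 1, 16 * 4)          # (batch, 1, d_model * 4)
+
+     film = T5FiLMLayer(in_features=16 * 4, out_features=16)
+     modulated = film(hidden, conditioning)            # scale/shift broadcast over the sequence dim
+     assert modulated.shape == hidden.shape
+
+     norm = T5LayerNorm(hidden_size=16)
+     normed = norm(hidden)                             # scale-only RMS normalization
+     assert normed.shape == hidden.shape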
exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/models/transformers/transformer_2d.py ADDED
@@ -0,0 +1,551 @@
1
+ # Copyright 2025 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import Any, Dict, Optional
15
+
16
+ import torch
17
+ import torch.nn.functional as F
18
+ from torch import nn
19
+
20
+ from ...configuration_utils import LegacyConfigMixin, register_to_config
21
+ from ...utils import deprecate, logging
22
+ from ..attention import BasicTransformerBlock
23
+ from ..embeddings import ImagePositionalEmbeddings, PatchEmbed, PixArtAlphaTextProjection
24
+ from ..modeling_outputs import Transformer2DModelOutput
25
+ from ..modeling_utils import LegacyModelMixin
26
+ from ..normalization import AdaLayerNormSingle
27
+
28
+
29
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
30
+
31
+
32
+ class Transformer2DModelOutput(Transformer2DModelOutput):
33
+ def __init__(self, *args, **kwargs):
34
+ deprecation_message = "Importing `Transformer2DModelOutput` from `diffusers.models.transformer_2d` is deprecated and this will be removed in a future version. Please use `from diffusers.models.modeling_outputs import Transformer2DModelOutput`, instead."
35
+ deprecate("Transformer2DModelOutput", "1.0.0", deprecation_message)
36
+ super().__init__(*args, **kwargs)
37
+
38
+
39
+ class Transformer2DModel(LegacyModelMixin, LegacyConfigMixin):
40
+ """
41
+ A 2D Transformer model for image-like data.
42
+
43
+ Parameters:
44
+ num_attention_heads (`int`, *optional*, defaults to 16): The number of heads to use for multi-head attention.
45
+ attention_head_dim (`int`, *optional*, defaults to 88): The number of channels in each head.
46
+ in_channels (`int`, *optional*):
47
+ The number of channels in the input and output (specify if the input is **continuous**).
48
+ num_layers (`int`, *optional*, defaults to 1): The number of layers of Transformer blocks to use.
49
+ dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use.
50
+ cross_attention_dim (`int`, *optional*): The number of `encoder_hidden_states` dimensions to use.
51
+ sample_size (`int`, *optional*): The width of the latent images (specify if the input is **discrete**).
52
+ This is fixed during training since it is used to learn a number of position embeddings.
53
+ num_vector_embeds (`int`, *optional*):
54
+ The number of classes of the vector embeddings of the latent pixels (specify if the input is **discrete**).
55
+ Includes the class for the masked latent pixel.
56
+ activation_fn (`str`, *optional*, defaults to `"geglu"`): Activation function to use in feed-forward.
57
+ num_embeds_ada_norm ( `int`, *optional*):
58
+ The number of diffusion steps used during training. Pass if at least one of the norm_layers is
59
+ `AdaLayerNorm`. This is fixed during training since it is used to learn a number of embeddings that are
60
+ added to the hidden states.
61
+
62
+ During inference, you can denoise for up to but not more steps than `num_embeds_ada_norm`.
63
+ attention_bias (`bool`, *optional*):
64
+ Configure if the `TransformerBlocks` attention should contain a bias parameter.
65
+ """
66
+
67
+ _supports_gradient_checkpointing = True
68
+ _no_split_modules = ["BasicTransformerBlock"]
69
+ _skip_layerwise_casting_patterns = ["latent_image_embedding", "norm"]
70
+
71
+ @register_to_config
72
+ def __init__(
73
+ self,
74
+ num_attention_heads: int = 16,
75
+ attention_head_dim: int = 88,
76
+ in_channels: Optional[int] = None,
77
+ out_channels: Optional[int] = None,
78
+ num_layers: int = 1,
79
+ dropout: float = 0.0,
80
+ norm_num_groups: int = 32,
81
+ cross_attention_dim: Optional[int] = None,
82
+ attention_bias: bool = False,
83
+ sample_size: Optional[int] = None,
84
+ num_vector_embeds: Optional[int] = None,
85
+ patch_size: Optional[int] = None,
86
+ activation_fn: str = "geglu",
87
+ num_embeds_ada_norm: Optional[int] = None,
88
+ use_linear_projection: bool = False,
89
+ only_cross_attention: bool = False,
90
+ double_self_attention: bool = False,
91
+ upcast_attention: bool = False,
92
+ norm_type: str = "layer_norm", # 'layer_norm', 'ada_norm', 'ada_norm_zero', 'ada_norm_single', 'ada_norm_continuous', 'layer_norm_i2vgen'
93
+ norm_elementwise_affine: bool = True,
94
+ norm_eps: float = 1e-5,
95
+ attention_type: str = "default",
96
+ caption_channels: Optional[int] = None,
98
+ interpolation_scale: Optional[float] = None,
98
+ use_additional_conditions: Optional[bool] = None,
99
+ ):
100
+ super().__init__()
101
+
102
+ # Validate inputs.
103
+ if patch_size is not None:
104
+ if norm_type not in ["ada_norm", "ada_norm_zero", "ada_norm_single"]:
105
+ raise NotImplementedError(
106
+ f"Forward pass is not implemented when `patch_size` is not None and `norm_type` is '{norm_type}'."
107
+ )
108
+ elif norm_type in ["ada_norm", "ada_norm_zero"] and num_embeds_ada_norm is None:
109
+ raise ValueError(
110
+ f"When using a `patch_size` and this `norm_type` ({norm_type}), `num_embeds_ada_norm` cannot be None."
111
+ )
112
+
113
+ # 1. Transformer2DModel can process both standard continuous images of shape `(batch_size, num_channels, width, height)` as well as quantized image embeddings of shape `(batch_size, num_image_vectors)`
114
+ # Define whether input is continuous or discrete depending on configuration
115
+ self.is_input_continuous = (in_channels is not None) and (patch_size is None)
116
+ self.is_input_vectorized = num_vector_embeds is not None
117
+ self.is_input_patches = in_channels is not None and patch_size is not None
118
+
119
+ if self.is_input_continuous and self.is_input_vectorized:
120
+ raise ValueError(
121
+ f"Cannot define both `in_channels`: {in_channels} and `num_vector_embeds`: {num_vector_embeds}. Make"
122
+ " sure that either `in_channels` or `num_vector_embeds` is None."
123
+ )
124
+ elif self.is_input_vectorized and self.is_input_patches:
125
+ raise ValueError(
126
+ f"Cannot define both `num_vector_embeds`: {num_vector_embeds} and `patch_size`: {patch_size}. Make"
127
+ " sure that either `num_vector_embeds` or `num_patches` is None."
128
+ )
129
+ elif not self.is_input_continuous and not self.is_input_vectorized and not self.is_input_patches:
130
+ raise ValueError(
131
+ f"Has to define `in_channels`: {in_channels}, `num_vector_embeds`: {num_vector_embeds}, or patch_size:"
132
+ f" {patch_size}. Make sure that `in_channels`, `num_vector_embeds` or `num_patches` is not None."
133
+ )
134
+
135
+ if norm_type == "layer_norm" and num_embeds_ada_norm is not None:
136
+ deprecation_message = (
137
+ f"The configuration file of this model: {self.__class__} is outdated. `norm_type` is either not set or"
138
+ " incorrectly set to `'layer_norm'`. Make sure to set `norm_type` to `'ada_norm'` in the config."
139
+ " Please make sure to update the config accordingly as leaving `norm_type` might led to incorrect"
140
+ " results in future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it"
141
+ " would be very nice if you could open a Pull request for the `transformer/config.json` file"
142
+ )
143
+ deprecate("norm_type!=num_embeds_ada_norm", "1.0.0", deprecation_message, standard_warn=False)
144
+ norm_type = "ada_norm"
145
+
146
+ # Set some common variables used across the board.
147
+ self.use_linear_projection = use_linear_projection
148
+ self.interpolation_scale = interpolation_scale
149
+ self.caption_channels = caption_channels
150
+ self.num_attention_heads = num_attention_heads
151
+ self.attention_head_dim = attention_head_dim
152
+ self.inner_dim = self.config.num_attention_heads * self.config.attention_head_dim
153
+ self.in_channels = in_channels
154
+ self.out_channels = in_channels if out_channels is None else out_channels
155
+ self.gradient_checkpointing = False
156
+
157
+ if use_additional_conditions is None:
158
+ if norm_type == "ada_norm_single" and sample_size == 128:
159
+ use_additional_conditions = True
160
+ else:
161
+ use_additional_conditions = False
162
+ self.use_additional_conditions = use_additional_conditions
163
+
164
+ # 2. Initialize the right blocks.
165
+ # These functions follow a common structure:
166
+ # a. Initialize the input blocks. b. Initialize the transformer blocks.
167
+ # c. Initialize the output blocks and other projection blocks when necessary.
168
+ if self.is_input_continuous:
169
+ self._init_continuous_input(norm_type=norm_type)
170
+ elif self.is_input_vectorized:
171
+ self._init_vectorized_inputs(norm_type=norm_type)
172
+ elif self.is_input_patches:
173
+ self._init_patched_inputs(norm_type=norm_type)
174
+
175
+ def _init_continuous_input(self, norm_type):
176
+ self.norm = torch.nn.GroupNorm(
177
+ num_groups=self.config.norm_num_groups, num_channels=self.in_channels, eps=1e-6, affine=True
178
+ )
179
+ if self.use_linear_projection:
180
+ self.proj_in = torch.nn.Linear(self.in_channels, self.inner_dim)
181
+ else:
182
+ self.proj_in = torch.nn.Conv2d(self.in_channels, self.inner_dim, kernel_size=1, stride=1, padding=0)
183
+
184
+ self.transformer_blocks = nn.ModuleList(
185
+ [
186
+ BasicTransformerBlock(
187
+ self.inner_dim,
188
+ self.config.num_attention_heads,
189
+ self.config.attention_head_dim,
190
+ dropout=self.config.dropout,
191
+ cross_attention_dim=self.config.cross_attention_dim,
192
+ activation_fn=self.config.activation_fn,
193
+ num_embeds_ada_norm=self.config.num_embeds_ada_norm,
194
+ attention_bias=self.config.attention_bias,
195
+ only_cross_attention=self.config.only_cross_attention,
196
+ double_self_attention=self.config.double_self_attention,
197
+ upcast_attention=self.config.upcast_attention,
198
+ norm_type=norm_type,
199
+ norm_elementwise_affine=self.config.norm_elementwise_affine,
200
+ norm_eps=self.config.norm_eps,
201
+ attention_type=self.config.attention_type,
202
+ )
203
+ for _ in range(self.config.num_layers)
204
+ ]
205
+ )
206
+
207
+ if self.use_linear_projection:
208
+ self.proj_out = torch.nn.Linear(self.inner_dim, self.out_channels)
209
+ else:
210
+ self.proj_out = torch.nn.Conv2d(self.inner_dim, self.out_channels, kernel_size=1, stride=1, padding=0)
211
+
212
+ def _init_vectorized_inputs(self, norm_type):
213
+ assert self.config.sample_size is not None, "Transformer2DModel over discrete input must provide sample_size"
214
+ assert self.config.num_vector_embeds is not None, (
215
+ "Transformer2DModel over discrete input must provide num_embed"
216
+ )
217
+
218
+ self.height = self.config.sample_size
219
+ self.width = self.config.sample_size
220
+ self.num_latent_pixels = self.height * self.width
221
+
222
+ self.latent_image_embedding = ImagePositionalEmbeddings(
223
+ num_embed=self.config.num_vector_embeds, embed_dim=self.inner_dim, height=self.height, width=self.width
224
+ )
225
+
226
+ self.transformer_blocks = nn.ModuleList(
227
+ [
228
+ BasicTransformerBlock(
229
+ self.inner_dim,
230
+ self.config.num_attention_heads,
231
+ self.config.attention_head_dim,
232
+ dropout=self.config.dropout,
233
+ cross_attention_dim=self.config.cross_attention_dim,
234
+ activation_fn=self.config.activation_fn,
235
+ num_embeds_ada_norm=self.config.num_embeds_ada_norm,
236
+ attention_bias=self.config.attention_bias,
237
+ only_cross_attention=self.config.only_cross_attention,
238
+ double_self_attention=self.config.double_self_attention,
239
+ upcast_attention=self.config.upcast_attention,
240
+ norm_type=norm_type,
241
+ norm_elementwise_affine=self.config.norm_elementwise_affine,
242
+ norm_eps=self.config.norm_eps,
243
+ attention_type=self.config.attention_type,
244
+ )
245
+ for _ in range(self.config.num_layers)
246
+ ]
247
+ )
248
+
249
+ self.norm_out = nn.LayerNorm(self.inner_dim)
250
+ self.out = nn.Linear(self.inner_dim, self.config.num_vector_embeds - 1)
251
+
252
+ def _init_patched_inputs(self, norm_type):
253
+ assert self.config.sample_size is not None, "Transformer2DModel over patched input must provide sample_size"
254
+
255
+ self.height = self.config.sample_size
256
+ self.width = self.config.sample_size
257
+
258
+ self.patch_size = self.config.patch_size
259
+ interpolation_scale = (
260
+ self.config.interpolation_scale
261
+ if self.config.interpolation_scale is not None
262
+ else max(self.config.sample_size // 64, 1)
263
+ )
264
+ self.pos_embed = PatchEmbed(
265
+ height=self.config.sample_size,
266
+ width=self.config.sample_size,
267
+ patch_size=self.config.patch_size,
268
+ in_channels=self.in_channels,
269
+ embed_dim=self.inner_dim,
270
+ interpolation_scale=interpolation_scale,
271
+ )
272
+
273
+ self.transformer_blocks = nn.ModuleList(
274
+ [
275
+ BasicTransformerBlock(
276
+ self.inner_dim,
277
+ self.config.num_attention_heads,
278
+ self.config.attention_head_dim,
279
+ dropout=self.config.dropout,
280
+ cross_attention_dim=self.config.cross_attention_dim,
281
+ activation_fn=self.config.activation_fn,
282
+ num_embeds_ada_norm=self.config.num_embeds_ada_norm,
283
+ attention_bias=self.config.attention_bias,
284
+ only_cross_attention=self.config.only_cross_attention,
285
+ double_self_attention=self.config.double_self_attention,
286
+ upcast_attention=self.config.upcast_attention,
287
+ norm_type=norm_type,
288
+ norm_elementwise_affine=self.config.norm_elementwise_affine,
289
+ norm_eps=self.config.norm_eps,
290
+ attention_type=self.config.attention_type,
291
+ )
292
+ for _ in range(self.config.num_layers)
293
+ ]
294
+ )
295
+
296
+ if self.config.norm_type != "ada_norm_single":
297
+ self.norm_out = nn.LayerNorm(self.inner_dim, elementwise_affine=False, eps=1e-6)
298
+ self.proj_out_1 = nn.Linear(self.inner_dim, 2 * self.inner_dim)
299
+ self.proj_out_2 = nn.Linear(
300
+ self.inner_dim, self.config.patch_size * self.config.patch_size * self.out_channels
301
+ )
302
+ elif self.config.norm_type == "ada_norm_single":
303
+ self.norm_out = nn.LayerNorm(self.inner_dim, elementwise_affine=False, eps=1e-6)
304
+ self.scale_shift_table = nn.Parameter(torch.randn(2, self.inner_dim) / self.inner_dim**0.5)
305
+ self.proj_out = nn.Linear(
306
+ self.inner_dim, self.config.patch_size * self.config.patch_size * self.out_channels
307
+ )
308
+
309
+ # PixArt-Alpha blocks.
310
+ self.adaln_single = None
311
+ if self.config.norm_type == "ada_norm_single":
312
+ # TODO(Sayak, PVP) clean this, for now we use sample size to determine whether to use
313
+ # additional conditions until we find better name
314
+ self.adaln_single = AdaLayerNormSingle(
315
+ self.inner_dim, use_additional_conditions=self.use_additional_conditions
316
+ )
317
+
318
+ self.caption_projection = None
319
+ if self.caption_channels is not None:
320
+ self.caption_projection = PixArtAlphaTextProjection(
321
+ in_features=self.caption_channels, hidden_size=self.inner_dim
322
+ )
323
+
324
+ def forward(
325
+ self,
326
+ hidden_states: torch.Tensor,
327
+ encoder_hidden_states: Optional[torch.Tensor] = None,
328
+ timestep: Optional[torch.LongTensor] = None,
329
+ added_cond_kwargs: Dict[str, torch.Tensor] = None,
330
+ class_labels: Optional[torch.LongTensor] = None,
331
+ cross_attention_kwargs: Dict[str, Any] = None,
332
+ attention_mask: Optional[torch.Tensor] = None,
333
+ encoder_attention_mask: Optional[torch.Tensor] = None,
334
+ return_dict: bool = True,
335
+ ):
336
+ """
337
+ The [`Transformer2DModel`] forward method.
338
+
339
+ Args:
340
+ hidden_states (`torch.LongTensor` of shape `(batch size, num latent pixels)` if discrete, `torch.Tensor` of shape `(batch size, channel, height, width)` if continuous):
341
+ Input `hidden_states`.
342
+ encoder_hidden_states ( `torch.Tensor` of shape `(batch size, sequence len, embed dims)`, *optional*):
343
+ Conditional embeddings for cross attention layer. If not given, cross-attention defaults to
344
+ self-attention.
345
+ timestep ( `torch.LongTensor`, *optional*):
346
+ Used to indicate denoising step. Optional timestep to be applied as an embedding in `AdaLayerNorm`.
347
+ class_labels ( `torch.LongTensor` of shape `(batch size, num classes)`, *optional*):
348
+ Used to indicate class labels conditioning. Optional class labels to be applied as an embedding in
349
+ `AdaLayerZeroNorm`.
350
+ cross_attention_kwargs ( `Dict[str, Any]`, *optional*):
351
+ A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
352
+ `self.processor` in
353
+ [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
354
+ attention_mask ( `torch.Tensor`, *optional*):
355
+ An attention mask of shape `(batch, key_tokens)` is applied to `encoder_hidden_states`. If `1` the mask
356
+ is kept, otherwise if `0` it is discarded. Mask will be converted into a bias, which adds large
357
+ negative values to the attention scores corresponding to "discard" tokens.
358
+ encoder_attention_mask ( `torch.Tensor`, *optional*):
359
+ Cross-attention mask applied to `encoder_hidden_states`. Two formats supported:
360
+
361
+ * Mask `(batch, sequence_length)` True = keep, False = discard.
362
+ * Bias `(batch, 1, sequence_length)` 0 = keep, -10000 = discard.
363
+
364
+ If `ndim == 2`: will be interpreted as a mask, then converted into a bias consistent with the format
365
+ above. This bias will be added to the cross-attention scores.
366
+ return_dict (`bool`, *optional*, defaults to `True`):
367
+ Whether or not to return a [`~models.unets.unet_2d_condition.UNet2DConditionOutput`] instead of a plain
368
+ tuple.
369
+
370
+ Returns:
371
+ If `return_dict` is True, an [`~models.transformers.transformer_2d.Transformer2DModelOutput`] is returned,
372
+ otherwise a `tuple` where the first element is the sample tensor.
373
+ """
374
+ if cross_attention_kwargs is not None:
375
+ if cross_attention_kwargs.get("scale", None) is not None:
376
+ logger.warning("Passing `scale` to `cross_attention_kwargs` is deprecated. `scale` will be ignored.")
377
+ # ensure attention_mask is a bias, and give it a singleton query_tokens dimension.
378
+ # we may have done this conversion already, e.g. if we came here via UNet2DConditionModel#forward.
379
+ # we can tell by counting dims; if ndim == 2: it's a mask rather than a bias.
380
+ # expects mask of shape:
381
+ # [batch, key_tokens]
382
+ # adds singleton query_tokens dimension:
383
+ # [batch, 1, key_tokens]
384
+ # this helps to broadcast it as a bias over attention scores, which will be in one of the following shapes:
385
+ # [batch, heads, query_tokens, key_tokens] (e.g. torch sdp attn)
386
+ # [batch * heads, query_tokens, key_tokens] (e.g. xformers or classic attn)
387
+ if attention_mask is not None and attention_mask.ndim == 2:
388
+ # assume that mask is expressed as:
389
+ # (1 = keep, 0 = discard)
390
+ # convert mask into a bias that can be added to attention scores:
391
+ # (keep = +0, discard = -10000.0)
392
+ attention_mask = (1 - attention_mask.to(hidden_states.dtype)) * -10000.0
393
+ attention_mask = attention_mask.unsqueeze(1)
394
+
395
+ # convert encoder_attention_mask to a bias the same way we do for attention_mask
396
+ if encoder_attention_mask is not None and encoder_attention_mask.ndim == 2:
397
+ encoder_attention_mask = (1 - encoder_attention_mask.to(hidden_states.dtype)) * -10000.0
398
+ encoder_attention_mask = encoder_attention_mask.unsqueeze(1)
399
+
400
+ # 1. Input
401
+ if self.is_input_continuous:
402
+ batch_size, _, height, width = hidden_states.shape
403
+ residual = hidden_states
404
+ hidden_states, inner_dim = self._operate_on_continuous_inputs(hidden_states)
405
+ elif self.is_input_vectorized:
406
+ hidden_states = self.latent_image_embedding(hidden_states)
407
+ elif self.is_input_patches:
408
+ height, width = hidden_states.shape[-2] // self.patch_size, hidden_states.shape[-1] // self.patch_size
409
+ hidden_states, encoder_hidden_states, timestep, embedded_timestep = self._operate_on_patched_inputs(
410
+ hidden_states, encoder_hidden_states, timestep, added_cond_kwargs
411
+ )
412
+
413
+ # 2. Blocks
414
+ for block in self.transformer_blocks:
415
+ if torch.is_grad_enabled() and self.gradient_checkpointing:
416
+ hidden_states = self._gradient_checkpointing_func(
417
+ block,
418
+ hidden_states,
419
+ attention_mask,
420
+ encoder_hidden_states,
421
+ encoder_attention_mask,
422
+ timestep,
423
+ cross_attention_kwargs,
424
+ class_labels,
425
+ )
426
+ else:
427
+ hidden_states = block(
428
+ hidden_states,
429
+ attention_mask=attention_mask,
430
+ encoder_hidden_states=encoder_hidden_states,
431
+ encoder_attention_mask=encoder_attention_mask,
432
+ timestep=timestep,
433
+ cross_attention_kwargs=cross_attention_kwargs,
434
+ class_labels=class_labels,
435
+ )
436
+
437
+ # 3. Output
438
+ if self.is_input_continuous:
439
+ output = self._get_output_for_continuous_inputs(
440
+ hidden_states=hidden_states,
441
+ residual=residual,
442
+ batch_size=batch_size,
443
+ height=height,
444
+ width=width,
445
+ inner_dim=inner_dim,
446
+ )
447
+ elif self.is_input_vectorized:
448
+ output = self._get_output_for_vectorized_inputs(hidden_states)
449
+ elif self.is_input_patches:
450
+ output = self._get_output_for_patched_inputs(
451
+ hidden_states=hidden_states,
452
+ timestep=timestep,
453
+ class_labels=class_labels,
454
+ embedded_timestep=embedded_timestep,
455
+ height=height,
456
+ width=width,
457
+ )
458
+
459
+ if not return_dict:
460
+ return (output,)
461
+
462
+ return Transformer2DModelOutput(sample=output)
463
+
464
+ def _operate_on_continuous_inputs(self, hidden_states):
465
+ batch, _, height, width = hidden_states.shape
466
+ hidden_states = self.norm(hidden_states)
467
+
468
+ if not self.use_linear_projection:
469
+ hidden_states = self.proj_in(hidden_states)
470
+ inner_dim = hidden_states.shape[1]
471
+ hidden_states = hidden_states.permute(0, 2, 3, 1).reshape(batch, height * width, inner_dim)
472
+ else:
473
+ inner_dim = hidden_states.shape[1]
474
+ hidden_states = hidden_states.permute(0, 2, 3, 1).reshape(batch, height * width, inner_dim)
475
+ hidden_states = self.proj_in(hidden_states)
476
+
477
+ return hidden_states, inner_dim
478
+
479
+ def _operate_on_patched_inputs(self, hidden_states, encoder_hidden_states, timestep, added_cond_kwargs):
480
+ batch_size = hidden_states.shape[0]
481
+ hidden_states = self.pos_embed(hidden_states)
482
+ embedded_timestep = None
483
+
484
+ if self.adaln_single is not None:
485
+ if self.use_additional_conditions and added_cond_kwargs is None:
486
+ raise ValueError(
487
+ "`added_cond_kwargs` cannot be None when using additional conditions for `adaln_single`."
488
+ )
489
+ timestep, embedded_timestep = self.adaln_single(
490
+ timestep, added_cond_kwargs, batch_size=batch_size, hidden_dtype=hidden_states.dtype
491
+ )
492
+
493
+ if self.caption_projection is not None:
494
+ encoder_hidden_states = self.caption_projection(encoder_hidden_states)
495
+ encoder_hidden_states = encoder_hidden_states.view(batch_size, -1, hidden_states.shape[-1])
496
+
497
+ return hidden_states, encoder_hidden_states, timestep, embedded_timestep
498
+
499
+ def _get_output_for_continuous_inputs(self, hidden_states, residual, batch_size, height, width, inner_dim):
500
+ if not self.use_linear_projection:
501
+ hidden_states = (
502
+ hidden_states.reshape(batch_size, height, width, inner_dim).permute(0, 3, 1, 2).contiguous()
503
+ )
504
+ hidden_states = self.proj_out(hidden_states)
505
+ else:
506
+ hidden_states = self.proj_out(hidden_states)
507
+ hidden_states = (
508
+ hidden_states.reshape(batch_size, height, width, inner_dim).permute(0, 3, 1, 2).contiguous()
509
+ )
510
+
511
+ output = hidden_states + residual
512
+ return output
513
+
514
+ def _get_output_for_vectorized_inputs(self, hidden_states):
515
+ hidden_states = self.norm_out(hidden_states)
516
+ logits = self.out(hidden_states)
517
+ # (batch, self.num_vector_embeds - 1, self.num_latent_pixels)
518
+ logits = logits.permute(0, 2, 1)
519
+ # log(p(x_0))
520
+ output = F.log_softmax(logits.double(), dim=1).float()
521
+ return output
522
+
523
+ def _get_output_for_patched_inputs(
524
+ self, hidden_states, timestep, class_labels, embedded_timestep, height=None, width=None
525
+ ):
526
+ if self.config.norm_type != "ada_norm_single":
527
+ conditioning = self.transformer_blocks[0].norm1.emb(
528
+ timestep, class_labels, hidden_dtype=hidden_states.dtype
529
+ )
530
+ shift, scale = self.proj_out_1(F.silu(conditioning)).chunk(2, dim=1)
531
+ hidden_states = self.norm_out(hidden_states) * (1 + scale[:, None]) + shift[:, None]
532
+ hidden_states = self.proj_out_2(hidden_states)
533
+ elif self.config.norm_type == "ada_norm_single":
534
+ shift, scale = (self.scale_shift_table[None] + embedded_timestep[:, None]).chunk(2, dim=1)
535
+ hidden_states = self.norm_out(hidden_states)
536
+ # Modulation
537
+ hidden_states = hidden_states * (1 + scale) + shift
538
+ hidden_states = self.proj_out(hidden_states)
539
+ hidden_states = hidden_states.squeeze(1)
540
+
541
+ # unpatchify
542
+ if self.adaln_single is None:
543
+ height = width = int(hidden_states.shape[1] ** 0.5)
544
+ hidden_states = hidden_states.reshape(
545
+ shape=(-1, height, width, self.patch_size, self.patch_size, self.out_channels)
546
+ )
547
+ hidden_states = torch.einsum("nhwpqc->nchpwq", hidden_states)
548
+ output = hidden_states.reshape(
549
+ shape=(-1, self.out_channels, height * self.patch_size, width * self.patch_size)
550
+ )
551
+ return output
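+
+
+ if __name__ == "__main__":
+     # Stand-alone sketch of the unpatchify step above (illustrative; the sizes are arbitrary).
+     # A (batch, num_patches, p * p * C) token tensor is folded back into a (batch, C, H*p, W*p) image.
+     batch, height, width, patch, channels = 2, 4, 4, 2, 3
+     tokens = torch.randn(batch, height * width, patch * patch * channels)
+
+     folded = tokens.reshape(-1, height, width, patch, patch, channels)
+     folded = torch.einsum("nhwpqc->nchpwq", folded)
+     image = folded.reshape(-1, channels, height * patch, width * patch)
+     assert image.shape == (batch, channels, height * patch, width * patch)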
exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/models/transformers/transformer_allegro.py ADDED
@@ -0,0 +1,414 @@
1
+ # Copyright 2025 The RhymesAI and The HuggingFace Team.
2
+ # All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ from typing import Optional, Tuple
17
+
18
+ import torch
19
+ import torch.nn as nn
20
+ import torch.nn.functional as F
21
+
22
+ from ...configuration_utils import ConfigMixin, register_to_config
23
+ from ...utils import logging
24
+ from ...utils.torch_utils import maybe_allow_in_graph
25
+ from ..attention import FeedForward
26
+ from ..attention_processor import AllegroAttnProcessor2_0, Attention
27
+ from ..cache_utils import CacheMixin
28
+ from ..embeddings import PatchEmbed, PixArtAlphaTextProjection
29
+ from ..modeling_outputs import Transformer2DModelOutput
30
+ from ..modeling_utils import ModelMixin
31
+ from ..normalization import AdaLayerNormSingle
32
+
33
+
34
+ logger = logging.get_logger(__name__)
35
+
36
+
37
+ @maybe_allow_in_graph
38
+ class AllegroTransformerBlock(nn.Module):
39
+ r"""
40
+ Transformer block used in [Allegro](https://github.com/rhymes-ai/Allegro) model.
41
+
42
+ Args:
43
+ dim (`int`):
44
+ The number of channels in the input and output.
45
+ num_attention_heads (`int`):
46
+ The number of heads to use for multi-head attention.
47
+ attention_head_dim (`int`):
48
+ The number of channels in each head.
49
+ dropout (`float`, defaults to `0.0`):
50
+ The dropout probability to use.
51
+ cross_attention_dim (`int`, *optional*, defaults to `None`):
52
+ The dimension of the cross attention features.
53
+ activation_fn (`str`, defaults to `"geglu"`):
54
+ Activation function to be used in feed-forward.
55
+ attention_bias (`bool`, defaults to `False`):
56
+ Whether or not to use bias in attention projection layers.
57
+ only_cross_attention (`bool`, defaults to `False`):
58
+ norm_elementwise_affine (`bool`, defaults to `True`):
59
+ Whether to use learnable elementwise affine parameters for normalization.
60
+ norm_eps (`float`, defaults to `1e-5`):
61
+ Epsilon value for normalization layers.
62
+ final_dropout (`bool` defaults to `False`):
63
+ Whether to apply a final dropout after the last feed-forward layer.
64
+ """
65
+
66
+ def __init__(
67
+ self,
68
+ dim: int,
69
+ num_attention_heads: int,
70
+ attention_head_dim: int,
71
+ dropout=0.0,
72
+ cross_attention_dim: Optional[int] = None,
73
+ activation_fn: str = "geglu",
74
+ attention_bias: bool = False,
75
+ norm_elementwise_affine: bool = True,
76
+ norm_eps: float = 1e-5,
77
+ ):
78
+ super().__init__()
79
+
80
+ # 1. Self Attention
81
+ self.norm1 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine, eps=norm_eps)
82
+
83
+ self.attn1 = Attention(
84
+ query_dim=dim,
85
+ heads=num_attention_heads,
86
+ dim_head=attention_head_dim,
87
+ dropout=dropout,
88
+ bias=attention_bias,
89
+ cross_attention_dim=None,
90
+ processor=AllegroAttnProcessor2_0(),
91
+ )
92
+
93
+ # 2. Cross Attention
94
+ self.norm2 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine, eps=norm_eps)
95
+ self.attn2 = Attention(
96
+ query_dim=dim,
97
+ cross_attention_dim=cross_attention_dim,
98
+ heads=num_attention_heads,
99
+ dim_head=attention_head_dim,
100
+ dropout=dropout,
101
+ bias=attention_bias,
102
+ processor=AllegroAttnProcessor2_0(),
103
+ )
104
+
105
+ # 3. Feed Forward
106
+ self.norm3 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine, eps=norm_eps)
107
+
108
+ self.ff = FeedForward(
109
+ dim,
110
+ dropout=dropout,
111
+ activation_fn=activation_fn,
112
+ )
113
+
114
+ # 4. Scale-shift
115
+ self.scale_shift_table = nn.Parameter(torch.randn(6, dim) / dim**0.5)
116
+
117
+ def forward(
118
+ self,
119
+ hidden_states: torch.Tensor,
120
+ encoder_hidden_states: Optional[torch.Tensor] = None,
121
+ temb: Optional[torch.LongTensor] = None,
122
+ attention_mask: Optional[torch.Tensor] = None,
123
+ encoder_attention_mask: Optional[torch.Tensor] = None,
124
+ image_rotary_emb=None,
125
+ ) -> torch.Tensor:
126
+ # 0. Self-Attention
127
+ batch_size = hidden_states.shape[0]
128
+
129
+ shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = (
130
+ self.scale_shift_table[None] + temb.reshape(batch_size, 6, -1)
131
+ ).chunk(6, dim=1)
132
+ norm_hidden_states = self.norm1(hidden_states)
133
+ norm_hidden_states = norm_hidden_states * (1 + scale_msa) + shift_msa
134
+ norm_hidden_states = norm_hidden_states.squeeze(1)
135
+
136
+ attn_output = self.attn1(
137
+ norm_hidden_states,
138
+ encoder_hidden_states=None,
139
+ attention_mask=attention_mask,
140
+ image_rotary_emb=image_rotary_emb,
141
+ )
142
+ attn_output = gate_msa * attn_output
143
+
144
+ hidden_states = attn_output + hidden_states
145
+ if hidden_states.ndim == 4:
146
+ hidden_states = hidden_states.squeeze(1)
147
+
148
+ # 1. Cross-Attention
149
+ if self.attn2 is not None:
150
+ norm_hidden_states = hidden_states
151
+
152
+ attn_output = self.attn2(
153
+ norm_hidden_states,
154
+ encoder_hidden_states=encoder_hidden_states,
155
+ attention_mask=encoder_attention_mask,
156
+ image_rotary_emb=None,
157
+ )
158
+ hidden_states = attn_output + hidden_states
159
+
160
+ # 2. Feed-forward
161
+ norm_hidden_states = self.norm2(hidden_states)
162
+ norm_hidden_states = norm_hidden_states * (1 + scale_mlp) + shift_mlp
163
+
164
+ ff_output = self.ff(norm_hidden_states)
165
+ ff_output = gate_mlp * ff_output
166
+
167
+ hidden_states = ff_output + hidden_states
168
+
169
+ # TODO(aryan): maybe following line is not required
170
+ if hidden_states.ndim == 4:
171
+ hidden_states = hidden_states.squeeze(1)
172
+
173
+ return hidden_states
174
+
175
+
176
+ class AllegroTransformer3DModel(ModelMixin, ConfigMixin, CacheMixin):
179
+ """
180
+ A 3D Transformer model for video-like data.
181
+
182
+ Args:
183
+ patch_size (`int`, defaults to `2`):
184
+ The size of spatial patches to use in the patch embedding layer.
185
+ patch_size_t (`int`, defaults to `1`):
186
+ The size of temporal patches to use in the patch embedding layer.
187
+ num_attention_heads (`int`, defaults to `24`):
188
+ The number of heads to use for multi-head attention.
189
+ attention_head_dim (`int`, defaults to `96`):
190
+ The number of channels in each head.
191
+ in_channels (`int`, defaults to `4`):
192
+ The number of channels in the input.
193
+ out_channels (`int`, *optional*, defaults to `4`):
194
+ The number of channels in the output.
195
+ num_layers (`int`, defaults to `32`):
196
+ The number of layers of Transformer blocks to use.
197
+ dropout (`float`, defaults to `0.0`):
198
+ The dropout probability to use.
199
+ cross_attention_dim (`int`, defaults to `2304`):
200
+ The dimension of the cross attention features.
201
+ attention_bias (`bool`, defaults to `True`):
202
+ Whether or not to use bias in the attention projection layers.
203
+ sample_height (`int`, defaults to `90`):
204
+ The height of the input latents.
205
+ sample_width (`int`, defaults to `160`):
206
+ The width of the input latents.
207
+ sample_frames (`int`, defaults to `22`):
208
+ The number of frames in the input latents.
209
+ activation_fn (`str`, defaults to `"gelu-approximate"`):
210
+ Activation function to use in feed-forward.
211
+ norm_elementwise_affine (`bool`, defaults to `False`):
212
+ Whether or not to use elementwise affine in normalization layers.
213
+ norm_eps (`float`, defaults to `1e-6`):
214
+ The epsilon value to use in normalization layers.
215
+ caption_channels (`int`, defaults to `4096`):
216
+ Number of channels to use for projecting the caption embeddings.
217
+ interpolation_scale_h (`float`, defaults to `2.0`):
218
+ Scaling factor to apply in 3D positional embeddings across height dimension.
219
+ interpolation_scale_w (`float`, defaults to `2.0`):
220
+ Scaling factor to apply in 3D positional embeddings across width dimension.
221
+ interpolation_scale_t (`float`, defaults to `2.2`):
222
+ Scaling factor to apply in 3D positional embeddings across time dimension.
223
+ """
224
+
225
+ _supports_gradient_checkpointing = True
226
+ _skip_layerwise_casting_patterns = ["pos_embed", "norm", "adaln_single"]
227
+
228
+ @register_to_config
229
+ def __init__(
230
+ self,
231
+ patch_size: int = 2,
232
+ patch_size_t: int = 1,
233
+ num_attention_heads: int = 24,
234
+ attention_head_dim: int = 96,
235
+ in_channels: int = 4,
236
+ out_channels: int = 4,
237
+ num_layers: int = 32,
238
+ dropout: float = 0.0,
239
+ cross_attention_dim: int = 2304,
240
+ attention_bias: bool = True,
241
+ sample_height: int = 90,
242
+ sample_width: int = 160,
243
+ sample_frames: int = 22,
244
+ activation_fn: str = "gelu-approximate",
245
+ norm_elementwise_affine: bool = False,
246
+ norm_eps: float = 1e-6,
247
+ caption_channels: int = 4096,
248
+ interpolation_scale_h: float = 2.0,
249
+ interpolation_scale_w: float = 2.0,
250
+ interpolation_scale_t: float = 2.2,
251
+ ):
252
+ super().__init__()
253
+
254
+ self.inner_dim = num_attention_heads * attention_head_dim
255
+
256
+ interpolation_scale_t = (
257
+ interpolation_scale_t
258
+ if interpolation_scale_t is not None
259
+ else ((sample_frames - 1) // 16 + 1)
260
+ if sample_frames % 2 == 1
261
+ else sample_frames // 16
262
+ )
263
+ interpolation_scale_h = interpolation_scale_h if interpolation_scale_h is not None else sample_height / 30
264
+ interpolation_scale_w = interpolation_scale_w if interpolation_scale_w is not None else sample_width / 40
265
+
266
+ # 1. Patch embedding
267
+ self.pos_embed = PatchEmbed(
268
+ height=sample_height,
269
+ width=sample_width,
270
+ patch_size=patch_size,
271
+ in_channels=in_channels,
272
+ embed_dim=self.inner_dim,
273
+ pos_embed_type=None,
274
+ )
275
+
276
+ # 2. Transformer blocks
277
+ self.transformer_blocks = nn.ModuleList(
278
+ [
279
+ AllegroTransformerBlock(
280
+ self.inner_dim,
281
+ num_attention_heads,
282
+ attention_head_dim,
283
+ dropout=dropout,
284
+ cross_attention_dim=cross_attention_dim,
285
+ activation_fn=activation_fn,
286
+ attention_bias=attention_bias,
287
+ norm_elementwise_affine=norm_elementwise_affine,
288
+ norm_eps=norm_eps,
289
+ )
290
+ for _ in range(num_layers)
291
+ ]
292
+ )
293
+
294
+ # 3. Output projection & norm
295
+ self.norm_out = nn.LayerNorm(self.inner_dim, elementwise_affine=False, eps=1e-6)
296
+ self.scale_shift_table = nn.Parameter(torch.randn(2, self.inner_dim) / self.inner_dim**0.5)
297
+ self.proj_out = nn.Linear(self.inner_dim, patch_size * patch_size * out_channels)
298
+
299
+ # 4. Timestep embeddings
300
+ self.adaln_single = AdaLayerNormSingle(self.inner_dim, use_additional_conditions=False)
301
+
302
+ # 5. Caption projection
303
+ self.caption_projection = PixArtAlphaTextProjection(in_features=caption_channels, hidden_size=self.inner_dim)
304
+
305
+ self.gradient_checkpointing = False
306
+
307
+ def forward(
308
+ self,
309
+ hidden_states: torch.Tensor,
310
+ encoder_hidden_states: torch.Tensor,
311
+ timestep: torch.LongTensor,
312
+ attention_mask: Optional[torch.Tensor] = None,
313
+ encoder_attention_mask: Optional[torch.Tensor] = None,
314
+ image_rotary_emb: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
315
+ return_dict: bool = True,
316
+ ):
317
+ batch_size, num_channels, num_frames, height, width = hidden_states.shape
318
+ p_t = self.config.patch_size_t
319
+ p = self.config.patch_size
320
+
321
+ post_patch_num_frames = num_frames // p_t
322
+ post_patch_height = height // p
323
+ post_patch_width = width // p
324
+
325
+ # ensure attention_mask is a bias, and give it a singleton query_tokens dimension.
326
+ # we may have done this conversion already, e.g. if we came here via UNet2DConditionModel#forward.
327
+ # we can tell by counting dims; if ndim == 2: it's a mask rather than a bias.
328
+ # expects mask of shape:
329
+ # [batch, key_tokens]
330
+ # adds singleton query_tokens dimension:
331
+ # [batch, 1, key_tokens]
332
+ # this helps to broadcast it as a bias over attention scores, which will be in one of the following shapes:
333
+ # [batch, heads, query_tokens, key_tokens] (e.g. torch sdp attn)
334
+ # [batch * heads, query_tokens, key_tokens] (e.g. xformers or classic attn)
+ attention_mask_vid, attention_mask_img = None, None
335
+ if attention_mask is not None and attention_mask.ndim == 4:
336
+ # assume that mask is expressed as:
337
+ # (1 = keep, 0 = discard)
338
+ # convert mask into a bias that can be added to attention scores:
339
+ # (keep = +0, discard = -10000.0)
340
+ # b, frame+use_image_num, h, w -> a video with images
341
+ # b, 1, h, w -> only images
342
+ attention_mask = attention_mask.to(hidden_states.dtype)
343
+ attention_mask = attention_mask[:, :num_frames] # [batch_size, num_frames, height, width]
344
+
345
+ if attention_mask.numel() > 0:
346
+ attention_mask = attention_mask.unsqueeze(1) # [batch_size, 1, num_frames, height, width]
347
+ attention_mask = F.max_pool3d(attention_mask, kernel_size=(p_t, p, p), stride=(p_t, p, p))
348
+ attention_mask = attention_mask.flatten(1).view(batch_size, 1, -1)
349
+
350
+ attention_mask = (
351
+ (1 - attention_mask.bool().to(hidden_states.dtype)) * -10000.0 if attention_mask.numel() > 0 else None
352
+ )
353
+
354
+ # convert encoder_attention_mask to a bias the same way we do for attention_mask
355
+ if encoder_attention_mask is not None and encoder_attention_mask.ndim == 2:
356
+ encoder_attention_mask = (1 - encoder_attention_mask.to(self.dtype)) * -10000.0
357
+ encoder_attention_mask = encoder_attention_mask.unsqueeze(1)
358
+
359
+ # 1. Timestep embeddings
360
+ timestep, embedded_timestep = self.adaln_single(
361
+ timestep, batch_size=batch_size, hidden_dtype=hidden_states.dtype
362
+ )
363
+
364
+ # 2. Patch embeddings
365
+ hidden_states = hidden_states.permute(0, 2, 1, 3, 4).flatten(0, 1)
366
+ hidden_states = self.pos_embed(hidden_states)
367
+ hidden_states = hidden_states.unflatten(0, (batch_size, -1)).flatten(1, 2)
368
+
369
+ encoder_hidden_states = self.caption_projection(encoder_hidden_states)
370
+ encoder_hidden_states = encoder_hidden_states.view(batch_size, -1, encoder_hidden_states.shape[-1])
371
+
372
+ # 3. Transformer blocks
373
+ for i, block in enumerate(self.transformer_blocks):
375
+ if torch.is_grad_enabled() and self.gradient_checkpointing:
376
+ hidden_states = self._gradient_checkpointing_func(
377
+ block,
378
+ hidden_states,
379
+ encoder_hidden_states,
380
+ timestep,
381
+ attention_mask,
382
+ encoder_attention_mask,
383
+ image_rotary_emb,
384
+ )
385
+ else:
386
+ hidden_states = block(
387
+ hidden_states=hidden_states,
388
+ encoder_hidden_states=encoder_hidden_states,
389
+ temb=timestep,
390
+ attention_mask=attention_mask,
391
+ encoder_attention_mask=encoder_attention_mask,
392
+ image_rotary_emb=image_rotary_emb,
393
+ )
394
+
395
+ # 4. Output normalization & projection
396
+ shift, scale = (self.scale_shift_table[None] + embedded_timestep[:, None]).chunk(2, dim=1)
397
+ hidden_states = self.norm_out(hidden_states)
398
+
399
+ # Modulation
400
+ hidden_states = hidden_states * (1 + scale) + shift
401
+ hidden_states = self.proj_out(hidden_states)
402
+ hidden_states = hidden_states.squeeze(1)
403
+
404
+ # 5. Unpatchify
405
+ hidden_states = hidden_states.reshape(
406
+ batch_size, post_patch_num_frames, post_patch_height, post_patch_width, p_t, p, p, -1
407
+ )
408
+ hidden_states = hidden_states.permute(0, 7, 1, 4, 2, 5, 3, 6)
409
+ output = hidden_states.reshape(batch_size, -1, num_frames, height, width)
410
+
411
+ if not return_dict:
412
+ return (output,)
413
+
414
+ return Transformer2DModelOutput(sample=output)
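
A reviewer-side smoke test for the Allegro transformer added above: the snippet below is a sketch, not part of the diff. The import path and the miniature config values are assumptions chosen only to exercise the `forward` signature and the unpatchify step on CPU.

import torch
from diffusers.models.transformers.transformer_allegro import AllegroTransformer3DModel  # assumed path

# Tiny config: inner_dim = num_attention_heads * attention_head_dim = 16, and
# cross_attention_dim / caption_channels are shrunk to match it (the full config uses 24 * 96 = 2304).
model = AllegroTransformer3DModel(
    num_attention_heads=2,
    attention_head_dim=8,
    num_layers=1,
    cross_attention_dim=16,
    caption_channels=16,
    sample_height=8,
    sample_width=8,
    sample_frames=2,
)
latents = torch.randn(1, 4, 2, 8, 8)   # (batch, in_channels, frames, height, width)
captions = torch.randn(1, 8, 16)       # (batch, text_tokens, caption_channels)
timestep = torch.tensor([10])

with torch.no_grad():
    sample = model(latents, captions, timestep, return_dict=False)[0]

print(sample.shape)  # (1, 4, 2, 8, 8) -- unpatchified back to the input latent shape
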
exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/models/transformers/transformer_bria.py ADDED
@@ -0,0 +1,719 @@
1
+ import inspect
2
+ from typing import Any, Dict, List, Optional, Tuple, Union
3
+
4
+ import numpy as np
5
+ import torch
6
+ import torch.nn as nn
7
+ import torch.nn.functional as F
8
+
9
+ from ...configuration_utils import ConfigMixin, register_to_config
10
+ from ...loaders import FromOriginalModelMixin, PeftAdapterMixin
11
+ from ...utils import USE_PEFT_BACKEND, logging, scale_lora_layers, unscale_lora_layers
12
+ from ...utils.torch_utils import maybe_allow_in_graph
13
+ from ..attention import AttentionModuleMixin, FeedForward
14
+ from ..attention_dispatch import dispatch_attention_fn
15
+ from ..cache_utils import CacheMixin
16
+ from ..embeddings import TimestepEmbedding, apply_rotary_emb, get_timestep_embedding
17
+ from ..modeling_outputs import Transformer2DModelOutput
18
+ from ..modeling_utils import ModelMixin
19
+ from ..normalization import AdaLayerNormContinuous, AdaLayerNormZero, AdaLayerNormZeroSingle
20
+
21
+
22
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
23
+
24
+
25
+ def _get_projections(attn: "BriaAttention", hidden_states, encoder_hidden_states=None):
26
+ query = attn.to_q(hidden_states)
27
+ key = attn.to_k(hidden_states)
28
+ value = attn.to_v(hidden_states)
29
+
30
+ encoder_query = encoder_key = encoder_value = None
31
+ if encoder_hidden_states is not None and attn.added_kv_proj_dim is not None:
32
+ encoder_query = attn.add_q_proj(encoder_hidden_states)
33
+ encoder_key = attn.add_k_proj(encoder_hidden_states)
34
+ encoder_value = attn.add_v_proj(encoder_hidden_states)
35
+
36
+ return query, key, value, encoder_query, encoder_key, encoder_value
37
+
38
+
39
+ def _get_fused_projections(attn: "BriaAttention", hidden_states, encoder_hidden_states=None):
40
+ query, key, value = attn.to_qkv(hidden_states).chunk(3, dim=-1)
41
+
42
+ encoder_query = encoder_key = encoder_value = None
43
+ if encoder_hidden_states is not None and hasattr(attn, "to_added_qkv"):
44
+ encoder_query, encoder_key, encoder_value = attn.to_added_qkv(encoder_hidden_states).chunk(3, dim=-1)
45
+
46
+ return query, key, value, encoder_query, encoder_key, encoder_value
47
+
48
+
49
+ def _get_qkv_projections(attn: "BriaAttention", hidden_states, encoder_hidden_states=None):
50
+ if attn.fused_projections:
51
+ return _get_fused_projections(attn, hidden_states, encoder_hidden_states)
52
+ return _get_projections(attn, hidden_states, encoder_hidden_states)
53
+
54
+
55
+ def get_1d_rotary_pos_embed(
56
+ dim: int,
57
+ pos: Union[np.ndarray, int],
58
+ theta: float = 10000.0,
59
+ use_real=False,
60
+ linear_factor=1.0,
61
+ ntk_factor=1.0,
62
+ repeat_interleave_real=True,
63
+ freqs_dtype=torch.float32, # torch.float32, torch.float64 (flux)
64
+ ):
65
+ """
66
+ Precompute the frequency tensor for complex exponentials (cis) with given dimensions.
67
+
68
+ This function calculates a frequency tensor with complex exponentials using the given dimension 'dim' and the
69
+ position indices 'pos'. The 'theta' parameter scales the frequencies. The returned tensor contains complex values in complex64
70
+ data type.
71
+
72
+ Args:
73
+ dim (`int`): Dimension of the frequency tensor.
74
+ pos (`np.ndarray` or `int`): Position indices for the frequency tensor. [S] or scalar
75
+ theta (`float`, *optional*, defaults to 10000.0):
76
+ Scaling factor for frequency computation. Defaults to 10000.0.
77
+ use_real (`bool`, *optional*):
78
+ If True, return real part and imaginary part separately. Otherwise, return complex numbers.
79
+ linear_factor (`float`, *optional*, defaults to 1.0):
80
+ Scaling factor for the context extrapolation. Defaults to 1.0.
81
+ ntk_factor (`float`, *optional*, defaults to 1.0):
82
+ Scaling factor for the NTK-Aware RoPE. Defaults to 1.0.
83
+ repeat_interleave_real (`bool`, *optional*, defaults to `True`):
84
+ If `True` and `use_real`, real part and imaginary part are each interleaved with themselves to reach `dim`.
85
+ Otherwise, they are concatenated with themselves.
86
+ freqs_dtype (`torch.float32` or `torch.float64`, *optional*, defaults to `torch.float32`):
87
+ the dtype of the frequency tensor.
88
+ Returns:
89
+ `torch.Tensor`: Precomputed frequency tensor with complex exponentials. [S, D/2]
90
+ """
91
+ assert dim % 2 == 0
92
+
93
+ if isinstance(pos, int):
94
+ pos = torch.arange(pos)
95
+ if isinstance(pos, np.ndarray):
96
+ pos = torch.from_numpy(pos) # type: ignore # [S]
97
+
98
+ theta = theta * ntk_factor
99
+ freqs = (
100
+ 1.0
101
+ / (theta ** (torch.arange(0, dim, 2, dtype=freqs_dtype, device=pos.device)[: (dim // 2)] / dim))
102
+ / linear_factor
103
+ ) # [D/2]
104
+ freqs = torch.outer(pos, freqs) # type: ignore # [S, D/2]
105
+ if use_real and repeat_interleave_real:
106
+ # bria
107
+ freqs_cos = freqs.cos().repeat_interleave(2, dim=1).float() # [S, D]
108
+ freqs_sin = freqs.sin().repeat_interleave(2, dim=1).float() # [S, D]
109
+ return freqs_cos, freqs_sin
110
+ elif use_real:
111
+ # stable audio, allegro
112
+ freqs_cos = torch.cat([freqs.cos(), freqs.cos()], dim=-1).float() # [S, D]
113
+ freqs_sin = torch.cat([freqs.sin(), freqs.sin()], dim=-1).float() # [S, D]
114
+ return freqs_cos, freqs_sin
115
+ else:
116
+ # lumina
117
+ freqs_cis = torch.polar(torch.ones_like(freqs), freqs) # complex64 # [S, D/2]
118
+ return freqs_cis
119
+
120
+
121
+ class BriaAttnProcessor:
122
+ _attention_backend = None
123
+
124
+ def __init__(self):
125
+ if not hasattr(F, "scaled_dot_product_attention"):
126
+ raise ImportError(f"{self.__class__.__name__} requires PyTorch 2.0. Please upgrade your pytorch version.")
127
+
128
+ def __call__(
129
+ self,
130
+ attn: "BriaAttention",
131
+ hidden_states: torch.Tensor,
132
+ encoder_hidden_states: torch.Tensor = None,
133
+ attention_mask: Optional[torch.Tensor] = None,
134
+ image_rotary_emb: Optional[torch.Tensor] = None,
135
+ ) -> torch.Tensor:
136
+ query, key, value, encoder_query, encoder_key, encoder_value = _get_qkv_projections(
137
+ attn, hidden_states, encoder_hidden_states
138
+ )
139
+
140
+ query = query.unflatten(-1, (attn.heads, -1))
141
+ key = key.unflatten(-1, (attn.heads, -1))
142
+ value = value.unflatten(-1, (attn.heads, -1))
143
+
144
+ query = attn.norm_q(query)
145
+ key = attn.norm_k(key)
146
+
147
+ if attn.added_kv_proj_dim is not None:
148
+ encoder_query = encoder_query.unflatten(-1, (attn.heads, -1))
149
+ encoder_key = encoder_key.unflatten(-1, (attn.heads, -1))
150
+ encoder_value = encoder_value.unflatten(-1, (attn.heads, -1))
151
+
152
+ encoder_query = attn.norm_added_q(encoder_query)
153
+ encoder_key = attn.norm_added_k(encoder_key)
154
+
155
+ query = torch.cat([encoder_query, query], dim=1)
156
+ key = torch.cat([encoder_key, key], dim=1)
157
+ value = torch.cat([encoder_value, value], dim=1)
158
+
159
+ if image_rotary_emb is not None:
160
+ query = apply_rotary_emb(query, image_rotary_emb, sequence_dim=1)
161
+ key = apply_rotary_emb(key, image_rotary_emb, sequence_dim=1)
162
+
163
+ hidden_states = dispatch_attention_fn(
164
+ query, key, value, attn_mask=attention_mask, backend=self._attention_backend
165
+ )
166
+ hidden_states = hidden_states.flatten(2, 3)
167
+ hidden_states = hidden_states.to(query.dtype)
168
+
169
+ if encoder_hidden_states is not None:
170
+ encoder_hidden_states, hidden_states = hidden_states.split_with_sizes(
171
+ [encoder_hidden_states.shape[1], hidden_states.shape[1] - encoder_hidden_states.shape[1]], dim=1
172
+ )
173
+ hidden_states = attn.to_out[0](hidden_states)
174
+ hidden_states = attn.to_out[1](hidden_states)
175
+ encoder_hidden_states = attn.to_add_out(encoder_hidden_states)
176
+
177
+ return hidden_states, encoder_hidden_states
178
+ else:
179
+ return hidden_states
180
+
181
+
182
+ class BriaAttention(torch.nn.Module, AttentionModuleMixin):
183
+ _default_processor_cls = BriaAttnProcessor
184
+ _available_processors = [
185
+ BriaAttnProcessor,
186
+ ]
187
+
188
+ def __init__(
189
+ self,
190
+ query_dim: int,
191
+ heads: int = 8,
192
+ dim_head: int = 64,
193
+ dropout: float = 0.0,
194
+ bias: bool = False,
195
+ added_kv_proj_dim: Optional[int] = None,
196
+ added_proj_bias: Optional[bool] = True,
197
+ out_bias: bool = True,
198
+ eps: float = 1e-5,
199
+ out_dim: int = None,
200
+ context_pre_only: Optional[bool] = None,
201
+ pre_only: bool = False,
202
+ elementwise_affine: bool = True,
203
+ processor=None,
204
+ ):
205
+ super().__init__()
206
+
207
+ self.head_dim = dim_head
208
+ self.inner_dim = out_dim if out_dim is not None else dim_head * heads
209
+ self.query_dim = query_dim
210
+ self.use_bias = bias
211
+ self.dropout = dropout
212
+ self.out_dim = out_dim if out_dim is not None else query_dim
213
+ self.context_pre_only = context_pre_only
214
+ self.pre_only = pre_only
215
+ self.heads = out_dim // dim_head if out_dim is not None else heads
216
+ self.added_kv_proj_dim = added_kv_proj_dim
217
+ self.added_proj_bias = added_proj_bias
218
+
219
+ self.norm_q = torch.nn.RMSNorm(dim_head, eps=eps, elementwise_affine=elementwise_affine)
220
+ self.norm_k = torch.nn.RMSNorm(dim_head, eps=eps, elementwise_affine=elementwise_affine)
221
+ self.to_q = torch.nn.Linear(query_dim, self.inner_dim, bias=bias)
222
+ self.to_k = torch.nn.Linear(query_dim, self.inner_dim, bias=bias)
223
+ self.to_v = torch.nn.Linear(query_dim, self.inner_dim, bias=bias)
224
+
225
+ if not self.pre_only:
226
+ self.to_out = torch.nn.ModuleList([])
227
+ self.to_out.append(torch.nn.Linear(self.inner_dim, self.out_dim, bias=out_bias))
228
+ self.to_out.append(torch.nn.Dropout(dropout))
229
+
230
+ if added_kv_proj_dim is not None:
231
+ self.norm_added_q = torch.nn.RMSNorm(dim_head, eps=eps)
232
+ self.norm_added_k = torch.nn.RMSNorm(dim_head, eps=eps)
233
+ self.add_q_proj = torch.nn.Linear(added_kv_proj_dim, self.inner_dim, bias=added_proj_bias)
234
+ self.add_k_proj = torch.nn.Linear(added_kv_proj_dim, self.inner_dim, bias=added_proj_bias)
235
+ self.add_v_proj = torch.nn.Linear(added_kv_proj_dim, self.inner_dim, bias=added_proj_bias)
236
+ self.to_add_out = torch.nn.Linear(self.inner_dim, query_dim, bias=out_bias)
237
+
238
+ if processor is None:
239
+ processor = self._default_processor_cls()
240
+ self.set_processor(processor)
241
+
242
+ def forward(
243
+ self,
244
+ hidden_states: torch.Tensor,
245
+ encoder_hidden_states: Optional[torch.Tensor] = None,
246
+ attention_mask: Optional[torch.Tensor] = None,
247
+ image_rotary_emb: Optional[torch.Tensor] = None,
248
+ **kwargs,
249
+ ) -> torch.Tensor:
250
+ attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys())
251
+ quiet_attn_parameters = {"ip_adapter_masks", "ip_hidden_states"}
252
+ unused_kwargs = [k for k, _ in kwargs.items() if k not in attn_parameters and k not in quiet_attn_parameters]
253
+ if len(unused_kwargs) > 0:
254
+ logger.warning(
255
+ f"attention_kwargs {unused_kwargs} are not expected by {self.processor.__class__.__name__} and will be ignored."
256
+ )
257
+ kwargs = {k: w for k, w in kwargs.items() if k in attn_parameters}
258
+ return self.processor(self, hidden_states, encoder_hidden_states, attention_mask, image_rotary_emb, **kwargs)
259
+
260
+
261
+ class BriaEmbedND(torch.nn.Module):
262
+ # modified from https://github.com/black-forest-labs/flux/blob/c00d7c60b085fce8058b9df845e036090873f2ce/src/flux/modules/layers.py#L11
263
+ def __init__(self, theta: int, axes_dim: List[int]):
264
+ super().__init__()
265
+ self.theta = theta
266
+ self.axes_dim = axes_dim
267
+
268
+ def forward(self, ids: torch.Tensor) -> torch.Tensor:
269
+ n_axes = ids.shape[-1]
270
+ cos_out = []
271
+ sin_out = []
272
+ pos = ids.float()
273
+ is_mps = ids.device.type == "mps"
274
+ freqs_dtype = torch.float32 if is_mps else torch.float64
275
+ for i in range(n_axes):
276
+ cos, sin = get_1d_rotary_pos_embed(
277
+ self.axes_dim[i],
278
+ pos[:, i],
279
+ theta=self.theta,
280
+ repeat_interleave_real=True,
281
+ use_real=True,
282
+ freqs_dtype=freqs_dtype,
283
+ )
284
+ cos_out.append(cos)
285
+ sin_out.append(sin)
286
+ freqs_cos = torch.cat(cos_out, dim=-1).to(ids.device)
287
+ freqs_sin = torch.cat(sin_out, dim=-1).to(ids.device)
288
+ return freqs_cos, freqs_sin
289
+
290
+
291
+ class BriaTimesteps(nn.Module):
292
+ def __init__(
293
+ self, num_channels: int, flip_sin_to_cos: bool, downscale_freq_shift: float, scale: int = 1, time_theta=10000
294
+ ):
295
+ super().__init__()
296
+ self.num_channels = num_channels
297
+ self.flip_sin_to_cos = flip_sin_to_cos
298
+ self.downscale_freq_shift = downscale_freq_shift
299
+ self.scale = scale
300
+ self.time_theta = time_theta
301
+
302
+ def forward(self, timesteps):
303
+ t_emb = get_timestep_embedding(
304
+ timesteps,
305
+ self.num_channels,
306
+ flip_sin_to_cos=self.flip_sin_to_cos,
307
+ downscale_freq_shift=self.downscale_freq_shift,
308
+ scale=self.scale,
309
+ max_period=self.time_theta,
310
+ )
311
+ return t_emb
312
+
313
+
314
+ class BriaTimestepProjEmbeddings(nn.Module):
315
+ def __init__(self, embedding_dim, time_theta):
316
+ super().__init__()
317
+
318
+ self.time_proj = BriaTimesteps(
319
+ num_channels=256, flip_sin_to_cos=True, downscale_freq_shift=0, time_theta=time_theta
320
+ )
321
+ self.timestep_embedder = TimestepEmbedding(in_channels=256, time_embed_dim=embedding_dim)
322
+
323
+ def forward(self, timestep, dtype):
324
+ timesteps_proj = self.time_proj(timestep)
325
+ timesteps_emb = self.timestep_embedder(timesteps_proj.to(dtype=dtype)) # (N, D)
326
+ return timesteps_emb
327
+
328
+
329
+ class BriaPosEmbed(torch.nn.Module):
330
+ # modified from https://github.com/black-forest-labs/flux/blob/c00d7c60b085fce8058b9df845e036090873f2ce/src/flux/modules/layers.py#L11
331
+ def __init__(self, theta: int, axes_dim: List[int]):
332
+ super().__init__()
333
+ self.theta = theta
334
+ self.axes_dim = axes_dim
335
+
336
+ def forward(self, ids: torch.Tensor) -> torch.Tensor:
337
+ n_axes = ids.shape[-1]
338
+ cos_out = []
339
+ sin_out = []
340
+ pos = ids.float()
341
+ is_mps = ids.device.type == "mps"
342
+ freqs_dtype = torch.float32 if is_mps else torch.float64
343
+ for i in range(n_axes):
344
+ cos, sin = get_1d_rotary_pos_embed(
345
+ self.axes_dim[i],
346
+ pos[:, i],
347
+ theta=self.theta,
348
+ repeat_interleave_real=True,
349
+ use_real=True,
350
+ freqs_dtype=freqs_dtype,
351
+ )
352
+ cos_out.append(cos)
353
+ sin_out.append(sin)
354
+ freqs_cos = torch.cat(cos_out, dim=-1).to(ids.device)
355
+ freqs_sin = torch.cat(sin_out, dim=-1).to(ids.device)
356
+ return freqs_cos, freqs_sin
357
+
358
+
359
+ @maybe_allow_in_graph
360
+ class BriaTransformerBlock(nn.Module):
361
+ def __init__(
362
+ self, dim: int, num_attention_heads: int, attention_head_dim: int, qk_norm: str = "rms_norm", eps: float = 1e-6
363
+ ):
364
+ super().__init__()
365
+
366
+ self.norm1 = AdaLayerNormZero(dim)
367
+ self.norm1_context = AdaLayerNormZero(dim)
368
+
369
+ self.attn = BriaAttention(
370
+ query_dim=dim,
371
+ added_kv_proj_dim=dim,
372
+ dim_head=attention_head_dim,
373
+ heads=num_attention_heads,
374
+ out_dim=dim,
375
+ context_pre_only=False,
376
+ bias=True,
377
+ processor=BriaAttnProcessor(),
378
+ eps=eps,
379
+ )
380
+
381
+ self.norm2 = nn.LayerNorm(dim, elementwise_affine=False, eps=1e-6)
382
+ self.ff = FeedForward(dim=dim, dim_out=dim, activation_fn="gelu-approximate")
383
+
384
+ self.norm2_context = nn.LayerNorm(dim, elementwise_affine=False, eps=1e-6)
385
+ self.ff_context = FeedForward(dim=dim, dim_out=dim, activation_fn="gelu-approximate")
386
+
387
+ def forward(
388
+ self,
389
+ hidden_states: torch.Tensor,
390
+ encoder_hidden_states: torch.Tensor,
391
+ temb: torch.Tensor,
392
+ image_rotary_emb: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
393
+ attention_kwargs: Optional[Dict[str, Any]] = None,
394
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
395
+ norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb)
396
+
397
+ norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context(
398
+ encoder_hidden_states, emb=temb
399
+ )
400
+ attention_kwargs = attention_kwargs or {}
401
+
402
+ # Attention.
403
+ attention_outputs = self.attn(
404
+ hidden_states=norm_hidden_states,
405
+ encoder_hidden_states=norm_encoder_hidden_states,
406
+ image_rotary_emb=image_rotary_emb,
407
+ **attention_kwargs,
408
+ )
409
+
410
+ if len(attention_outputs) == 2:
411
+ attn_output, context_attn_output = attention_outputs
412
+ elif len(attention_outputs) == 3:
413
+ attn_output, context_attn_output, ip_attn_output = attention_outputs
414
+
415
+ # Process attention outputs for the `hidden_states`.
416
+ attn_output = gate_msa.unsqueeze(1) * attn_output
417
+ hidden_states = hidden_states + attn_output
418
+
419
+ norm_hidden_states = self.norm2(hidden_states)
420
+ norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
421
+
422
+ ff_output = self.ff(norm_hidden_states)
423
+ ff_output = gate_mlp.unsqueeze(1) * ff_output
424
+
425
+ hidden_states = hidden_states + ff_output
426
+ if len(attention_outputs) == 3:
427
+ hidden_states = hidden_states + ip_attn_output
428
+
429
+ # Process attention outputs for the `encoder_hidden_states`.
430
+ context_attn_output = c_gate_msa.unsqueeze(1) * context_attn_output
431
+ encoder_hidden_states = encoder_hidden_states + context_attn_output
432
+
433
+ norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states)
434
+ norm_encoder_hidden_states = norm_encoder_hidden_states * (1 + c_scale_mlp[:, None]) + c_shift_mlp[:, None]
435
+
436
+ context_ff_output = self.ff_context(norm_encoder_hidden_states)
437
+ encoder_hidden_states = encoder_hidden_states + c_gate_mlp.unsqueeze(1) * context_ff_output
438
+ if encoder_hidden_states.dtype == torch.float16:
439
+ encoder_hidden_states = encoder_hidden_states.clip(-65504, 65504)
440
+
441
+ return encoder_hidden_states, hidden_states
442
+
443
+
444
+ @maybe_allow_in_graph
445
+ class BriaSingleTransformerBlock(nn.Module):
446
+ def __init__(self, dim: int, num_attention_heads: int, attention_head_dim: int, mlp_ratio: float = 4.0):
447
+ super().__init__()
448
+ self.mlp_hidden_dim = int(dim * mlp_ratio)
449
+
450
+ self.norm = AdaLayerNormZeroSingle(dim)
451
+ self.proj_mlp = nn.Linear(dim, self.mlp_hidden_dim)
452
+ self.act_mlp = nn.GELU(approximate="tanh")
453
+ self.proj_out = nn.Linear(dim + self.mlp_hidden_dim, dim)
454
+
455
+ processor = BriaAttnProcessor()
456
+
457
+ self.attn = BriaAttention(
458
+ query_dim=dim,
459
+ dim_head=attention_head_dim,
460
+ heads=num_attention_heads,
461
+ out_dim=dim,
462
+ bias=True,
463
+ processor=processor,
464
+ eps=1e-6,
465
+ pre_only=True,
466
+ )
467
+
468
+ def forward(
469
+ self,
470
+ hidden_states: torch.Tensor,
471
+ encoder_hidden_states: torch.Tensor,
472
+ temb: torch.Tensor,
473
+ image_rotary_emb: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
474
+ attention_kwargs: Optional[Dict[str, Any]] = None,
475
+ ) -> torch.Tensor:
476
+ text_seq_len = encoder_hidden_states.shape[1]
477
+ hidden_states = torch.cat([encoder_hidden_states, hidden_states], dim=1)
478
+
479
+ residual = hidden_states
480
+ norm_hidden_states, gate = self.norm(hidden_states, emb=temb)
481
+ mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states))
482
+ attention_kwargs = attention_kwargs or {}
483
+ attn_output = self.attn(
484
+ hidden_states=norm_hidden_states,
485
+ image_rotary_emb=image_rotary_emb,
486
+ **attention_kwargs,
487
+ )
488
+
489
+ hidden_states = torch.cat([attn_output, mlp_hidden_states], dim=2)
490
+ gate = gate.unsqueeze(1)
491
+ hidden_states = gate * self.proj_out(hidden_states)
492
+ hidden_states = residual + hidden_states
493
+ if hidden_states.dtype == torch.float16:
494
+ hidden_states = hidden_states.clip(-65504, 65504)
495
+
496
+ encoder_hidden_states, hidden_states = hidden_states[:, :text_seq_len], hidden_states[:, text_seq_len:]
497
+ return encoder_hidden_states, hidden_states
498
+
499
+
500
+ class BriaTransformer2DModel(ModelMixin, ConfigMixin, PeftAdapterMixin, FromOriginalModelMixin, CacheMixin):
501
+ """
502
+ The Transformer model introduced in Flux. Based on FluxPipeline with several changes:
503
+ - No pooled embeddings
504
+ - Zero padding is used for prompts
505
+ - No guidance embedding since this is not a distilled version
506
+ Reference: https://blackforestlabs.ai/announcing-black-forest-labs/
507
+
508
+ Parameters:
509
+ patch_size (`int`): Patch size to turn the input data into small patches.
510
+ in_channels (`int`, *optional*, defaults to 64): The number of channels in the input.
511
+ num_layers (`int`, *optional*, defaults to 19): The number of layers of MMDiT blocks to use.
512
+ num_single_layers (`int`, *optional*, defaults to 38): The number of layers of single DiT blocks to use.
513
+ attention_head_dim (`int`, *optional*, defaults to 128): The number of channels in each head.
514
+ num_attention_heads (`int`, *optional*, defaults to 24): The number of heads to use for multi-head attention.
515
+ joint_attention_dim (`int`, *optional*): The number of `encoder_hidden_states` dimensions to use.
516
+ pooled_projection_dim (`int`): Number of dimensions to use when projecting the `pooled_projections`.
517
+ guidance_embeds (`bool`, defaults to False): Whether to use guidance embeddings.
518
+ """
519
+
520
+ _supports_gradient_checkpointing = True
521
+
522
+ @register_to_config
523
+ def __init__(
524
+ self,
525
+ patch_size: int = 1,
526
+ in_channels: int = 64,
527
+ num_layers: int = 19,
528
+ num_single_layers: int = 38,
529
+ attention_head_dim: int = 128,
530
+ num_attention_heads: int = 24,
531
+ joint_attention_dim: int = 4096,
532
+ pooled_projection_dim: int = None,
533
+ guidance_embeds: bool = False,
534
+ axes_dims_rope: List[int] = [16, 56, 56],
535
+ rope_theta=10000,
536
+ time_theta=10000,
537
+ ):
538
+ super().__init__()
539
+ self.out_channels = in_channels
540
+ self.inner_dim = self.config.num_attention_heads * self.config.attention_head_dim
541
+
542
+ self.pos_embed = BriaEmbedND(theta=rope_theta, axes_dim=axes_dims_rope)
543
+
544
+ self.time_embed = BriaTimestepProjEmbeddings(embedding_dim=self.inner_dim, time_theta=time_theta)
545
+ if guidance_embeds:
546
+ self.guidance_embed = BriaTimestepProjEmbeddings(embedding_dim=self.inner_dim)
547
+
548
+ self.context_embedder = nn.Linear(self.config.joint_attention_dim, self.inner_dim)
549
+ self.x_embedder = torch.nn.Linear(self.config.in_channels, self.inner_dim)
550
+
551
+ self.transformer_blocks = nn.ModuleList(
552
+ [
553
+ BriaTransformerBlock(
554
+ dim=self.inner_dim,
555
+ num_attention_heads=self.config.num_attention_heads,
556
+ attention_head_dim=self.config.attention_head_dim,
557
+ )
558
+ for i in range(self.config.num_layers)
559
+ ]
560
+ )
561
+
562
+ self.single_transformer_blocks = nn.ModuleList(
563
+ [
564
+ BriaSingleTransformerBlock(
565
+ dim=self.inner_dim,
566
+ num_attention_heads=self.config.num_attention_heads,
567
+ attention_head_dim=self.config.attention_head_dim,
568
+ )
569
+ for i in range(self.config.num_single_layers)
570
+ ]
571
+ )
572
+
573
+ self.norm_out = AdaLayerNormContinuous(self.inner_dim, self.inner_dim, elementwise_affine=False, eps=1e-6)
574
+ self.proj_out = nn.Linear(self.inner_dim, patch_size * patch_size * self.out_channels, bias=True)
575
+
576
+ self.gradient_checkpointing = False
577
+
578
+ def forward(
579
+ self,
580
+ hidden_states: torch.Tensor,
581
+ encoder_hidden_states: torch.Tensor = None,
582
+ pooled_projections: torch.Tensor = None,
583
+ timestep: torch.LongTensor = None,
584
+ img_ids: torch.Tensor = None,
585
+ txt_ids: torch.Tensor = None,
586
+ guidance: torch.Tensor = None,
587
+ attention_kwargs: Optional[Dict[str, Any]] = None,
588
+ return_dict: bool = True,
589
+ controlnet_block_samples=None,
590
+ controlnet_single_block_samples=None,
591
+ ) -> Union[torch.FloatTensor, Transformer2DModelOutput]:
592
+ """
593
+ The [`BriaTransformer2DModel`] forward method.
594
+
595
+ Args:
596
+ hidden_states (`torch.FloatTensor` of shape `(batch_size, image_sequence_length, in_channels)`):
597
+ Input `hidden_states`.
598
+ encoder_hidden_states (`torch.FloatTensor` of shape `(batch size, sequence_len, embed_dims)`):
599
+ Conditional embeddings (embeddings computed from the input conditions such as prompts) to use.
600
+ pooled_projections (`torch.FloatTensor` of shape `(batch_size, projection_dim)`): Embeddings projected
601
+ from the embeddings of input conditions.
602
+ timestep ( `torch.LongTensor`):
603
+ Used to indicate denoising step.
604
+ controlnet_block_samples (`list` of `torch.Tensor`, *optional*):
605
+ A list of tensors that if specified are added to the residuals of transformer blocks.
606
+ attention_kwargs (`dict`, *optional*):
607
+ A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
608
+ `self.processor` in
609
+ [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
610
+ return_dict (`bool`, *optional*, defaults to `True`):
611
+ Whether or not to return a [`~models.transformer_2d.Transformer2DModelOutput`] instead of a plain
612
+ tuple.
613
+
614
+ Returns:
615
+ If `return_dict` is True, an [`~models.transformer_2d.Transformer2DModelOutput`] is returned, otherwise a
616
+ `tuple` where the first element is the sample tensor.
617
+ """
618
+ if attention_kwargs is not None:
619
+ attention_kwargs = attention_kwargs.copy()
620
+ lora_scale = attention_kwargs.pop("scale", 1.0)
621
+ else:
622
+ lora_scale = 1.0
623
+
624
+ if USE_PEFT_BACKEND:
625
+ # weight the lora layers by setting `lora_scale` for each PEFT layer
626
+ scale_lora_layers(self, lora_scale)
627
+ else:
628
+ if attention_kwargs is not None and attention_kwargs.get("scale", None) is not None:
629
+ logger.warning(
630
+ "Passing `scale` via `attention_kwargs` when not using the PEFT backend is ineffective."
631
+ )
632
+ hidden_states = self.x_embedder(hidden_states)
633
+
634
+ timestep = timestep.to(hidden_states.dtype)
635
+ if guidance is not None:
636
+ guidance = guidance.to(hidden_states.dtype)
637
+ else:
638
+ guidance = None
639
+
640
+ temb = self.time_embed(timestep, dtype=hidden_states.dtype)
641
+
642
+ if guidance is not None:
643
+ temb += self.guidance_embed(guidance, dtype=hidden_states.dtype)
644
+
645
+ encoder_hidden_states = self.context_embedder(encoder_hidden_states)
646
+
647
+ if len(txt_ids.shape) == 3:
648
+ txt_ids = txt_ids[0]
649
+
650
+ if len(img_ids.shape) == 3:
651
+ img_ids = img_ids[0]
652
+
653
+ ids = torch.cat((txt_ids, img_ids), dim=0)
654
+ image_rotary_emb = self.pos_embed(ids)
655
+
656
+ for index_block, block in enumerate(self.transformer_blocks):
657
+ if torch.is_grad_enabled() and self.gradient_checkpointing:
658
+ encoder_hidden_states, hidden_states = self._gradient_checkpointing_func(
659
+ block,
660
+ hidden_states,
661
+ encoder_hidden_states,
662
+ temb,
663
+ image_rotary_emb,
664
+ attention_kwargs,
665
+ )
666
+
667
+ else:
668
+ encoder_hidden_states, hidden_states = block(
669
+ hidden_states=hidden_states,
670
+ encoder_hidden_states=encoder_hidden_states,
671
+ temb=temb,
672
+ image_rotary_emb=image_rotary_emb,
673
+ )
674
+
675
+ # controlnet residual
676
+ if controlnet_block_samples is not None:
677
+ interval_control = len(self.transformer_blocks) / len(controlnet_block_samples)
678
+ interval_control = int(np.ceil(interval_control))
679
+ hidden_states = hidden_states + controlnet_block_samples[index_block // interval_control]
680
+
681
+ for index_block, block in enumerate(self.single_transformer_blocks):
682
+ if torch.is_grad_enabled() and self.gradient_checkpointing:
683
+ encoder_hidden_states, hidden_states = self._gradient_checkpointing_func(
684
+ block,
685
+ hidden_states,
686
+ encoder_hidden_states,
687
+ temb,
688
+ image_rotary_emb,
689
+ attention_kwargs,
690
+ )
691
+
692
+ else:
693
+ encoder_hidden_states, hidden_states = block(
694
+ hidden_states=hidden_states,
695
+ encoder_hidden_states=encoder_hidden_states,
696
+ temb=temb,
697
+ image_rotary_emb=image_rotary_emb,
698
+ )
699
+
700
+ # controlnet residual
701
+ if controlnet_single_block_samples is not None:
702
+ interval_control = len(self.single_transformer_blocks) / len(controlnet_single_block_samples)
703
+ interval_control = int(np.ceil(interval_control))
704
+ hidden_states[:, encoder_hidden_states.shape[1] :, ...] = (
705
+ hidden_states[:, encoder_hidden_states.shape[1] :, ...]
706
+ + controlnet_single_block_samples[index_block // interval_control]
707
+ )
708
+
709
+ hidden_states = self.norm_out(hidden_states, temb)
710
+ output = self.proj_out(hidden_states)
711
+
712
+ if USE_PEFT_BACKEND:
713
+ # remove `lora_scale` from each PEFT layer
714
+ unscale_lora_layers(self, lora_scale)
715
+
716
+ if not return_dict:
717
+ return (output,)
718
+
719
+ return Transformer2DModelOutput(sample=output)
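
Unlike the Allegro model above, `BriaTransformer2DModel` consumes Flux-style packed latent tokens together with `img_ids`/`txt_ids` for the rotary embedding. The sketch below is a reviewer-side shape check, not part of the diff; the import path, the tiny config, and the all-zero position ids are assumptions (note that `axes_dims_rope` entries must be even and sum to `attention_head_dim`).

import torch
from diffusers.models.transformers.transformer_bria import BriaTransformer2DModel  # assumed path

model = BriaTransformer2DModel(
    in_channels=4,
    num_layers=1,
    num_single_layers=1,
    attention_head_dim=8,
    num_attention_heads=2,          # inner_dim = 2 * 8 = 16
    joint_attention_dim=8,
    axes_dims_rope=[4, 2, 2],       # even entries summing to attention_head_dim
)
image_tokens = torch.randn(1, 16, 4)  # (batch, image_sequence_length, in_channels)
text_tokens = torch.randn(1, 4, 8)    # (batch, text_sequence_length, joint_attention_dim)
img_ids = torch.zeros(16, 3)          # all-zero rotary position ids are enough for a smoke test
txt_ids = torch.zeros(4, 3)

with torch.no_grad():
    sample = model(
        image_tokens,
        text_tokens,
        timestep=torch.tensor([1.0]),
        img_ids=img_ids,
        txt_ids=txt_ids,
        return_dict=False,
    )[0]

print(sample.shape)  # (1, 16, 4) -- per-token output; patch_size=1 so channels == in_channels
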
exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/models/transformers/transformer_chroma.py ADDED
@@ -0,0 +1,641 @@
1
+ # Copyright 2025 Black Forest Labs, The HuggingFace Team and loadstone-rock. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+
16
+ from typing import Any, Dict, Optional, Tuple, Union
17
+
18
+ import numpy as np
19
+ import torch
20
+ import torch.nn as nn
21
+
22
+ from ...configuration_utils import ConfigMixin, register_to_config
23
+ from ...loaders import FluxTransformer2DLoadersMixin, FromOriginalModelMixin, PeftAdapterMixin
24
+ from ...utils import USE_PEFT_BACKEND, deprecate, logging, scale_lora_layers, unscale_lora_layers
25
+ from ...utils.import_utils import is_torch_npu_available
26
+ from ...utils.torch_utils import maybe_allow_in_graph
27
+ from ..attention import AttentionMixin, FeedForward
28
+ from ..cache_utils import CacheMixin
29
+ from ..embeddings import FluxPosEmbed, PixArtAlphaTextProjection, Timesteps, get_timestep_embedding
30
+ from ..modeling_outputs import Transformer2DModelOutput
31
+ from ..modeling_utils import ModelMixin
32
+ from ..normalization import CombinedTimestepLabelEmbeddings, FP32LayerNorm, RMSNorm
33
+ from .transformer_flux import FluxAttention, FluxAttnProcessor
34
+
35
+
36
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
37
+
38
+
39
+ class ChromaAdaLayerNormZeroPruned(nn.Module):
40
+ r"""
41
+ Norm layer adaptive layer norm zero (adaLN-Zero).
42
+
43
+ Parameters:
44
+ embedding_dim (`int`): The size of each embedding vector.
45
+ num_embeddings (`int`): The size of the embeddings dictionary.
46
+ """
47
+
48
+ def __init__(self, embedding_dim: int, num_embeddings: Optional[int] = None, norm_type="layer_norm", bias=True):
49
+ super().__init__()
50
+ if num_embeddings is not None:
51
+ self.emb = CombinedTimestepLabelEmbeddings(num_embeddings, embedding_dim)
52
+ else:
53
+ self.emb = None
54
+
55
+ if norm_type == "layer_norm":
56
+ self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False, eps=1e-6)
57
+ elif norm_type == "fp32_layer_norm":
58
+ self.norm = FP32LayerNorm(embedding_dim, elementwise_affine=False, bias=False)
59
+ else:
60
+ raise ValueError(
61
+ f"Unsupported `norm_type` ({norm_type}) provided. Supported ones are: 'layer_norm', 'fp32_layer_norm'."
62
+ )
63
+
64
+ def forward(
65
+ self,
66
+ x: torch.Tensor,
67
+ timestep: Optional[torch.Tensor] = None,
68
+ class_labels: Optional[torch.LongTensor] = None,
69
+ hidden_dtype: Optional[torch.dtype] = None,
70
+ emb: Optional[torch.Tensor] = None,
71
+ ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
72
+ if self.emb is not None:
73
+ emb = self.emb(timestep, class_labels, hidden_dtype=hidden_dtype)
74
+ shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = emb.flatten(1, 2).chunk(6, dim=1)
75
+ x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None]
76
+ return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
77
+
78
+
79
+ class ChromaAdaLayerNormZeroSinglePruned(nn.Module):
80
+ r"""
81
+ Norm layer adaptive layer norm zero (adaLN-Zero).
82
+
83
+ Parameters:
84
+ embedding_dim (`int`): The size of each embedding vector.
86
+ """
87
+
88
+ def __init__(self, embedding_dim: int, norm_type="layer_norm", bias=True):
89
+ super().__init__()
90
+
91
+ if norm_type == "layer_norm":
92
+ self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False, eps=1e-6)
93
+ else:
94
+ raise ValueError(
95
+ f"Unsupported `norm_type` ({norm_type}) provided. Supported ones are: 'layer_norm', 'fp32_layer_norm'."
96
+ )
97
+
98
+ def forward(
99
+ self,
100
+ x: torch.Tensor,
101
+ emb: Optional[torch.Tensor] = None,
102
+ ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
103
+ shift_msa, scale_msa, gate_msa = emb.flatten(1, 2).chunk(3, dim=1)
104
+ x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None]
105
+ return x, gate_msa
106
+
107
+
108
+ class ChromaAdaLayerNormContinuousPruned(nn.Module):
109
+ r"""
110
+ Adaptive normalization layer with a norm layer (layer_norm or rms_norm).
111
+
112
+ Args:
113
+ embedding_dim (`int`): Embedding dimension to use during projection.
114
+ conditioning_embedding_dim (`int`): Dimension of the input condition.
115
+ elementwise_affine (`bool`, defaults to `True`):
116
+ Boolean flag to denote if affine transformation should be applied.
117
+ eps (`float`, defaults to 1e-5): Epsilon factor.
118
+ bias (`bool`, defaults to `True`): Boolean flag to denote if bias should be used.
119
+ norm_type (`str`, defaults to `"layer_norm"`):
120
+ Normalization layer to use. Values supported: "layer_norm", "rms_norm".
121
+ """
122
+
123
+ def __init__(
124
+ self,
125
+ embedding_dim: int,
126
+ conditioning_embedding_dim: int,
127
+ # NOTE: It is a bit weird that the norm layer can be configured to have scale and shift parameters
128
+ # because the output is immediately scaled and shifted by the projected conditioning embeddings.
129
+ # Note that AdaLayerNorm does not let the norm layer have scale and shift parameters.
130
+ # However, this is how it was implemented in the original code, and it's rather likely you should
131
+ # set `elementwise_affine` to False.
132
+ elementwise_affine=True,
133
+ eps=1e-5,
134
+ bias=True,
135
+ norm_type="layer_norm",
136
+ ):
137
+ super().__init__()
138
+ if norm_type == "layer_norm":
139
+ self.norm = nn.LayerNorm(embedding_dim, eps, elementwise_affine, bias)
140
+ elif norm_type == "rms_norm":
141
+ self.norm = RMSNorm(embedding_dim, eps, elementwise_affine)
142
+ else:
143
+ raise ValueError(f"unknown norm_type {norm_type}")
144
+
145
+ def forward(self, x: torch.Tensor, emb: torch.Tensor) -> torch.Tensor:
146
+ # convert back to the original dtype in case `conditioning_embedding` is upcasted to float32 (needed for hunyuanDiT)
147
+ shift, scale = torch.chunk(emb.flatten(1, 2).to(x.dtype), 2, dim=1)
148
+ x = self.norm(x) * (1 + scale)[:, None, :] + shift[:, None, :]
149
+ return x
150
+
151
+
152
+ class ChromaCombinedTimestepTextProjEmbeddings(nn.Module):
153
+ def __init__(self, num_channels: int, out_dim: int):
154
+ super().__init__()
155
+
156
+ self.time_proj = Timesteps(num_channels=num_channels, flip_sin_to_cos=True, downscale_freq_shift=0)
157
+ self.guidance_proj = Timesteps(num_channels=num_channels, flip_sin_to_cos=True, downscale_freq_shift=0)
158
+
159
+ self.register_buffer(
160
+ "mod_proj",
161
+ get_timestep_embedding(
162
+ torch.arange(out_dim) * 1000, 2 * num_channels, flip_sin_to_cos=True, downscale_freq_shift=0
163
+ ),
164
+ persistent=False,
165
+ )
166
+
167
+ def forward(self, timestep: torch.Tensor) -> torch.Tensor:
168
+ mod_index_length = self.mod_proj.shape[0]
169
+ batch_size = timestep.shape[0]
170
+
171
+ timesteps_proj = self.time_proj(timestep).to(dtype=timestep.dtype)
172
+ guidance_proj = self.guidance_proj(torch.tensor([0] * batch_size)).to(
173
+ dtype=timestep.dtype, device=timestep.device
174
+ )
175
+
176
+ mod_proj = self.mod_proj.to(dtype=timesteps_proj.dtype, device=timesteps_proj.device).repeat(batch_size, 1, 1)
177
+ timestep_guidance = (
178
+ torch.cat([timesteps_proj, guidance_proj], dim=1).unsqueeze(1).repeat(1, mod_index_length, 1)
179
+ )
180
+ input_vec = torch.cat([timestep_guidance, mod_proj], dim=-1)
181
+ return input_vec.to(timestep.dtype)
182
+
183
+
184
+ class ChromaApproximator(nn.Module):
185
+ def __init__(self, in_dim: int, out_dim: int, hidden_dim: int, n_layers: int = 5):
186
+ super().__init__()
187
+ self.in_proj = nn.Linear(in_dim, hidden_dim, bias=True)
188
+ self.layers = nn.ModuleList(
189
+ [PixArtAlphaTextProjection(hidden_dim, hidden_dim, act_fn="silu") for _ in range(n_layers)]
190
+ )
191
+ self.norms = nn.ModuleList([nn.RMSNorm(hidden_dim) for _ in range(n_layers)])
192
+ self.out_proj = nn.Linear(hidden_dim, out_dim)
193
+
194
+ def forward(self, x):
195
+ x = self.in_proj(x)
196
+
197
+ for layer, norms in zip(self.layers, self.norms):
198
+ x = x + layer(norms(x))
199
+
200
+ return self.out_proj(x)
201
+
202
+
203
+ @maybe_allow_in_graph
204
+ class ChromaSingleTransformerBlock(nn.Module):
205
+ def __init__(
206
+ self,
207
+ dim: int,
208
+ num_attention_heads: int,
209
+ attention_head_dim: int,
210
+ mlp_ratio: float = 4.0,
211
+ ):
212
+ super().__init__()
213
+ self.mlp_hidden_dim = int(dim * mlp_ratio)
214
+ self.norm = ChromaAdaLayerNormZeroSinglePruned(dim)
215
+ self.proj_mlp = nn.Linear(dim, self.mlp_hidden_dim)
216
+ self.act_mlp = nn.GELU(approximate="tanh")
217
+ self.proj_out = nn.Linear(dim + self.mlp_hidden_dim, dim)
218
+
219
+ if is_torch_npu_available():
220
+ from ..attention_processor import FluxAttnProcessor2_0_NPU
221
+
222
+ deprecation_message = (
223
+ "Defaulting to FluxAttnProcessor2_0_NPU for NPU devices will be removed. Attention processors "
224
+ "should be set explicitly using the `set_attn_processor` method."
225
+ )
226
+ deprecate("npu_processor", "0.34.0", deprecation_message)
227
+ processor = FluxAttnProcessor2_0_NPU()
228
+ else:
229
+ processor = FluxAttnProcessor()
230
+
231
+ self.attn = FluxAttention(
232
+ query_dim=dim,
233
+ dim_head=attention_head_dim,
234
+ heads=num_attention_heads,
235
+ out_dim=dim,
236
+ bias=True,
237
+ processor=processor,
238
+ eps=1e-6,
239
+ pre_only=True,
240
+ )
241
+
242
+ def forward(
243
+ self,
244
+ hidden_states: torch.Tensor,
245
+ temb: torch.Tensor,
246
+ image_rotary_emb: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
247
+ attention_mask: Optional[torch.Tensor] = None,
248
+ joint_attention_kwargs: Optional[Dict[str, Any]] = None,
249
+ ) -> torch.Tensor:
250
+ residual = hidden_states
251
+ norm_hidden_states, gate = self.norm(hidden_states, emb=temb)
252
+ mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states))
253
+ joint_attention_kwargs = joint_attention_kwargs or {}
254
+
255
+ if attention_mask is not None:
256
+ attention_mask = attention_mask[:, None, None, :] * attention_mask[:, None, :, None]
257
+
258
+ attn_output = self.attn(
259
+ hidden_states=norm_hidden_states,
260
+ image_rotary_emb=image_rotary_emb,
261
+ attention_mask=attention_mask,
262
+ **joint_attention_kwargs,
263
+ )
264
+
265
+ hidden_states = torch.cat([attn_output, mlp_hidden_states], dim=2)
266
+ gate = gate.unsqueeze(1)
267
+ hidden_states = gate * self.proj_out(hidden_states)
268
+ hidden_states = residual + hidden_states
269
+ if hidden_states.dtype == torch.float16:
270
+ hidden_states = hidden_states.clip(-65504, 65504)
271
+
272
+ return hidden_states
273
+
274
+
275
+ @maybe_allow_in_graph
276
+ class ChromaTransformerBlock(nn.Module):
277
+ def __init__(
278
+ self,
279
+ dim: int,
280
+ num_attention_heads: int,
281
+ attention_head_dim: int,
282
+ qk_norm: str = "rms_norm",
283
+ eps: float = 1e-6,
284
+ ):
285
+ super().__init__()
286
+ self.norm1 = ChromaAdaLayerNormZeroPruned(dim)
287
+ self.norm1_context = ChromaAdaLayerNormZeroPruned(dim)
288
+
289
+ self.attn = FluxAttention(
290
+ query_dim=dim,
291
+ added_kv_proj_dim=dim,
292
+ dim_head=attention_head_dim,
293
+ heads=num_attention_heads,
294
+ out_dim=dim,
295
+ context_pre_only=False,
296
+ bias=True,
297
+ processor=FluxAttnProcessor(),
298
+ eps=eps,
299
+ )
300
+
301
+ self.norm2 = nn.LayerNorm(dim, elementwise_affine=False, eps=1e-6)
302
+ self.ff = FeedForward(dim=dim, dim_out=dim, activation_fn="gelu-approximate")
303
+
304
+ self.norm2_context = nn.LayerNorm(dim, elementwise_affine=False, eps=1e-6)
305
+ self.ff_context = FeedForward(dim=dim, dim_out=dim, activation_fn="gelu-approximate")
306
+
307
+ def forward(
308
+ self,
309
+ hidden_states: torch.Tensor,
310
+ encoder_hidden_states: torch.Tensor,
311
+ temb: torch.Tensor,
312
+ image_rotary_emb: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
313
+ attention_mask: Optional[torch.Tensor] = None,
314
+ joint_attention_kwargs: Optional[Dict[str, Any]] = None,
315
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
316
+ temb_img, temb_txt = temb[:, :6], temb[:, 6:]
317
+ norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb_img)
318
+
319
+ norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context(
320
+ encoder_hidden_states, emb=temb_txt
321
+ )
322
+ joint_attention_kwargs = joint_attention_kwargs or {}
323
+ if attention_mask is not None:
324
+ attention_mask = attention_mask[:, None, None, :] * attention_mask[:, None, :, None]
325
+
326
+ # Attention.
327
+ attention_outputs = self.attn(
328
+ hidden_states=norm_hidden_states,
329
+ encoder_hidden_states=norm_encoder_hidden_states,
330
+ image_rotary_emb=image_rotary_emb,
331
+ attention_mask=attention_mask,
332
+ **joint_attention_kwargs,
333
+ )
334
+
335
+ if len(attention_outputs) == 2:
336
+ attn_output, context_attn_output = attention_outputs
337
+ elif len(attention_outputs) == 3:
338
+ attn_output, context_attn_output, ip_attn_output = attention_outputs
339
+
340
+ # Process attention outputs for the `hidden_states`.
341
+ attn_output = gate_msa.unsqueeze(1) * attn_output
342
+ hidden_states = hidden_states + attn_output
343
+
344
+ norm_hidden_states = self.norm2(hidden_states)
345
+ norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
346
+
347
+ ff_output = self.ff(norm_hidden_states)
348
+ ff_output = gate_mlp.unsqueeze(1) * ff_output
349
+
350
+ hidden_states = hidden_states + ff_output
351
+ if len(attention_outputs) == 3:
352
+ hidden_states = hidden_states + ip_attn_output
353
+
354
+ # Process attention outputs for the `encoder_hidden_states`.
355
+
356
+ context_attn_output = c_gate_msa.unsqueeze(1) * context_attn_output
357
+ encoder_hidden_states = encoder_hidden_states + context_attn_output
358
+
359
+ norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states)
360
+ norm_encoder_hidden_states = norm_encoder_hidden_states * (1 + c_scale_mlp[:, None]) + c_shift_mlp[:, None]
361
+
362
+ context_ff_output = self.ff_context(norm_encoder_hidden_states)
363
+ encoder_hidden_states = encoder_hidden_states + c_gate_mlp.unsqueeze(1) * context_ff_output
364
+ if encoder_hidden_states.dtype == torch.float16:
365
+ encoder_hidden_states = encoder_hidden_states.clip(-65504, 65504)
366
+
367
+ return encoder_hidden_states, hidden_states
368
+
369
+
370
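Both streams above follow the AdaLN-Zero pattern: six conditioning vectors per block supply shift/scale for the norm and a gate on each residual branch. A minimal sketch of that modulation, with illustrative names and sizes:

import torch

def adaln_zero(x, norm, shift, scale, gate, sublayer):
    # x: (batch, seq, dim); shift/scale/gate: (batch, dim)
    h = norm(x) * (1 + scale[:, None]) + shift[:, None]
    return x + gate[:, None] * sublayer(h)

dim = 64
x = torch.randn(2, 16, dim)
shift, scale, gate = torch.randn(3, 2, dim)
norm = torch.nn.LayerNorm(dim, elementwise_affine=False, eps=1e-6)
y = adaln_zero(x, norm, shift, scale, gate, torch.nn.Linear(dim, dim))
print(y.shape)  # torch.Size([2, 16, 64])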
+ class ChromaTransformer2DModel(
371
+ ModelMixin,
372
+ ConfigMixin,
373
+ PeftAdapterMixin,
374
+ FromOriginalModelMixin,
375
+ FluxTransformer2DLoadersMixin,
376
+ CacheMixin,
377
+ AttentionMixin,
378
+ ):
379
+ """
380
+ The Transformer model introduced in Flux, modified for Chroma.
381
+
382
+ Reference: https://huggingface.co/lodestones/Chroma
383
+
384
+ Args:
385
+ patch_size (`int`, defaults to `1`):
386
+ Patch size to turn the input data into small patches.
387
+ in_channels (`int`, defaults to `64`):
388
+ The number of channels in the input.
389
+ out_channels (`int`, *optional*, defaults to `None`):
390
+ The number of channels in the output. If not specified, it defaults to `in_channels`.
391
+ num_layers (`int`, defaults to `19`):
392
+ The number of layers of dual stream DiT blocks to use.
393
+ num_single_layers (`int`, defaults to `38`):
394
+ The number of layers of single stream DiT blocks to use.
395
+ attention_head_dim (`int`, defaults to `128`):
396
+ The number of dimensions to use for each attention head.
397
+ num_attention_heads (`int`, defaults to `24`):
398
+ The number of attention heads to use.
399
+ joint_attention_dim (`int`, defaults to `4096`):
400
+ The number of dimensions to use for the joint attention (embedding/channel dimension of
401
+ `encoder_hidden_states`).
402
+ axes_dims_rope (`Tuple[int]`, defaults to `(16, 56, 56)`):
403
+ The dimensions to use for the rotary positional embeddings.
404
+ """
405
+
406
+ _supports_gradient_checkpointing = True
407
+ _no_split_modules = ["ChromaTransformerBlock", "ChromaSingleTransformerBlock"]
408
+ _repeated_blocks = ["ChromaTransformerBlock", "ChromaSingleTransformerBlock"]
409
+ _skip_layerwise_casting_patterns = ["pos_embed", "norm"]
410
+
411
+ @register_to_config
412
+ def __init__(
413
+ self,
414
+ patch_size: int = 1,
415
+ in_channels: int = 64,
416
+ out_channels: Optional[int] = None,
417
+ num_layers: int = 19,
418
+ num_single_layers: int = 38,
419
+ attention_head_dim: int = 128,
420
+ num_attention_heads: int = 24,
421
+ joint_attention_dim: int = 4096,
422
+ axes_dims_rope: Tuple[int, ...] = (16, 56, 56),
423
+ approximator_num_channels: int = 64,
424
+ approximator_hidden_dim: int = 5120,
425
+ approximator_layers: int = 5,
426
+ ):
427
+ super().__init__()
428
+ self.out_channels = out_channels or in_channels
429
+ self.inner_dim = num_attention_heads * attention_head_dim
430
+
431
+ self.pos_embed = FluxPosEmbed(theta=10000, axes_dim=axes_dims_rope)
432
+
433
+ self.time_text_embed = ChromaCombinedTimestepTextProjEmbeddings(
434
+ num_channels=approximator_num_channels // 4,
435
+ out_dim=3 * num_single_layers + 2 * 6 * num_layers + 2,
436
+ )
437
+ self.distilled_guidance_layer = ChromaApproximator(
438
+ in_dim=approximator_num_channels,
439
+ out_dim=self.inner_dim,
440
+ hidden_dim=approximator_hidden_dim,
441
+ n_layers=approximator_layers,
442
+ )
443
+
444
+ self.context_embedder = nn.Linear(joint_attention_dim, self.inner_dim)
445
+ self.x_embedder = nn.Linear(in_channels, self.inner_dim)
446
+
447
+ self.transformer_blocks = nn.ModuleList(
448
+ [
449
+ ChromaTransformerBlock(
450
+ dim=self.inner_dim,
451
+ num_attention_heads=num_attention_heads,
452
+ attention_head_dim=attention_head_dim,
453
+ )
454
+ for _ in range(num_layers)
455
+ ]
456
+ )
457
+
458
+ self.single_transformer_blocks = nn.ModuleList(
459
+ [
460
+ ChromaSingleTransformerBlock(
461
+ dim=self.inner_dim,
462
+ num_attention_heads=num_attention_heads,
463
+ attention_head_dim=attention_head_dim,
464
+ )
465
+ for _ in range(num_single_layers)
466
+ ]
467
+ )
468
+
469
+ self.norm_out = ChromaAdaLayerNormContinuousPruned(
470
+ self.inner_dim, self.inner_dim, elementwise_affine=False, eps=1e-6
471
+ )
472
+ self.proj_out = nn.Linear(self.inner_dim, patch_size * patch_size * self.out_channels, bias=True)
473
+
474
+ self.gradient_checkpointing = False
475
+
476
+ def forward(
477
+ self,
478
+ hidden_states: torch.Tensor,
479
+ encoder_hidden_states: torch.Tensor = None,
480
+ timestep: torch.LongTensor = None,
481
+ img_ids: torch.Tensor = None,
482
+ txt_ids: torch.Tensor = None,
483
+ attention_mask: torch.Tensor = None,
484
+ joint_attention_kwargs: Optional[Dict[str, Any]] = None,
485
+ controlnet_block_samples=None,
486
+ controlnet_single_block_samples=None,
487
+ return_dict: bool = True,
488
+ controlnet_blocks_repeat: bool = False,
489
+ ) -> Union[torch.Tensor, Transformer2DModelOutput]:
490
+ """
491
+ The [`ChromaTransformer2DModel`] forward method.
492
+
493
+ Args:
494
+ hidden_states (`torch.Tensor` of shape `(batch_size, image_sequence_length, in_channels)`):
495
+ Input `hidden_states`.
496
+ encoder_hidden_states (`torch.Tensor` of shape `(batch_size, text_sequence_length, joint_attention_dim)`):
497
+ Conditional embeddings (embeddings computed from the input conditions such as prompts) to use.
498
+ timestep ( `torch.LongTensor`):
499
+ Used to indicate denoising step.
500
+ controlnet_block_samples (`list` of `torch.Tensor`, *optional*):
501
+ A list of tensors that if specified are added to the residuals of transformer blocks.
502
+ joint_attention_kwargs (`dict`, *optional*):
503
+ A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
504
+ `self.processor` in
505
+ [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
506
+ return_dict (`bool`, *optional*, defaults to `True`):
507
+ Whether or not to return a [`~models.transformer_2d.Transformer2DModelOutput`] instead of a plain
508
+ tuple.
509
+
510
+ Returns:
511
+ If `return_dict` is True, an [`~models.transformer_2d.Transformer2DModelOutput`] is returned, otherwise a
512
+ `tuple` where the first element is the sample tensor.
513
+ """
514
+ if joint_attention_kwargs is not None:
515
+ joint_attention_kwargs = joint_attention_kwargs.copy()
516
+ lora_scale = joint_attention_kwargs.pop("scale", 1.0)
517
+ else:
518
+ lora_scale = 1.0
519
+
520
+ if USE_PEFT_BACKEND:
521
+ # weight the lora layers by setting `lora_scale` for each PEFT layer
522
+ scale_lora_layers(self, lora_scale)
523
+ else:
524
+ if joint_attention_kwargs is not None and joint_attention_kwargs.get("scale", None) is not None:
525
+ logger.warning(
526
+ "Passing `scale` via `joint_attention_kwargs` when not using the PEFT backend is ineffective."
527
+ )
528
+
529
+ hidden_states = self.x_embedder(hidden_states)
530
+
531
+ timestep = timestep.to(hidden_states.dtype) * 1000
532
+
533
+ input_vec = self.time_text_embed(timestep)
534
+ pooled_temb = self.distilled_guidance_layer(input_vec)
535
+
536
+ encoder_hidden_states = self.context_embedder(encoder_hidden_states)
537
+
538
+ if txt_ids.ndim == 3:
539
+ logger.warning(
540
+ "Passing `txt_ids` 3d torch.Tensor is deprecated."
541
+ "Please remove the batch dimension and pass it as a 2d torch Tensor"
542
+ )
543
+ txt_ids = txt_ids[0]
544
+ if img_ids.ndim == 3:
545
+ logger.warning(
546
+ "Passing `img_ids` 3d torch.Tensor is deprecated."
547
+ "Please remove the batch dimension and pass it as a 2d torch Tensor"
548
+ )
549
+ img_ids = img_ids[0]
550
+
551
+ ids = torch.cat((txt_ids, img_ids), dim=0)
552
+ image_rotary_emb = self.pos_embed(ids)
553
+
554
+ if joint_attention_kwargs is not None and "ip_adapter_image_embeds" in joint_attention_kwargs:
555
+ ip_adapter_image_embeds = joint_attention_kwargs.pop("ip_adapter_image_embeds")
556
+ ip_hidden_states = self.encoder_hid_proj(ip_adapter_image_embeds)
557
+ joint_attention_kwargs.update({"ip_hidden_states": ip_hidden_states})
558
+
559
+ for index_block, block in enumerate(self.transformer_blocks):
560
+ img_offset = 3 * len(self.single_transformer_blocks)
561
+ txt_offset = img_offset + 6 * len(self.transformer_blocks)
562
+ img_modulation = img_offset + 6 * index_block
563
+ text_modulation = txt_offset + 6 * index_block
564
+ temb = torch.cat(
565
+ (
566
+ pooled_temb[:, img_modulation : img_modulation + 6],
567
+ pooled_temb[:, text_modulation : text_modulation + 6],
568
+ ),
569
+ dim=1,
570
+ )
571
+ if torch.is_grad_enabled() and self.gradient_checkpointing:
572
+ encoder_hidden_states, hidden_states = self._gradient_checkpointing_func(
573
+ block, hidden_states, encoder_hidden_states, temb, image_rotary_emb, attention_mask
574
+ )
575
+
576
+ else:
577
+ encoder_hidden_states, hidden_states = block(
578
+ hidden_states=hidden_states,
579
+ encoder_hidden_states=encoder_hidden_states,
580
+ temb=temb,
581
+ image_rotary_emb=image_rotary_emb,
582
+ attention_mask=attention_mask,
583
+ joint_attention_kwargs=joint_attention_kwargs,
584
+ )
585
+
586
+ # controlnet residual
587
+ if controlnet_block_samples is not None:
588
+ interval_control = len(self.transformer_blocks) / len(controlnet_block_samples)
589
+ interval_control = int(np.ceil(interval_control))
590
+ # For Xlabs ControlNet.
591
+ if controlnet_blocks_repeat:
592
+ hidden_states = (
593
+ hidden_states + controlnet_block_samples[index_block % len(controlnet_block_samples)]
594
+ )
595
+ else:
596
+ hidden_states = hidden_states + controlnet_block_samples[index_block // interval_control]
597
+ hidden_states = torch.cat([encoder_hidden_states, hidden_states], dim=1)
598
+
599
+ for index_block, block in enumerate(self.single_transformer_blocks):
600
+ start_idx = 3 * index_block
601
+ temb = pooled_temb[:, start_idx : start_idx + 3]
602
+ if torch.is_grad_enabled() and self.gradient_checkpointing:
603
+ hidden_states = self._gradient_checkpointing_func(
604
+ block,
605
+ hidden_states,
606
+ temb,
607
+ image_rotary_emb,
608
+ )
609
+
610
+ else:
611
+ hidden_states = block(
612
+ hidden_states=hidden_states,
613
+ temb=temb,
614
+ image_rotary_emb=image_rotary_emb,
615
+ attention_mask=attention_mask,
616
+ joint_attention_kwargs=joint_attention_kwargs,
617
+ )
618
+
619
+ # controlnet residual
620
+ if controlnet_single_block_samples is not None:
621
+ interval_control = len(self.single_transformer_blocks) / len(controlnet_single_block_samples)
622
+ interval_control = int(np.ceil(interval_control))
623
+ hidden_states[:, encoder_hidden_states.shape[1] :, ...] = (
624
+ hidden_states[:, encoder_hidden_states.shape[1] :, ...]
625
+ + controlnet_single_block_samples[index_block // interval_control]
626
+ )
627
+
628
+ hidden_states = hidden_states[:, encoder_hidden_states.shape[1] :, ...]
629
+
630
+ temb = pooled_temb[:, -2:]
631
+ hidden_states = self.norm_out(hidden_states, temb)
632
+ output = self.proj_out(hidden_states)
633
+
634
+ if USE_PEFT_BACKEND:
635
+ # remove `lora_scale` from each PEFT layer
636
+ unscale_lora_layers(self, lora_scale)
637
+
638
+ if not return_dict:
639
+ return (output,)
640
+
641
+ return Transformer2DModelOutput(sample=output)
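The `pooled_temb` slicing in the forward above follows a fixed layout of the approximator output: three vectors per single-stream block come first, then six image plus six text vectors per dual-stream block, then two final vectors for `norm_out`. A small bookkeeping sketch under the default config (19 dual-stream / 38 single-stream blocks); the helper names are illustrative:

num_layers, num_single_layers = 19, 38

img_offset = 3 * num_single_layers               # single-stream slots come first
txt_offset = img_offset + 6 * num_layers         # then the image slots of the dual-stream blocks

def dual_block_slices(i):
    img = slice(img_offset + 6 * i, img_offset + 6 * i + 6)
    txt = slice(txt_offset + 6 * i, txt_offset + 6 * i + 6)
    return img, txt

def single_block_slice(i):
    return slice(3 * i, 3 * i + 3)

total = 3 * num_single_layers + 2 * 6 * num_layers + 2   # matches time_text_embed(out_dim=...)
print(total)  # 344; the last two vectors feed norm_out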
exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/models/transformers/transformer_cogview3plus.py ADDED
@@ -0,0 +1,370 @@
1
+ # Copyright 2025 The CogView team, Tsinghua University & ZhipuAI and The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+
16
+ from typing import Dict, Union
17
+
18
+ import torch
19
+ import torch.nn as nn
20
+
21
+ from ...configuration_utils import ConfigMixin, register_to_config
22
+ from ...utils import logging
23
+ from ..attention import FeedForward
24
+ from ..attention_processor import Attention, AttentionProcessor, CogVideoXAttnProcessor2_0
25
+ from ..embeddings import CogView3CombinedTimestepSizeEmbeddings, CogView3PlusPatchEmbed
26
+ from ..modeling_outputs import Transformer2DModelOutput
27
+ from ..modeling_utils import ModelMixin
28
+ from ..normalization import AdaLayerNormContinuous, CogView3PlusAdaLayerNormZeroTextImage
29
+
30
+
31
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
32
+
33
+
34
+ class CogView3PlusTransformerBlock(nn.Module):
35
+ r"""
36
+ Transformer block used in [CogView](https://github.com/THUDM/CogView3) model.
37
+
38
+ Args:
39
+ dim (`int`):
40
+ The number of channels in the input and output.
41
+ num_attention_heads (`int`):
42
+ The number of heads to use for multi-head attention.
43
+ attention_head_dim (`int`):
44
+ The number of channels in each head.
45
+ time_embed_dim (`int`):
46
+ The number of channels in timestep embedding.
47
+ """
48
+
49
+ def __init__(
50
+ self,
51
+ dim: int = 2560,
52
+ num_attention_heads: int = 64,
53
+ attention_head_dim: int = 40,
54
+ time_embed_dim: int = 512,
55
+ ):
56
+ super().__init__()
57
+
58
+ self.norm1 = CogView3PlusAdaLayerNormZeroTextImage(embedding_dim=time_embed_dim, dim=dim)
59
+
60
+ self.attn1 = Attention(
61
+ query_dim=dim,
62
+ heads=num_attention_heads,
63
+ dim_head=attention_head_dim,
64
+ out_dim=dim,
65
+ bias=True,
66
+ qk_norm="layer_norm",
67
+ elementwise_affine=False,
68
+ eps=1e-6,
69
+ processor=CogVideoXAttnProcessor2_0(),
70
+ )
71
+
72
+ self.norm2 = nn.LayerNorm(dim, elementwise_affine=False, eps=1e-5)
73
+ self.norm2_context = nn.LayerNorm(dim, elementwise_affine=False, eps=1e-5)
74
+
75
+ self.ff = FeedForward(dim=dim, dim_out=dim, activation_fn="gelu-approximate")
76
+
77
+ def forward(
78
+ self,
79
+ hidden_states: torch.Tensor,
80
+ encoder_hidden_states: torch.Tensor,
81
+ emb: torch.Tensor,
82
+ ) -> torch.Tensor:
83
+ text_seq_length = encoder_hidden_states.size(1)
84
+
85
+ # norm & modulate
86
+ (
87
+ norm_hidden_states,
88
+ gate_msa,
89
+ shift_mlp,
90
+ scale_mlp,
91
+ gate_mlp,
92
+ norm_encoder_hidden_states,
93
+ c_gate_msa,
94
+ c_shift_mlp,
95
+ c_scale_mlp,
96
+ c_gate_mlp,
97
+ ) = self.norm1(hidden_states, encoder_hidden_states, emb)
98
+
99
+ # attention
100
+ attn_hidden_states, attn_encoder_hidden_states = self.attn1(
101
+ hidden_states=norm_hidden_states, encoder_hidden_states=norm_encoder_hidden_states
102
+ )
103
+
104
+ hidden_states = hidden_states + gate_msa.unsqueeze(1) * attn_hidden_states
105
+ encoder_hidden_states = encoder_hidden_states + c_gate_msa.unsqueeze(1) * attn_encoder_hidden_states
106
+
107
+ # norm & modulate
108
+ norm_hidden_states = self.norm2(hidden_states)
109
+ norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
110
+
111
+ norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states)
112
+ norm_encoder_hidden_states = norm_encoder_hidden_states * (1 + c_scale_mlp[:, None]) + c_shift_mlp[:, None]
113
+
114
+ # feed-forward
115
+ norm_hidden_states = torch.cat([norm_encoder_hidden_states, norm_hidden_states], dim=1)
116
+ ff_output = self.ff(norm_hidden_states)
117
+
118
+ hidden_states = hidden_states + gate_mlp.unsqueeze(1) * ff_output[:, text_seq_length:]
119
+ encoder_hidden_states = encoder_hidden_states + c_gate_mlp.unsqueeze(1) * ff_output[:, :text_seq_length]
120
+
121
+ if hidden_states.dtype == torch.float16:
122
+ hidden_states = hidden_states.clip(-65504, 65504)
123
+ if encoder_hidden_states.dtype == torch.float16:
124
+ encoder_hidden_states = encoder_hidden_states.clip(-65504, 65504)
125
+ return hidden_states, encoder_hidden_states
126
+
127
+
128
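The feed-forward in the block above runs once over the concatenated [text, image] sequence and is split back by the text length. A shape sketch with illustrative sizes (the real hidden size is 2560):

import torch
import torch.nn as nn

dim, text_len, image_len = 64, 4, 16
ff = nn.Sequential(nn.Linear(dim, 4 * dim), nn.GELU(approximate="tanh"), nn.Linear(4 * dim, dim))

norm_text = torch.randn(1, text_len, dim)
norm_image = torch.randn(1, image_len, dim)

joint = torch.cat([norm_text, norm_image], dim=1)   # (1, text_len + image_len, dim)
ff_out = ff(joint)
image_update = ff_out[:, text_len:]                  # gated and added to hidden_states
text_update = ff_out[:, :text_len]                   # gated and added to encoder_hidden_states
print(image_update.shape, text_update.shape)         # (1, 16, 64) and (1, 4, 64)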
+ class CogView3PlusTransformer2DModel(ModelMixin, ConfigMixin):
129
+ r"""
130
+ The Transformer model introduced in [CogView3: Finer and Faster Text-to-Image Generation via Relay
131
+ Diffusion](https://huggingface.co/papers/2403.05121).
132
+
133
+ Args:
134
+ patch_size (`int`, defaults to `2`):
135
+ The size of the patches to use in the patch embedding layer.
136
+ in_channels (`int`, defaults to `16`):
137
+ The number of channels in the input.
138
+ num_layers (`int`, defaults to `30`):
139
+ The number of layers of Transformer blocks to use.
140
+ attention_head_dim (`int`, defaults to `40`):
141
+ The number of channels in each head.
142
+ num_attention_heads (`int`, defaults to `64`):
143
+ The number of heads to use for multi-head attention.
144
+ out_channels (`int`, defaults to `16`):
145
+ The number of channels in the output.
146
+ text_embed_dim (`int`, defaults to `4096`):
147
+ Input dimension of text embeddings from the text encoder.
148
+ time_embed_dim (`int`, defaults to `512`):
149
+ Output dimension of timestep embeddings.
150
+ condition_dim (`int`, defaults to `256`):
151
+ The embedding dimension of the input SDXL-style resolution conditions (original_size, target_size,
152
+ crop_coords).
153
+ pos_embed_max_size (`int`, defaults to `128`):
154
+ The maximum resolution of the positional embeddings, from which slices of shape `H x W` are taken and added
155
+ to input patched latents, where `H` and `W` are the latent height and width respectively. A value of 128
156
+ means that the maximum supported height and width for image generation is `128 * vae_scale_factor *
157
+ patch_size => 128 * 8 * 2 => 2048`.
158
+ sample_size (`int`, defaults to `128`):
159
+ The base resolution of input latents. If height/width is not provided during generation, this value is used
160
+ to determine the resolution as `sample_size * vae_scale_factor => 128 * 8 => 1024`
161
+ """
162
+
163
+ _supports_gradient_checkpointing = True
164
+ _skip_layerwise_casting_patterns = ["patch_embed", "norm"]
165
+ _no_split_modules = ["CogView3PlusTransformerBlock", "CogView3PlusPatchEmbed"]
166
+
167
+ @register_to_config
168
+ def __init__(
169
+ self,
170
+ patch_size: int = 2,
171
+ in_channels: int = 16,
172
+ num_layers: int = 30,
173
+ attention_head_dim: int = 40,
174
+ num_attention_heads: int = 64,
175
+ out_channels: int = 16,
176
+ text_embed_dim: int = 4096,
177
+ time_embed_dim: int = 512,
178
+ condition_dim: int = 256,
179
+ pos_embed_max_size: int = 128,
180
+ sample_size: int = 128,
181
+ ):
182
+ super().__init__()
183
+ self.out_channels = out_channels
184
+ self.inner_dim = num_attention_heads * attention_head_dim
185
+
186
+ # CogView3 uses 3 additional SDXL-like conditions - original_size, target_size, crop_coords
187
+ # Each of these are sincos embeddings of shape 2 * condition_dim
188
+ self.pooled_projection_dim = 3 * 2 * condition_dim
189
+
190
+ self.patch_embed = CogView3PlusPatchEmbed(
191
+ in_channels=in_channels,
192
+ hidden_size=self.inner_dim,
193
+ patch_size=patch_size,
194
+ text_hidden_size=text_embed_dim,
195
+ pos_embed_max_size=pos_embed_max_size,
196
+ )
197
+
198
+ self.time_condition_embed = CogView3CombinedTimestepSizeEmbeddings(
199
+ embedding_dim=time_embed_dim,
200
+ condition_dim=condition_dim,
201
+ pooled_projection_dim=self.pooled_projection_dim,
202
+ timesteps_dim=self.inner_dim,
203
+ )
204
+
205
+ self.transformer_blocks = nn.ModuleList(
206
+ [
207
+ CogView3PlusTransformerBlock(
208
+ dim=self.inner_dim,
209
+ num_attention_heads=num_attention_heads,
210
+ attention_head_dim=attention_head_dim,
211
+ time_embed_dim=time_embed_dim,
212
+ )
213
+ for _ in range(num_layers)
214
+ ]
215
+ )
216
+
217
+ self.norm_out = AdaLayerNormContinuous(
218
+ embedding_dim=self.inner_dim,
219
+ conditioning_embedding_dim=time_embed_dim,
220
+ elementwise_affine=False,
221
+ eps=1e-6,
222
+ )
223
+ self.proj_out = nn.Linear(self.inner_dim, patch_size * patch_size * self.out_channels, bias=True)
224
+
225
+ self.gradient_checkpointing = False
226
+
227
+ @property
228
+ # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.attn_processors
229
+ def attn_processors(self) -> Dict[str, AttentionProcessor]:
230
+ r"""
231
+ Returns:
232
+ `dict` of attention processors: A dictionary containing all attention processors used in the model,
233
+ indexed by their weight names.
234
+ """
235
+ # set recursively
236
+ processors = {}
237
+
238
+ def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
239
+ if hasattr(module, "get_processor"):
240
+ processors[f"{name}.processor"] = module.get_processor()
241
+
242
+ for sub_name, child in module.named_children():
243
+ fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)
244
+
245
+ return processors
246
+
247
+ for name, module in self.named_children():
248
+ fn_recursive_add_processors(name, module, processors)
249
+
250
+ return processors
251
+
252
+ # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_attn_processor
253
+ def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
254
+ r"""
255
+ Sets the attention processor to use to compute attention.
256
+
257
+ Parameters:
258
+ processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`):
259
+ The instantiated processor class or a dictionary of processor classes that will be set as the processor
260
+ for **all** `Attention` layers.
261
+
262
+ If `processor` is a dict, the key needs to define the path to the corresponding cross attention
263
+ processor. This is strongly recommended when setting trainable attention processors.
264
+
265
+ """
266
+ count = len(self.attn_processors.keys())
267
+
268
+ if isinstance(processor, dict) and len(processor) != count:
269
+ raise ValueError(
270
+ f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
271
+ f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
272
+ )
273
+
274
+ def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
275
+ if hasattr(module, "set_processor"):
276
+ if not isinstance(processor, dict):
277
+ module.set_processor(processor)
278
+ else:
279
+ module.set_processor(processor.pop(f"{name}.processor"))
280
+
281
+ for sub_name, child in module.named_children():
282
+ fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)
283
+
284
+ for name, module in self.named_children():
285
+ fn_recursive_attn_processor(name, module, processor)
286
+
287
+ def forward(
288
+ self,
289
+ hidden_states: torch.Tensor,
290
+ encoder_hidden_states: torch.Tensor,
291
+ timestep: torch.LongTensor,
292
+ original_size: torch.Tensor,
293
+ target_size: torch.Tensor,
294
+ crop_coords: torch.Tensor,
295
+ return_dict: bool = True,
296
+ ) -> Union[torch.Tensor, Transformer2DModelOutput]:
297
+ """
298
+ The [`CogView3PlusTransformer2DModel`] forward method.
299
+
300
+ Args:
301
+ hidden_states (`torch.Tensor`):
302
+ Input `hidden_states` of shape `(batch size, channel, height, width)`.
303
+ encoder_hidden_states (`torch.Tensor`):
304
+ Conditional embeddings (embeddings computed from the input conditions such as prompts) of shape
305
+ `(batch_size, sequence_len, text_embed_dim)`
306
+ timestep (`torch.LongTensor`):
307
+ Used to indicate denoising step.
308
+ original_size (`torch.Tensor`):
309
+ CogView3 uses SDXL-like micro-conditioning for original image size as explained in section 2.2 of
310
+ [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
311
+ target_size (`torch.Tensor`):
312
+ CogView3 uses SDXL-like micro-conditioning for target image size as explained in section 2.2 of
313
+ [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
314
+ crop_coords (`torch.Tensor`):
315
+ CogView3 uses SDXL-like micro-conditioning for crop coordinates as explained in section 2.2 of
316
+ [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
317
+ return_dict (`bool`, *optional*, defaults to `True`):
318
+ Whether or not to return a [`~models.transformer_2d.Transformer2DModelOutput`] instead of a plain
319
+ tuple.
320
+
321
+ Returns:
322
+ `torch.Tensor` or [`~models.transformer_2d.Transformer2DModelOutput`]:
323
+ The denoised latents using provided inputs as conditioning.
324
+ """
325
+ height, width = hidden_states.shape[-2:]
326
+ text_seq_length = encoder_hidden_states.shape[1]
327
+
328
+ hidden_states = self.patch_embed(
329
+ hidden_states, encoder_hidden_states
330
+ ) # takes care of adding positional embeddings too.
331
+ emb = self.time_condition_embed(timestep, original_size, target_size, crop_coords, hidden_states.dtype)
332
+
333
+ encoder_hidden_states = hidden_states[:, :text_seq_length]
334
+ hidden_states = hidden_states[:, text_seq_length:]
335
+
336
+ for index_block, block in enumerate(self.transformer_blocks):
337
+ if torch.is_grad_enabled() and self.gradient_checkpointing:
338
+ hidden_states, encoder_hidden_states = self._gradient_checkpointing_func(
339
+ block,
340
+ hidden_states,
341
+ encoder_hidden_states,
342
+ emb,
343
+ )
344
+ else:
345
+ hidden_states, encoder_hidden_states = block(
346
+ hidden_states=hidden_states,
347
+ encoder_hidden_states=encoder_hidden_states,
348
+ emb=emb,
349
+ )
350
+
351
+ hidden_states = self.norm_out(hidden_states, emb)
352
+ hidden_states = self.proj_out(hidden_states) # (batch_size, height*width, patch_size*patch_size*out_channels)
353
+
354
+ # unpatchify
355
+ patch_size = self.config.patch_size
356
+ height = height // patch_size
357
+ width = width // patch_size
358
+
359
+ hidden_states = hidden_states.reshape(
360
+ shape=(hidden_states.shape[0], height, width, self.out_channels, patch_size, patch_size)
361
+ )
362
+ hidden_states = torch.einsum("nhwcpq->nchpwq", hidden_states)
363
+ output = hidden_states.reshape(
364
+ shape=(hidden_states.shape[0], self.out_channels, height * patch_size, width * patch_size)
365
+ )
366
+
367
+ if not return_dict:
368
+ return (output,)
369
+
370
+ return Transformer2DModelOutput(sample=output)
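The unpatchify at the end of the forward above inverts a channel-first p x p patchify. A self-contained round-trip check; the patchify below is written for this sketch to match the ordering the reshape/einsum expects, and is not code from the diff:

import torch

B, C, H, W, p = 1, 16, 8, 8, 2
h, w = H // p, W // p

latents = torch.randn(B, C, H, W)

# illustrative patchify: (B, C, H, W) -> (B, h*w, C*p*p)
patches = latents.reshape(B, C, h, p, w, p).permute(0, 2, 4, 1, 3, 5).reshape(B, h * w, C * p * p)

# unpatchify exactly as in the forward above
x = patches.reshape(B, h, w, C, p, p)
x = torch.einsum("nhwcpq->nchpwq", x)
restored = x.reshape(B, C, h * p, w * p)

assert torch.equal(restored, latents)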
exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/models/transformers/transformer_cogview4.py ADDED
@@ -0,0 +1,788 @@
1
+ # Copyright 2025 The CogView team, Tsinghua University & ZhipuAI and The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import Any, Dict, List, Optional, Tuple, Union
16
+
17
+ import torch
18
+ import torch.nn as nn
19
+ import torch.nn.functional as F
20
+
21
+ from ...configuration_utils import ConfigMixin, register_to_config
22
+ from ...loaders import PeftAdapterMixin
23
+ from ...utils import USE_PEFT_BACKEND, logging, scale_lora_layers, unscale_lora_layers
24
+ from ...utils.torch_utils import maybe_allow_in_graph
25
+ from ..attention import FeedForward
26
+ from ..attention_processor import Attention
27
+ from ..cache_utils import CacheMixin
28
+ from ..embeddings import CogView3CombinedTimestepSizeEmbeddings
29
+ from ..modeling_outputs import Transformer2DModelOutput
30
+ from ..modeling_utils import ModelMixin
31
+ from ..normalization import LayerNorm, RMSNorm
32
+
33
+
34
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
35
+
36
+
37
+ class CogView4PatchEmbed(nn.Module):
38
+ def __init__(
39
+ self,
40
+ in_channels: int = 16,
41
+ hidden_size: int = 2560,
42
+ patch_size: int = 2,
43
+ text_hidden_size: int = 4096,
44
+ ):
45
+ super().__init__()
46
+ self.patch_size = patch_size
47
+
48
+ self.proj = nn.Linear(in_channels * patch_size**2, hidden_size)
49
+ self.text_proj = nn.Linear(text_hidden_size, hidden_size)
50
+
51
+ def forward(self, hidden_states: torch.Tensor, encoder_hidden_states: torch.Tensor) -> torch.Tensor:
52
+ batch_size, channel, height, width = hidden_states.shape
53
+ post_patch_height = height // self.patch_size
54
+ post_patch_width = width // self.patch_size
55
+
56
+ hidden_states = hidden_states.reshape(
57
+ batch_size, channel, post_patch_height, self.patch_size, post_patch_width, self.patch_size
58
+ )
59
+ hidden_states = hidden_states.permute(0, 2, 4, 1, 3, 5).flatten(3, 5).flatten(1, 2)
60
+ hidden_states = self.proj(hidden_states)
61
+ encoder_hidden_states = self.text_proj(encoder_hidden_states)
62
+
63
+ return hidden_states, encoder_hidden_states
64
+
65
+
66
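A quick shape check of the patch embedding above with illustrative sizes: the pixel-space reshape/permute yields one token per p x p patch, and both streams are projected to the same hidden size:

import torch

B, C, H, W, p = 2, 16, 8, 8, 2
hidden_size, text_hidden = 64, 32

x = torch.randn(B, C, H, W)
x = x.reshape(B, C, H // p, p, W // p, p).permute(0, 2, 4, 1, 3, 5).flatten(3, 5).flatten(1, 2)
print(x.shape)  # torch.Size([2, 16, 64]) -> (B, (H/p)*(W/p), C*p*p)

img_tokens = torch.nn.Linear(C * p * p, hidden_size)(x)
txt_tokens = torch.nn.Linear(text_hidden, hidden_size)(torch.randn(B, 7, text_hidden))
print(img_tokens.shape, txt_tokens.shape)  # both end in hidden_size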
+ class CogView4AdaLayerNormZero(nn.Module):
67
+ def __init__(self, embedding_dim: int, dim: int) -> None:
68
+ super().__init__()
69
+
70
+ self.norm = nn.LayerNorm(dim, elementwise_affine=False, eps=1e-5)
71
+ self.norm_context = nn.LayerNorm(dim, elementwise_affine=False, eps=1e-5)
72
+ self.linear = nn.Linear(embedding_dim, 12 * dim, bias=True)
73
+
74
+ def forward(
75
+ self, hidden_states: torch.Tensor, encoder_hidden_states: torch.Tensor, temb: torch.Tensor
76
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
77
+ dtype = hidden_states.dtype
78
+ norm_hidden_states = self.norm(hidden_states).to(dtype=dtype)
79
+ norm_encoder_hidden_states = self.norm_context(encoder_hidden_states).to(dtype=dtype)
80
+
81
+ emb = self.linear(temb)
82
+ (
83
+ shift_msa,
84
+ c_shift_msa,
85
+ scale_msa,
86
+ c_scale_msa,
87
+ gate_msa,
88
+ c_gate_msa,
89
+ shift_mlp,
90
+ c_shift_mlp,
91
+ scale_mlp,
92
+ c_scale_mlp,
93
+ gate_mlp,
94
+ c_gate_mlp,
95
+ ) = emb.chunk(12, dim=1)
96
+
97
+ hidden_states = norm_hidden_states * (1 + scale_msa.unsqueeze(1)) + shift_msa.unsqueeze(1)
98
+ encoder_hidden_states = norm_encoder_hidden_states * (1 + c_scale_msa.unsqueeze(1)) + c_shift_msa.unsqueeze(1)
99
+
100
+ return (
101
+ hidden_states,
102
+ gate_msa,
103
+ shift_mlp,
104
+ scale_mlp,
105
+ gate_mlp,
106
+ encoder_hidden_states,
107
+ c_gate_msa,
108
+ c_shift_mlp,
109
+ c_scale_mlp,
110
+ c_gate_mlp,
111
+ )
112
+
113
+
114
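The conditioning split above maps the timestep embedding to twelve vectors with a single linear, ordered as interleaved (shift, scale, gate) pairs for attention and MLP on the image and text streams. A sketch with illustrative sizes:

import torch
import torch.nn as nn

embedding_dim, dim, batch = 512, 64, 2
temb = torch.randn(batch, embedding_dim)

emb = nn.Linear(embedding_dim, 12 * dim)(temb)
chunks = emb.chunk(12, dim=1)          # 12 tensors of shape (batch, dim)
(shift_msa, c_shift_msa, scale_msa, c_scale_msa,
 gate_msa, c_gate_msa, shift_mlp, c_shift_mlp,
 scale_mlp, c_scale_mlp, gate_mlp, c_gate_mlp) = chunks
print(len(chunks), chunks[0].shape)    # 12, torch.Size([2, 64])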
+ class CogView4AttnProcessor:
115
+ """
116
+ Processor for implementing scaled dot-product attention for the CogView4 model. It applies a rotary embedding on
117
+ query and key vectors, but does not include spatial normalization.
118
+
119
+ The processor supports passing an attention mask for text tokens. The attention mask should have shape (batch_size,
120
+ text_seq_length) where 1 indicates a non-padded token and 0 indicates a padded token.
121
+ """
122
+
123
+ def __init__(self):
124
+ if not hasattr(F, "scaled_dot_product_attention"):
125
+ raise ImportError("CogView4AttnProcessor requires PyTorch 2.0. To use it, please upgrade PyTorch to 2.0.")
126
+
127
+ def __call__(
128
+ self,
129
+ attn: Attention,
130
+ hidden_states: torch.Tensor,
131
+ encoder_hidden_states: torch.Tensor,
132
+ attention_mask: Optional[torch.Tensor] = None,
133
+ image_rotary_emb: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
134
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
135
+ dtype = encoder_hidden_states.dtype
136
+
137
+ batch_size, text_seq_length, embed_dim = encoder_hidden_states.shape
138
+ batch_size, image_seq_length, embed_dim = hidden_states.shape
139
+ hidden_states = torch.cat([encoder_hidden_states, hidden_states], dim=1)
140
+
141
+ # 1. QKV projections
142
+ query = attn.to_q(hidden_states)
143
+ key = attn.to_k(hidden_states)
144
+ value = attn.to_v(hidden_states)
145
+
146
+ query = query.unflatten(2, (attn.heads, -1)).transpose(1, 2)
147
+ key = key.unflatten(2, (attn.heads, -1)).transpose(1, 2)
148
+ value = value.unflatten(2, (attn.heads, -1)).transpose(1, 2)
149
+
150
+ # 2. QK normalization
151
+ if attn.norm_q is not None:
152
+ query = attn.norm_q(query).to(dtype=dtype)
153
+ if attn.norm_k is not None:
154
+ key = attn.norm_k(key).to(dtype=dtype)
155
+
156
+ # 3. Rotational positional embeddings applied to latent stream
157
+ if image_rotary_emb is not None:
158
+ from ..embeddings import apply_rotary_emb
159
+
160
+ query[:, :, text_seq_length:, :] = apply_rotary_emb(
161
+ query[:, :, text_seq_length:, :], image_rotary_emb, use_real_unbind_dim=-2
162
+ )
163
+ key[:, :, text_seq_length:, :] = apply_rotary_emb(
164
+ key[:, :, text_seq_length:, :], image_rotary_emb, use_real_unbind_dim=-2
165
+ )
166
+
167
+ # 4. Attention
168
+ if attention_mask is not None:
169
+ text_attn_mask = attention_mask
170
+ assert text_attn_mask.dim() == 2, "the shape of text_attn_mask should be (batch_size, text_seq_length)"
171
+ text_attn_mask = text_attn_mask.float().to(query.device)
172
+ mix_attn_mask = torch.ones((batch_size, text_seq_length + image_seq_length), device=query.device)
173
+ mix_attn_mask[:, :text_seq_length] = text_attn_mask
174
+ mix_attn_mask = mix_attn_mask.unsqueeze(2)
175
+ attn_mask_matrix = mix_attn_mask @ mix_attn_mask.transpose(1, 2)
176
+ attention_mask = (attn_mask_matrix > 0).unsqueeze(1).to(query.dtype)
177
+
178
+ hidden_states = F.scaled_dot_product_attention(
179
+ query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False
180
+ )
181
+ hidden_states = hidden_states.transpose(1, 2).flatten(2, 3)
182
+ hidden_states = hidden_states.type_as(query)
183
+
184
+ # 5. Output projection
185
+ hidden_states = attn.to_out[0](hidden_states)
186
+ hidden_states = attn.to_out[1](hidden_states)
187
+
188
+ encoder_hidden_states, hidden_states = hidden_states.split(
189
+ [text_seq_length, hidden_states.size(1) - text_seq_length], dim=1
190
+ )
191
+ return hidden_states, encoder_hidden_states
192
+
193
+
194
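The masking above expands a per-token text pad mask into a full 2D attention mask through an outer product; image tokens always attend. A small sketch with one padded text token and illustrative lengths:

import torch

batch, text_len, image_len = 1, 3, 4
text_mask = torch.tensor([[1.0, 1.0, 0.0]])             # last text token is padding

mix = torch.ones(batch, text_len + image_len)
mix[:, :text_len] = text_mask
mix = mix.unsqueeze(2)                                   # (batch, seq, 1)

attn_matrix = mix @ mix.transpose(1, 2)                  # (batch, seq, seq)
attention_mask = (attn_matrix > 0).unsqueeze(1)          # broadcast over attention heads
print(attention_mask[0, 0].int())
# row/column 2 (the padded text token) is masked out against every other token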
+ class CogView4TrainingAttnProcessor:
195
+ """
196
+ Training Processor for implementing scaled dot-product attention for the CogView4 model. It applies a rotary
197
+ embedding on query and key vectors, but does not include spatial normalization.
198
+
199
+ This processor differs from CogView4AttnProcessor in several important ways:
200
+ 1. It supports attention masking with variable sequence lengths for multi-resolution training
201
+ 2. It unpacks and repacks sequences for efficient training with variable sequence lengths when batch_flag is
202
+ provided
203
+ """
204
+
205
+ def __init__(self):
206
+ if not hasattr(F, "scaled_dot_product_attention"):
207
+ raise ImportError("CogView4AttnProcessor requires PyTorch 2.0. To use it, please upgrade PyTorch to 2.0.")
208
+
209
+ def __call__(
210
+ self,
211
+ attn: Attention,
212
+ hidden_states: torch.Tensor,
213
+ encoder_hidden_states: torch.Tensor,
214
+ latent_attn_mask: Optional[torch.Tensor] = None,
215
+ text_attn_mask: Optional[torch.Tensor] = None,
216
+ batch_flag: Optional[torch.Tensor] = None,
217
+ image_rotary_emb: Optional[
218
+ Union[Tuple[torch.Tensor, torch.Tensor], List[Tuple[torch.Tensor, torch.Tensor]]]
219
+ ] = None,
220
+ **kwargs,
221
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
222
+ """
223
+ Args:
224
+ attn (`Attention`):
225
+ The attention module.
226
+ hidden_states (`torch.Tensor`):
227
+ The input hidden states.
228
+ encoder_hidden_states (`torch.Tensor`):
229
+ The encoder hidden states for cross-attention.
230
+ latent_attn_mask (`torch.Tensor`, *optional*):
231
+ Mask for latent tokens where 0 indicates pad token and 1 indicates non-pad token. If None, full
232
+ attention is used for all latent tokens. Note: the shape of latent_attn_mask is (batch_size,
233
+ num_latent_tokens).
234
+ text_attn_mask (`torch.Tensor`, *optional*):
235
+ Mask for text tokens where 0 indicates pad token and 1 indicates non-pad token. If None, full attention
236
+ is used for all text tokens.
237
+ batch_flag (`torch.Tensor`, *optional*):
238
+ Values from 0 to n-1 indicating which samples belong to the same batch. Samples with the same
239
+ batch_flag are packed together. Example: [0, 1, 1, 2, 2] means sample 0 forms batch0, samples 1-2 form
240
+ batch1, and samples 3-4 form batch2. If None, no packing is used.
241
+ image_rotary_emb (`Tuple[torch.Tensor, torch.Tensor]` or `list[Tuple[torch.Tensor, torch.Tensor]]`, *optional*):
242
+ The rotary embedding for the image part of the input.
243
+ Returns:
244
+ `Tuple[torch.Tensor, torch.Tensor]`: The processed hidden states for both image and text streams.
245
+ """
246
+
247
+ # Get dimensions and device info
248
+ batch_size, text_seq_length, embed_dim = encoder_hidden_states.shape
249
+ batch_size, image_seq_length, embed_dim = hidden_states.shape
250
+ dtype = encoder_hidden_states.dtype
251
+ device = encoder_hidden_states.device
252
+ latent_hidden_states = hidden_states
253
+ # Combine text and image streams for joint processing
254
+ mixed_hidden_states = torch.cat([encoder_hidden_states, latent_hidden_states], dim=1)
255
+
256
+ # 1. Construct attention mask and maybe packing input
257
+ # Create default masks if not provided
258
+ if text_attn_mask is None:
259
+ text_attn_mask = torch.ones((batch_size, text_seq_length), dtype=torch.int32, device=device)
260
+ if latent_attn_mask is None:
261
+ latent_attn_mask = torch.ones((batch_size, image_seq_length), dtype=torch.int32, device=device)
262
+
263
+ # Validate mask shapes and types
264
+ assert text_attn_mask.dim() == 2, "the shape of text_attn_mask should be (batch_size, text_seq_length)"
265
+ assert text_attn_mask.dtype == torch.int32, "the dtype of text_attn_mask should be torch.int32"
266
+ assert latent_attn_mask.dim() == 2, "the shape of latent_attn_mask should be (batch_size, num_latent_tokens)"
267
+ assert latent_attn_mask.dtype == torch.int32, "the dtype of latent_attn_mask should be torch.int32"
268
+
269
+ # Create combined mask for text and image tokens
270
+ mixed_attn_mask = torch.ones(
271
+ (batch_size, text_seq_length + image_seq_length), dtype=torch.int32, device=device
272
+ )
273
+ mixed_attn_mask[:, :text_seq_length] = text_attn_mask
274
+ mixed_attn_mask[:, text_seq_length:] = latent_attn_mask
275
+
276
+ # Convert mask to attention matrix format (where 1 means attend, 0 means don't attend)
277
+ mixed_attn_mask_input = mixed_attn_mask.unsqueeze(2).to(dtype=dtype)
278
+ attn_mask_matrix = mixed_attn_mask_input @ mixed_attn_mask_input.transpose(1, 2)
279
+
280
+ # Handle batch packing if enabled
281
+ if batch_flag is not None:
282
+ assert batch_flag.dim() == 1
283
+ # Determine packed batch size based on batch_flag
284
+ packing_batch_size = torch.max(batch_flag).item() + 1
285
+
286
+ # Calculate actual sequence lengths for each sample based on masks
287
+ text_seq_length = torch.sum(text_attn_mask, dim=1)
288
+ latent_seq_length = torch.sum(latent_attn_mask, dim=1)
289
+ mixed_seq_length = text_seq_length + latent_seq_length
290
+
291
+ # Calculate packed sequence lengths for each packed batch
292
+ mixed_seq_length_packed = [
293
+ torch.sum(mixed_attn_mask[batch_flag == batch_idx]).item() for batch_idx in range(packing_batch_size)
294
+ ]
295
+
296
+ assert len(mixed_seq_length_packed) == packing_batch_size
297
+
298
+ # Pack sequences by removing padding tokens
299
+ mixed_attn_mask_flatten = mixed_attn_mask.flatten(0, 1)
300
+ mixed_hidden_states_flatten = mixed_hidden_states.flatten(0, 1)
301
+ mixed_hidden_states_unpad = mixed_hidden_states_flatten[mixed_attn_mask_flatten == 1]
302
+ assert torch.sum(mixed_seq_length) == mixed_hidden_states_unpad.shape[0]
303
+
304
+ # Split the unpadded sequence into packed batches
305
+ mixed_hidden_states_packed = torch.split(mixed_hidden_states_unpad, mixed_seq_length_packed)
306
+
307
+ # Re-pad to create packed batches with right-side padding
308
+ mixed_hidden_states_packed_padded = torch.nn.utils.rnn.pad_sequence(
309
+ mixed_hidden_states_packed,
310
+ batch_first=True,
311
+ padding_value=0.0,
312
+ padding_side="right",
313
+ )
314
+
315
+ # Create attention mask for packed batches
316
+ l = mixed_hidden_states_packed_padded.shape[1]
317
+ attn_mask_matrix = torch.zeros(
318
+ (packing_batch_size, l, l),
319
+ dtype=dtype,
320
+ device=device,
321
+ )
322
+
323
+ # Fill attention mask with block diagonal matrices
324
+ # This ensures that tokens can only attend to other tokens within the same original sample
325
+ for idx, mask in enumerate(attn_mask_matrix):
326
+ seq_lengths = mixed_seq_length[batch_flag == idx]
327
+ offset = 0
328
+ for length in seq_lengths:
329
+ # Create a block of 1s for each sample in the packed batch
330
+ mask[offset : offset + length, offset : offset + length] = 1
331
+ offset += length
332
+
333
+ attn_mask_matrix = attn_mask_matrix.to(dtype=torch.bool)
334
+ attn_mask_matrix = attn_mask_matrix.unsqueeze(1) # Add attention head dim
335
+ attention_mask = attn_mask_matrix
336
+
337
+ # Prepare hidden states for attention computation
338
+ if batch_flag is None:
339
+ # If no packing, just combine text and image tokens
340
+ hidden_states = torch.cat([encoder_hidden_states, hidden_states], dim=1)
341
+ else:
342
+ # If packing, use the packed sequence
343
+ hidden_states = mixed_hidden_states_packed_padded
344
+
345
+ # 2. QKV projections - convert hidden states to query, key, value
346
+ query = attn.to_q(hidden_states)
347
+ key = attn.to_k(hidden_states)
348
+ value = attn.to_v(hidden_states)
349
+
350
+ # Reshape for multi-head attention: [batch, seq_len, heads*dim] -> [batch, heads, seq_len, dim]
351
+ query = query.unflatten(2, (attn.heads, -1)).transpose(1, 2)
352
+ key = key.unflatten(2, (attn.heads, -1)).transpose(1, 2)
353
+ value = value.unflatten(2, (attn.heads, -1)).transpose(1, 2)
354
+
355
+ # 3. QK normalization - apply layer norm to queries and keys if configured
356
+ if attn.norm_q is not None:
357
+ query = attn.norm_q(query).to(dtype=dtype)
358
+ if attn.norm_k is not None:
359
+ key = attn.norm_k(key).to(dtype=dtype)
360
+
361
+ # 4. Apply rotary positional embeddings to image tokens only
362
+ if image_rotary_emb is not None:
363
+ from ..embeddings import apply_rotary_emb
364
+
365
+ if batch_flag is None:
366
+ # Apply RoPE only to image tokens (after text tokens)
367
+ query[:, :, text_seq_length:, :] = apply_rotary_emb(
368
+ query[:, :, text_seq_length:, :], image_rotary_emb, use_real_unbind_dim=-2
369
+ )
370
+ key[:, :, text_seq_length:, :] = apply_rotary_emb(
371
+ key[:, :, text_seq_length:, :], image_rotary_emb, use_real_unbind_dim=-2
372
+ )
373
+ else:
374
+ # For packed batches, need to carefully apply RoPE to appropriate tokens
375
+ assert query.shape[0] == packing_batch_size
376
+ assert key.shape[0] == packing_batch_size
377
+ assert len(image_rotary_emb) == batch_size
378
+
379
+ rope_idx = 0
380
+ for idx in range(packing_batch_size):
381
+ offset = 0
382
+ # Get text and image sequence lengths for samples in this packed batch
383
+ text_seq_length_bi = text_seq_length[batch_flag == idx]
384
+ latent_seq_length_bi = latent_seq_length[batch_flag == idx]
385
+
386
+ # Apply RoPE to each image segment in the packed sequence
387
+ for tlen, llen in zip(text_seq_length_bi, latent_seq_length_bi):
388
+ mlen = tlen + llen
389
+ # Apply RoPE only to image tokens (after text tokens)
390
+ query[idx, :, offset + tlen : offset + mlen, :] = apply_rotary_emb(
391
+ query[idx, :, offset + tlen : offset + mlen, :],
392
+ image_rotary_emb[rope_idx],
393
+ use_real_unbind_dim=-2,
394
+ )
395
+ key[idx, :, offset + tlen : offset + mlen, :] = apply_rotary_emb(
396
+ key[idx, :, offset + tlen : offset + mlen, :],
397
+ image_rotary_emb[rope_idx],
398
+ use_real_unbind_dim=-2,
399
+ )
400
+ offset += mlen
401
+ rope_idx += 1
402
+
403
+ hidden_states = F.scaled_dot_product_attention(
404
+ query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False
405
+ )
406
+
407
+ # Reshape back: [batch, heads, seq_len, dim] -> [batch, seq_len, heads*dim]
408
+ hidden_states = hidden_states.transpose(1, 2).flatten(2, 3)
409
+ hidden_states = hidden_states.type_as(query)
410
+
411
+ # 5. Output projection - project attention output to model dimension
412
+ hidden_states = attn.to_out[0](hidden_states)
413
+ hidden_states = attn.to_out[1](hidden_states)
414
+
415
+ # Split the output back into text and image streams
416
+ if batch_flag is None:
417
+ # Simple split for non-packed case
418
+ encoder_hidden_states, hidden_states = hidden_states.split(
419
+ [text_seq_length, hidden_states.size(1) - text_seq_length], dim=1
420
+ )
421
+ else:
422
+ # For packed case: need to unpack, split text/image, then restore to original shapes
423
+ # First, unpad the sequence based on the packed sequence lengths
424
+ hidden_states_unpad = torch.nn.utils.rnn.unpad_sequence(
425
+ hidden_states,
426
+ lengths=torch.tensor(mixed_seq_length_packed),
427
+ batch_first=True,
428
+ )
429
+ # Concatenate all unpadded sequences
430
+ hidden_states_flatten = torch.cat(hidden_states_unpad, dim=0)
431
+ # Split by original sample sequence lengths
432
+ hidden_states_unpack = torch.split(hidden_states_flatten, mixed_seq_length.tolist())
433
+ assert len(hidden_states_unpack) == batch_size
434
+
435
+ # Further split each sample's sequence into text and image parts
436
+ hidden_states_unpack = [
437
+ torch.split(h, [tlen, llen])
438
+ for h, tlen, llen in zip(hidden_states_unpack, text_seq_length, latent_seq_length)
439
+ ]
440
+ # Separate text and image sequences
441
+ encoder_hidden_states_unpad = [h[0] for h in hidden_states_unpack]
442
+ hidden_states_unpad = [h[1] for h in hidden_states_unpack]
443
+
444
+ # Update the original tensors with the processed values, respecting the attention masks
445
+ for idx in range(batch_size):
446
+ # Place unpacked text tokens back in the encoder_hidden_states tensor
447
+ encoder_hidden_states[idx][text_attn_mask[idx] == 1] = encoder_hidden_states_unpad[idx]
448
+ # Place unpacked image tokens back in the latent_hidden_states tensor
449
+ latent_hidden_states[idx][latent_attn_mask[idx] == 1] = hidden_states_unpad[idx]
450
+
451
+ # Update the output hidden states
452
+ hidden_states = latent_hidden_states
453
+
454
+ return hidden_states, encoder_hidden_states
455
+
456
+
457
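The packing path above concatenates samples that share a `batch_flag` into one row and restricts attention with a block-diagonal mask, so tokens never attend across original samples. A condensed sketch with illustrative lengths:

import torch

# two samples packed into one row: lengths 5 and 3
seq_lengths = [5, 3]
packed_len = sum(seq_lengths)

mask = torch.zeros(packed_len, packed_len, dtype=torch.bool)
offset = 0
for length in seq_lengths:
    mask[offset:offset + length, offset:offset + length] = True   # block for one sample
    offset += length

print(mask.int())
# tokens of sample 0 (first 5) never attend to tokens of sample 1 (last 3), and vice versa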
+ @maybe_allow_in_graph
458
+ class CogView4TransformerBlock(nn.Module):
459
+ def __init__(
460
+ self,
461
+ dim: int = 2560,
462
+ num_attention_heads: int = 64,
463
+ attention_head_dim: int = 40,
464
+ time_embed_dim: int = 512,
465
+ ) -> None:
466
+ super().__init__()
467
+
468
+ # 1. Attention
469
+ self.norm1 = CogView4AdaLayerNormZero(time_embed_dim, dim)
470
+ self.attn1 = Attention(
471
+ query_dim=dim,
472
+ heads=num_attention_heads,
473
+ dim_head=attention_head_dim,
474
+ out_dim=dim,
475
+ bias=True,
476
+ qk_norm="layer_norm",
477
+ elementwise_affine=False,
478
+ eps=1e-5,
479
+ processor=CogView4AttnProcessor(),
480
+ )
481
+
482
+ # 2. Feedforward
483
+ self.norm2 = nn.LayerNorm(dim, elementwise_affine=False, eps=1e-5)
484
+ self.norm2_context = nn.LayerNorm(dim, elementwise_affine=False, eps=1e-5)
485
+ self.ff = FeedForward(dim=dim, dim_out=dim, activation_fn="gelu-approximate")
486
+
487
+ def forward(
488
+ self,
489
+ hidden_states: torch.Tensor,
490
+ encoder_hidden_states: torch.Tensor,
491
+ temb: Optional[torch.Tensor] = None,
492
+ image_rotary_emb: Optional[
493
+ Union[Tuple[torch.Tensor, torch.Tensor], List[Tuple[torch.Tensor, torch.Tensor]]]
494
+ ] = None,
495
+ attention_mask: Optional[Dict[str, torch.Tensor]] = None,
496
+ attention_kwargs: Optional[Dict[str, Any]] = None,
497
+ ) -> torch.Tensor:
498
+ # 1. Timestep conditioning
499
+ (
500
+ norm_hidden_states,
501
+ gate_msa,
502
+ shift_mlp,
503
+ scale_mlp,
504
+ gate_mlp,
505
+ norm_encoder_hidden_states,
506
+ c_gate_msa,
507
+ c_shift_mlp,
508
+ c_scale_mlp,
509
+ c_gate_mlp,
510
+ ) = self.norm1(hidden_states, encoder_hidden_states, temb)
511
+
512
+ # 2. Attention
513
+ if attention_kwargs is None:
514
+ attention_kwargs = {}
515
+ attn_hidden_states, attn_encoder_hidden_states = self.attn1(
516
+ hidden_states=norm_hidden_states,
517
+ encoder_hidden_states=norm_encoder_hidden_states,
518
+ image_rotary_emb=image_rotary_emb,
519
+ attention_mask=attention_mask,
520
+ **attention_kwargs,
521
+ )
522
+ hidden_states = hidden_states + attn_hidden_states * gate_msa.unsqueeze(1)
523
+ encoder_hidden_states = encoder_hidden_states + attn_encoder_hidden_states * c_gate_msa.unsqueeze(1)
524
+
525
+ # 3. Feedforward
526
+ norm_hidden_states = self.norm2(hidden_states) * (1 + scale_mlp.unsqueeze(1)) + shift_mlp.unsqueeze(1)
527
+ norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) * (
528
+ 1 + c_scale_mlp.unsqueeze(1)
529
+ ) + c_shift_mlp.unsqueeze(1)
530
+
531
+ ff_output = self.ff(norm_hidden_states)
532
+ ff_output_context = self.ff(norm_encoder_hidden_states)
533
+ hidden_states = hidden_states + ff_output * gate_mlp.unsqueeze(1)
534
+ encoder_hidden_states = encoder_hidden_states + ff_output_context * c_gate_mlp.unsqueeze(1)
535
+
536
+ return hidden_states, encoder_hidden_states
537
+
538
+
539
+ class CogView4RotaryPosEmbed(nn.Module):
540
+ def __init__(self, dim: int, patch_size: int, rope_axes_dim: Tuple[int, int], theta: float = 10000.0) -> None:
541
+ super().__init__()
542
+
543
+ self.dim = dim
544
+ self.patch_size = patch_size
545
+ self.rope_axes_dim = rope_axes_dim
546
+ self.theta = theta
547
+
548
+ def forward(self, hidden_states: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
549
+ batch_size, num_channels, height, width = hidden_states.shape
550
+ height, width = height // self.patch_size, width // self.patch_size
551
+
552
+ dim_h, dim_w = self.dim // 2, self.dim // 2
553
+ h_inv_freq = 1.0 / (
554
+ self.theta ** (torch.arange(0, dim_h, 2, dtype=torch.float32)[: (dim_h // 2)].float() / dim_h)
555
+ )
556
+ w_inv_freq = 1.0 / (
557
+ self.theta ** (torch.arange(0, dim_w, 2, dtype=torch.float32)[: (dim_w // 2)].float() / dim_w)
558
+ )
559
+ h_seq = torch.arange(self.rope_axes_dim[0])
560
+ w_seq = torch.arange(self.rope_axes_dim[1])
561
+ freqs_h = torch.outer(h_seq, h_inv_freq)
562
+ freqs_w = torch.outer(w_seq, w_inv_freq)
563
+
564
+ h_idx = torch.arange(height, device=freqs_h.device)
565
+ w_idx = torch.arange(width, device=freqs_w.device)
566
+ inner_h_idx = h_idx * self.rope_axes_dim[0] // height
567
+ inner_w_idx = w_idx * self.rope_axes_dim[1] // width
568
+
569
+ freqs_h = freqs_h[inner_h_idx]
570
+ freqs_w = freqs_w[inner_w_idx]
571
+
572
+ # Create position matrices for height and width
573
+ # [height, 1, dim//4] and [1, width, dim//4]
574
+ freqs_h = freqs_h.unsqueeze(1)
575
+ freqs_w = freqs_w.unsqueeze(0)
576
+ # Broadcast freqs_h and freqs_w to [height, width, dim//4]
577
+ freqs_h = freqs_h.expand(height, width, -1)
578
+ freqs_w = freqs_w.expand(height, width, -1)
579
+
580
+ # Concatenate along last dimension to get [height, width, dim//2]
581
+ freqs = torch.cat([freqs_h, freqs_w], dim=-1)
582
+ freqs = torch.cat([freqs, freqs], dim=-1) # [height, width, dim]
583
+ freqs = freqs.reshape(height * width, -1)
584
+ return (freqs.cos(), freqs.sin())
585
+
586
+
587
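A shape sketch of the rotary table built above: half of the head dimension encodes the row index and half the column index, giving one cos/sin pair per latent position. Sizes are illustrative, and the index rescaling against `rope_axes_dim` is omitted for brevity:

import torch

dim, height, width, theta = 8, 4, 6, 10000.0   # dim plays the role of attention_head_dim
dim_h = dim_w = dim // 2

h_inv = 1.0 / (theta ** (torch.arange(0, dim_h, 2).float() / dim_h))
w_inv = 1.0 / (theta ** (torch.arange(0, dim_w, 2).float() / dim_w))

freqs_h = torch.outer(torch.arange(height).float(), h_inv)     # (height, dim//4)
freqs_w = torch.outer(torch.arange(width).float(), w_inv)      # (width, dim//4)

freqs = torch.cat(
    [freqs_h[:, None].expand(height, width, -1), freqs_w[None, :].expand(height, width, -1)], dim=-1
)
freqs = torch.cat([freqs, freqs], dim=-1).reshape(height * width, dim)
print(freqs.cos().shape, freqs.sin().shape)   # torch.Size([24, 8]) each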
+ class CogView4AdaLayerNormContinuous(nn.Module):
588
+ """
589
+ CogView4-only final AdaLN: LN(x) -> Linear(cond) -> chunk -> affine. Matches Megatron: **no activation** before the
590
+ Linear on conditioning embedding.
591
+ """
592
+
593
+ def __init__(
594
+ self,
595
+ embedding_dim: int,
596
+ conditioning_embedding_dim: int,
597
+ elementwise_affine: bool = True,
598
+ eps: float = 1e-5,
599
+ bias: bool = True,
600
+ norm_type: str = "layer_norm",
601
+ ):
602
+ super().__init__()
603
+ self.linear = nn.Linear(conditioning_embedding_dim, embedding_dim * 2, bias=bias)
604
+ if norm_type == "layer_norm":
605
+ self.norm = LayerNorm(embedding_dim, eps, elementwise_affine, bias)
606
+ elif norm_type == "rms_norm":
607
+ self.norm = RMSNorm(embedding_dim, eps, elementwise_affine)
608
+ else:
609
+ raise ValueError(f"unknown norm_type {norm_type}")
610
+
611
+ def forward(self, x: torch.Tensor, conditioning_embedding: torch.Tensor) -> torch.Tensor:
612
+ # *** NO SiLU here ***
613
+ emb = self.linear(conditioning_embedding.to(x.dtype))
614
+ scale, shift = torch.chunk(emb, 2, dim=1)
615
+ x = self.norm(x) * (1 + scale)[:, None, :] + shift[:, None, :]
616
+ return x
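A small sketch of how this final AdaLN is applied (illustrative sizes); note that, as the docstring says, the conditioning embedding goes straight into the Linear with no SiLU.

import torch

norm_out = CogView4AdaLayerNormContinuous(embedding_dim=64, conditioning_embedding_dim=32, elementwise_affine=False)
x = torch.randn(2, 10, 64)     # (B, seq_len, dim)
cond = torch.randn(2, 32)      # (B, conditioning_dim)
y = norm_out(x, cond)          # LayerNorm(x) * (1 + scale) + shift, per sample
assert y.shape == x.shape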
617
+
618
+
619
+ class CogView4Transformer2DModel(ModelMixin, ConfigMixin, PeftAdapterMixin, CacheMixin):
620
+ r"""
621
+ Args:
622
+ patch_size (`int`, defaults to `2`):
623
+ The size of the patches to use in the patch embedding layer.
624
+ in_channels (`int`, defaults to `16`):
625
+ The number of channels in the input.
626
+ num_layers (`int`, defaults to `30`):
627
+ The number of layers of Transformer blocks to use.
628
+ attention_head_dim (`int`, defaults to `40`):
629
+ The number of channels in each head.
630
+ num_attention_heads (`int`, defaults to `64`):
631
+ The number of heads to use for multi-head attention.
632
+ out_channels (`int`, defaults to `16`):
633
+ The number of channels in the output.
634
+ text_embed_dim (`int`, defaults to `4096`):
635
+ Input dimension of text embeddings from the text encoder.
636
+ time_embed_dim (`int`, defaults to `512`):
637
+ Output dimension of timestep embeddings.
638
+ condition_dim (`int`, defaults to `256`):
639
+ The embedding dimension of the input SDXL-style resolution conditions (original_size, target_size,
640
+ crop_coords).
641
+ pos_embed_max_size (`int`, defaults to `128`):
642
+ The maximum resolution of the positional embeddings, from which slices of shape `H x W` are taken and added
643
+ to input patched latents, where `H` and `W` are the latent height and width respectively. A value of 128
644
+ means that the maximum supported height and width for image generation is `128 * vae_scale_factor *
645
+ patch_size => 128 * 8 * 2 => 2048`.
646
+ sample_size (`int`, defaults to `128`):
647
+ The base resolution of input latents. If height/width is not provided during generation, this value is used
648
+ to determine the resolution as `sample_size * vae_scale_factor => 128 * 8 => 1024`
649
+ """
650
+
651
+ _supports_gradient_checkpointing = True
652
+ _no_split_modules = ["CogView4TransformerBlock", "CogView4PatchEmbed", "CogView4PatchEmbed"]
653
+ _skip_layerwise_casting_patterns = ["patch_embed", "norm", "proj_out"]
654
+
655
+ @register_to_config
656
+ def __init__(
657
+ self,
658
+ patch_size: int = 2,
659
+ in_channels: int = 16,
660
+ out_channels: int = 16,
661
+ num_layers: int = 30,
662
+ attention_head_dim: int = 40,
663
+ num_attention_heads: int = 64,
664
+ text_embed_dim: int = 4096,
665
+ time_embed_dim: int = 512,
666
+ condition_dim: int = 256,
667
+ pos_embed_max_size: int = 128,
668
+ sample_size: int = 128,
669
+ rope_axes_dim: Tuple[int, int] = (256, 256),
670
+ ):
671
+ super().__init__()
672
+
673
+ # CogView4 uses 3 additional SDXL-like conditions - original_size, target_size, crop_coords
674
+ # Each of these are sincos embeddings of shape 2 * condition_dim
675
+ pooled_projection_dim = 3 * 2 * condition_dim
676
+ inner_dim = num_attention_heads * attention_head_dim
677
+ out_channels = out_channels
678
+
679
+ # 1. RoPE
680
+ self.rope = CogView4RotaryPosEmbed(attention_head_dim, patch_size, rope_axes_dim, theta=10000.0)
681
+
682
+ # 2. Patch & Text-timestep embedding
683
+ self.patch_embed = CogView4PatchEmbed(in_channels, inner_dim, patch_size, text_embed_dim)
684
+
685
+ self.time_condition_embed = CogView3CombinedTimestepSizeEmbeddings(
686
+ embedding_dim=time_embed_dim,
687
+ condition_dim=condition_dim,
688
+ pooled_projection_dim=pooled_projection_dim,
689
+ timesteps_dim=inner_dim,
690
+ )
691
+
692
+ # 3. Transformer blocks
693
+ self.transformer_blocks = nn.ModuleList(
694
+ [
695
+ CogView4TransformerBlock(inner_dim, num_attention_heads, attention_head_dim, time_embed_dim)
696
+ for _ in range(num_layers)
697
+ ]
698
+ )
699
+
700
+ # 4. Output projection
701
+ self.norm_out = CogView4AdaLayerNormContinuous(inner_dim, time_embed_dim, elementwise_affine=False)
702
+ self.proj_out = nn.Linear(inner_dim, patch_size * patch_size * out_channels, bias=True)
703
+
704
+ self.gradient_checkpointing = False
705
+
706
+ def forward(
707
+ self,
708
+ hidden_states: torch.Tensor,
709
+ encoder_hidden_states: torch.Tensor,
710
+ timestep: torch.LongTensor,
711
+ original_size: torch.Tensor,
712
+ target_size: torch.Tensor,
713
+ crop_coords: torch.Tensor,
714
+ attention_kwargs: Optional[Dict[str, Any]] = None,
715
+ return_dict: bool = True,
716
+ attention_mask: Optional[torch.Tensor] = None,
717
+ image_rotary_emb: Optional[
718
+ Union[Tuple[torch.Tensor, torch.Tensor], List[Tuple[torch.Tensor, torch.Tensor]]]
719
+ ] = None,
720
+ ) -> Union[torch.Tensor, Transformer2DModelOutput]:
721
+ if attention_kwargs is not None:
722
+ attention_kwargs = attention_kwargs.copy()
723
+ lora_scale = attention_kwargs.pop("scale", 1.0)
724
+ else:
725
+ lora_scale = 1.0
726
+
727
+ if USE_PEFT_BACKEND:
728
+ # weight the lora layers by setting `lora_scale` for each PEFT layer
729
+ scale_lora_layers(self, lora_scale)
730
+ else:
731
+ if attention_kwargs is not None and attention_kwargs.get("scale", None) is not None:
732
+ logger.warning(
733
+ "Passing `scale` via `attention_kwargs` when not using the PEFT backend is ineffective."
734
+ )
735
+
736
+ batch_size, num_channels, height, width = hidden_states.shape
737
+
738
+ # 1. RoPE
739
+ if image_rotary_emb is None:
740
+ image_rotary_emb = self.rope(hidden_states)
741
+
742
+ # 2. Patch & Timestep embeddings
743
+ p = self.config.patch_size
744
+ post_patch_height = height // p
745
+ post_patch_width = width // p
746
+
747
+ hidden_states, encoder_hidden_states = self.patch_embed(hidden_states, encoder_hidden_states)
748
+
749
+ temb = self.time_condition_embed(timestep, original_size, target_size, crop_coords, hidden_states.dtype)
750
+ temb = F.silu(temb)
751
+
752
+ # 3. Transformer blocks
753
+ for block in self.transformer_blocks:
754
+ if torch.is_grad_enabled() and self.gradient_checkpointing:
755
+ hidden_states, encoder_hidden_states = self._gradient_checkpointing_func(
756
+ block,
757
+ hidden_states,
758
+ encoder_hidden_states,
759
+ temb,
760
+ image_rotary_emb,
761
+ attention_mask,
762
+ attention_kwargs,
763
+ )
764
+ else:
765
+ hidden_states, encoder_hidden_states = block(
766
+ hidden_states,
767
+ encoder_hidden_states,
768
+ temb,
769
+ image_rotary_emb,
770
+ attention_mask,
771
+ attention_kwargs,
772
+ )
773
+
774
+ # 4. Output norm & projection
775
+ hidden_states = self.norm_out(hidden_states, temb)
776
+ hidden_states = self.proj_out(hidden_states)
777
+
778
+ # 5. Unpatchify
779
+ hidden_states = hidden_states.reshape(batch_size, post_patch_height, post_patch_width, -1, p, p)
780
+ output = hidden_states.permute(0, 3, 1, 4, 2, 5).flatten(4, 5).flatten(2, 3)
781
+
782
+ if USE_PEFT_BACKEND:
783
+ # remove `lora_scale` from each PEFT layer
784
+ unscale_lora_layers(self, lora_scale)
785
+
786
+ if not return_dict:
787
+ return (output,)
788
+ return Transformer2DModelOutput(sample=output)
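For orientation, a hedged end-to-end sketch with a deliberately tiny, non-default configuration; it only illustrates the call signature and the fact that the output is unpatchified back to the latent shape.

import torch

model = CogView4Transformer2DModel(
    patch_size=2, in_channels=4, out_channels=4, num_layers=1,
    attention_head_dim=8, num_attention_heads=2,
    text_embed_dim=16, time_embed_dim=16, condition_dim=4, rope_axes_dim=(8, 8),
)
latents = torch.randn(1, 4, 16, 16)                 # (B, C, H, W)
text = torch.randn(1, 8, 16)                        # (B, seq_len, text_embed_dim)
timestep = torch.tensor([500], dtype=torch.long)
size = torch.tensor([[16.0, 16.0]])                 # original_size / target_size, shape (B, 2)
crop = torch.tensor([[0.0, 0.0]])                   # crop_coords, shape (B, 2)
sample = model(latents, text, timestep, size, size, crop, attention_kwargs={}, return_dict=False)[0]
assert sample.shape == latents.shape                # (B, out_channels, H, W)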
exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/models/transformers/transformer_cosmos.py ADDED
@@ -0,0 +1,586 @@
1
+ # Copyright 2025 The NVIDIA Team and The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import Optional, Tuple
16
+
17
+ import numpy as np
18
+ import torch
19
+ import torch.nn as nn
20
+ import torch.nn.functional as F
21
+
22
+ from ...configuration_utils import ConfigMixin, register_to_config
23
+ from ...loaders import FromOriginalModelMixin
24
+ from ...utils import is_torchvision_available
25
+ from ..attention import FeedForward
26
+ from ..attention_processor import Attention
27
+ from ..embeddings import Timesteps
28
+ from ..modeling_outputs import Transformer2DModelOutput
29
+ from ..modeling_utils import ModelMixin
30
+ from ..normalization import RMSNorm
31
+
32
+
33
+ if is_torchvision_available():
34
+ from torchvision import transforms
35
+
36
+
37
+ class CosmosPatchEmbed(nn.Module):
38
+ def __init__(
39
+ self, in_channels: int, out_channels: int, patch_size: Tuple[int, int, int], bias: bool = True
40
+ ) -> None:
41
+ super().__init__()
42
+ self.patch_size = patch_size
43
+
44
+ self.proj = nn.Linear(in_channels * patch_size[0] * patch_size[1] * patch_size[2], out_channels, bias=bias)
45
+
46
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
47
+ batch_size, num_channels, num_frames, height, width = hidden_states.shape
48
+ p_t, p_h, p_w = self.patch_size
49
+ hidden_states = hidden_states.reshape(
50
+ batch_size, num_channels, num_frames // p_t, p_t, height // p_h, p_h, width // p_w, p_w
51
+ )
52
+ hidden_states = hidden_states.permute(0, 2, 4, 6, 1, 3, 5, 7).flatten(4, 7)
53
+ hidden_states = self.proj(hidden_states)
54
+ return hidden_states
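A quick shape check for the patchifier (illustrative sizes): (p_t, p_h, p_w) blocks are folded into the channel dimension before the linear projection.

import torch

patch_embed = CosmosPatchEmbed(in_channels=16, out_channels=32, patch_size=(1, 2, 2), bias=False)
video = torch.randn(1, 16, 4, 8, 8)        # (B, C, T, H, W)
tokens = patch_embed(video)
assert tokens.shape == (1, 4, 4, 4, 32)    # (B, T/p_t, H/p_h, W/p_w, out_channels)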
55
+
56
+
57
+ class CosmosTimestepEmbedding(nn.Module):
58
+ def __init__(self, in_features: int, out_features: int) -> None:
59
+ super().__init__()
60
+ self.linear_1 = nn.Linear(in_features, out_features, bias=False)
61
+ self.activation = nn.SiLU()
62
+ self.linear_2 = nn.Linear(out_features, 3 * out_features, bias=False)
63
+
64
+ def forward(self, timesteps: torch.Tensor) -> torch.Tensor:
65
+ emb = self.linear_1(timesteps)
66
+ emb = self.activation(emb)
67
+ emb = self.linear_2(emb)
68
+ return emb
69
+
70
+
71
+ class CosmosEmbedding(nn.Module):
72
+ def __init__(self, embedding_dim: int, condition_dim: int) -> None:
73
+ super().__init__()
74
+
75
+ self.time_proj = Timesteps(embedding_dim, flip_sin_to_cos=True, downscale_freq_shift=0.0)
76
+ self.t_embedder = CosmosTimestepEmbedding(embedding_dim, condition_dim)
77
+ self.norm = RMSNorm(embedding_dim, eps=1e-6, elementwise_affine=True)
78
+
79
+ def forward(self, hidden_states: torch.Tensor, timestep: torch.LongTensor) -> torch.Tensor:
80
+ timesteps_proj = self.time_proj(timestep).type_as(hidden_states)
81
+ temb = self.t_embedder(timesteps_proj)
82
+ embedded_timestep = self.norm(timesteps_proj)
83
+ return temb, embedded_timestep
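A small sketch of the time embedding (illustrative sizes): it returns both a modulation tensor that is 3 * condition_dim wide, consumed by the AdaLN-zero layers below, and the RMS-normalized sinusoidal embedding itself.

import torch

time_embed = CosmosEmbedding(embedding_dim=32, condition_dim=32)
hidden = torch.randn(2, 5, 32)             # only used to pick the dtype
timestep = torch.tensor([10.0, 250.0])
temb, embedded_timestep = time_embed(hidden, timestep)
assert temb.shape == (2, 3 * 32) and embedded_timestep.shape == (2, 32)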
84
+
85
+
86
+ class CosmosAdaLayerNorm(nn.Module):
87
+ def __init__(self, in_features: int, hidden_features: int) -> None:
88
+ super().__init__()
89
+ self.embedding_dim = in_features
90
+
91
+ self.activation = nn.SiLU()
92
+ self.norm = nn.LayerNorm(in_features, elementwise_affine=False, eps=1e-6)
93
+ self.linear_1 = nn.Linear(in_features, hidden_features, bias=False)
94
+ self.linear_2 = nn.Linear(hidden_features, 2 * in_features, bias=False)
95
+
96
+ def forward(
97
+ self, hidden_states: torch.Tensor, embedded_timestep: torch.Tensor, temb: Optional[torch.Tensor] = None
98
+ ) -> torch.Tensor:
99
+ embedded_timestep = self.activation(embedded_timestep)
100
+ embedded_timestep = self.linear_1(embedded_timestep)
101
+ embedded_timestep = self.linear_2(embedded_timestep)
102
+
103
+ if temb is not None:
104
+ embedded_timestep = embedded_timestep + temb[..., : 2 * self.embedding_dim]
105
+
106
+ shift, scale = embedded_timestep.chunk(2, dim=-1)
107
+ hidden_states = self.norm(hidden_states)
108
+
109
+ if embedded_timestep.ndim == 2:
110
+ shift, scale = (x.unsqueeze(1) for x in (shift, scale))
111
+
112
+ hidden_states = hidden_states * (1 + scale) + shift
113
+ return hidden_states
114
+
115
+
116
+ class CosmosAdaLayerNormZero(nn.Module):
117
+ def __init__(self, in_features: int, hidden_features: Optional[int] = None) -> None:
118
+ super().__init__()
119
+
120
+ self.norm = nn.LayerNorm(in_features, elementwise_affine=False, eps=1e-6)
121
+ self.activation = nn.SiLU()
122
+
123
+ if hidden_features is None:
124
+ self.linear_1 = nn.Identity()
125
+ else:
126
+ self.linear_1 = nn.Linear(in_features, hidden_features, bias=False)
127
+
128
+ self.linear_2 = nn.Linear(hidden_features, 3 * in_features, bias=False)
129
+
130
+ def forward(
131
+ self,
132
+ hidden_states: torch.Tensor,
133
+ embedded_timestep: torch.Tensor,
134
+ temb: Optional[torch.Tensor] = None,
135
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
136
+ embedded_timestep = self.activation(embedded_timestep)
137
+ embedded_timestep = self.linear_1(embedded_timestep)
138
+ embedded_timestep = self.linear_2(embedded_timestep)
139
+
140
+ if temb is not None:
141
+ embedded_timestep = embedded_timestep + temb
142
+
143
+ shift, scale, gate = embedded_timestep.chunk(3, dim=-1)
144
+ hidden_states = self.norm(hidden_states)
145
+
146
+ if embedded_timestep.ndim == 2:
147
+ shift, scale, gate = (x.unsqueeze(1) for x in (shift, scale, gate))
148
+
149
+ hidden_states = hidden_states * (1 + scale) + shift
150
+ return hidden_states, gate
151
+
152
+
153
+ class CosmosAttnProcessor2_0:
154
+ def __init__(self):
155
+ if not hasattr(F, "scaled_dot_product_attention"):
156
+ raise ImportError("CosmosAttnProcessor2_0 requires PyTorch 2.0. To use it, please upgrade PyTorch to 2.0.")
157
+
158
+ def __call__(
159
+ self,
160
+ attn: Attention,
161
+ hidden_states: torch.Tensor,
162
+ encoder_hidden_states: Optional[torch.Tensor] = None,
163
+ attention_mask: Optional[torch.Tensor] = None,
164
+ image_rotary_emb: Optional[torch.Tensor] = None,
165
+ ) -> torch.Tensor:
166
+ # 1. QKV projections
167
+ if encoder_hidden_states is None:
168
+ encoder_hidden_states = hidden_states
169
+
170
+ query = attn.to_q(hidden_states)
171
+ key = attn.to_k(encoder_hidden_states)
172
+ value = attn.to_v(encoder_hidden_states)
173
+
174
+ query = query.unflatten(2, (attn.heads, -1)).transpose(1, 2)
175
+ key = key.unflatten(2, (attn.heads, -1)).transpose(1, 2)
176
+ value = value.unflatten(2, (attn.heads, -1)).transpose(1, 2)
177
+
178
+ # 2. QK normalization
179
+ query = attn.norm_q(query)
180
+ key = attn.norm_k(key)
181
+
182
+ # 3. Apply RoPE
183
+ if image_rotary_emb is not None:
184
+ from ..embeddings import apply_rotary_emb
185
+
186
+ query = apply_rotary_emb(query, image_rotary_emb, use_real=True, use_real_unbind_dim=-2)
187
+ key = apply_rotary_emb(key, image_rotary_emb, use_real=True, use_real_unbind_dim=-2)
188
+
189
+ # 4. Prepare for GQA
190
+ if torch.onnx.is_in_onnx_export():
191
+ query_idx = torch.tensor(query.size(3), device=query.device)
192
+ key_idx = torch.tensor(key.size(3), device=key.device)
193
+ value_idx = torch.tensor(value.size(3), device=value.device)
194
+
195
+ else:
196
+ query_idx = query.size(3)
197
+ key_idx = key.size(3)
198
+ value_idx = value.size(3)
199
+ key = key.repeat_interleave(query_idx // key_idx, dim=3)
200
+ value = value.repeat_interleave(query_idx // value_idx, dim=3)
201
+
202
+ # 5. Attention
203
+ hidden_states = F.scaled_dot_product_attention(
204
+ query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False
205
+ )
206
+ hidden_states = hidden_states.transpose(1, 2).flatten(2, 3).type_as(query)
207
+
208
+ # 6. Output projection
209
+ hidden_states = attn.to_out[0](hidden_states)
210
+ hidden_states = attn.to_out[1](hidden_states)
211
+
212
+ return hidden_states
213
+
214
+
215
+ class CosmosTransformerBlock(nn.Module):
216
+ def __init__(
217
+ self,
218
+ num_attention_heads: int,
219
+ attention_head_dim: int,
220
+ cross_attention_dim: int,
221
+ mlp_ratio: float = 4.0,
222
+ adaln_lora_dim: int = 256,
223
+ qk_norm: str = "rms_norm",
224
+ out_bias: bool = False,
225
+ ) -> None:
226
+ super().__init__()
227
+
228
+ hidden_size = num_attention_heads * attention_head_dim
229
+
230
+ self.norm1 = CosmosAdaLayerNormZero(in_features=hidden_size, hidden_features=adaln_lora_dim)
231
+ self.attn1 = Attention(
232
+ query_dim=hidden_size,
233
+ cross_attention_dim=None,
234
+ heads=num_attention_heads,
235
+ dim_head=attention_head_dim,
236
+ qk_norm=qk_norm,
237
+ elementwise_affine=True,
238
+ out_bias=out_bias,
239
+ processor=CosmosAttnProcessor2_0(),
240
+ )
241
+
242
+ self.norm2 = CosmosAdaLayerNormZero(in_features=hidden_size, hidden_features=adaln_lora_dim)
243
+ self.attn2 = Attention(
244
+ query_dim=hidden_size,
245
+ cross_attention_dim=cross_attention_dim,
246
+ heads=num_attention_heads,
247
+ dim_head=attention_head_dim,
248
+ qk_norm=qk_norm,
249
+ elementwise_affine=True,
250
+ out_bias=out_bias,
251
+ processor=CosmosAttnProcessor2_0(),
252
+ )
253
+
254
+ self.norm3 = CosmosAdaLayerNormZero(in_features=hidden_size, hidden_features=adaln_lora_dim)
255
+ self.ff = FeedForward(hidden_size, mult=mlp_ratio, activation_fn="gelu", bias=out_bias)
256
+
257
+ def forward(
258
+ self,
259
+ hidden_states: torch.Tensor,
260
+ encoder_hidden_states: torch.Tensor,
261
+ embedded_timestep: torch.Tensor,
262
+ temb: Optional[torch.Tensor] = None,
263
+ image_rotary_emb: Optional[torch.Tensor] = None,
264
+ extra_pos_emb: Optional[torch.Tensor] = None,
265
+ attention_mask: Optional[torch.Tensor] = None,
266
+ ) -> torch.Tensor:
267
+ if extra_pos_emb is not None:
268
+ hidden_states = hidden_states + extra_pos_emb
269
+
270
+ # 1. Self Attention
271
+ norm_hidden_states, gate = self.norm1(hidden_states, embedded_timestep, temb)
272
+ attn_output = self.attn1(norm_hidden_states, image_rotary_emb=image_rotary_emb)
273
+ hidden_states = hidden_states + gate * attn_output
274
+
275
+ # 2. Cross Attention
276
+ norm_hidden_states, gate = self.norm2(hidden_states, embedded_timestep, temb)
277
+ attn_output = self.attn2(
278
+ norm_hidden_states, encoder_hidden_states=encoder_hidden_states, attention_mask=attention_mask
279
+ )
280
+ hidden_states = hidden_states + gate * attn_output
281
+
282
+ # 3. Feed Forward
283
+ norm_hidden_states, gate = self.norm3(hidden_states, embedded_timestep, temb)
284
+ ff_output = self.ff(norm_hidden_states)
285
+ hidden_states = hidden_states + gate * ff_output
286
+
287
+ return hidden_states
288
+
289
+
290
+ class CosmosRotaryPosEmbed(nn.Module):
291
+ def __init__(
292
+ self,
293
+ hidden_size: int,
294
+ max_size: Tuple[int, int, int] = (128, 240, 240),
295
+ patch_size: Tuple[int, int, int] = (1, 2, 2),
296
+ base_fps: int = 24,
297
+ rope_scale: Tuple[float, float, float] = (2.0, 1.0, 1.0),
298
+ ) -> None:
299
+ super().__init__()
300
+
301
+ self.max_size = [size // patch for size, patch in zip(max_size, patch_size)]
302
+ self.patch_size = patch_size
303
+ self.base_fps = base_fps
304
+
305
+ self.dim_h = hidden_size // 6 * 2
306
+ self.dim_w = hidden_size // 6 * 2
307
+ self.dim_t = hidden_size - self.dim_h - self.dim_w
308
+
309
+ self.h_ntk_factor = rope_scale[1] ** (self.dim_h / (self.dim_h - 2))
310
+ self.w_ntk_factor = rope_scale[2] ** (self.dim_w / (self.dim_w - 2))
311
+ self.t_ntk_factor = rope_scale[0] ** (self.dim_t / (self.dim_t - 2))
312
+
313
+ def forward(self, hidden_states: torch.Tensor, fps: Optional[int] = None) -> Tuple[torch.Tensor, torch.Tensor]:
314
+ batch_size, num_channels, num_frames, height, width = hidden_states.shape
315
+ pe_size = [num_frames // self.patch_size[0], height // self.patch_size[1], width // self.patch_size[2]]
316
+ device = hidden_states.device
317
+
318
+ h_theta = 10000.0 * self.h_ntk_factor
319
+ w_theta = 10000.0 * self.w_ntk_factor
320
+ t_theta = 10000.0 * self.t_ntk_factor
321
+
322
+ seq = torch.arange(max(self.max_size), device=device, dtype=torch.float32)
323
+ dim_h_range = (
324
+ torch.arange(0, self.dim_h, 2, device=device, dtype=torch.float32)[: (self.dim_h // 2)] / self.dim_h
325
+ )
326
+ dim_w_range = (
327
+ torch.arange(0, self.dim_w, 2, device=device, dtype=torch.float32)[: (self.dim_w // 2)] / self.dim_w
328
+ )
329
+ dim_t_range = (
330
+ torch.arange(0, self.dim_t, 2, device=device, dtype=torch.float32)[: (self.dim_t // 2)] / self.dim_t
331
+ )
332
+ h_spatial_freqs = 1.0 / (h_theta**dim_h_range)
333
+ w_spatial_freqs = 1.0 / (w_theta**dim_w_range)
334
+ temporal_freqs = 1.0 / (t_theta**dim_t_range)
335
+
336
+ emb_h = torch.outer(seq[: pe_size[1]], h_spatial_freqs)[None, :, None, :].repeat(pe_size[0], 1, pe_size[2], 1)
337
+ emb_w = torch.outer(seq[: pe_size[2]], w_spatial_freqs)[None, None, :, :].repeat(pe_size[0], pe_size[1], 1, 1)
338
+
339
+ # Apply sequence scaling in temporal dimension
340
+ if fps is None:
341
+ # Images
342
+ emb_t = torch.outer(seq[: pe_size[0]], temporal_freqs)
343
+ else:
344
+ # Videos
345
+ emb_t = torch.outer(seq[: pe_size[0]] / fps * self.base_fps, temporal_freqs)
346
+
347
+ emb_t = emb_t[:, None, None, :].repeat(1, pe_size[1], pe_size[2], 1)
348
+ freqs = torch.cat([emb_t, emb_h, emb_w] * 2, dim=-1).flatten(0, 2).float()
349
+ cos = torch.cos(freqs)
350
+ sin = torch.sin(freqs)
351
+ return cos, sin
352
+
353
+
354
+ class CosmosLearnablePositionalEmbed(nn.Module):
355
+ def __init__(
356
+ self,
357
+ hidden_size: int,
358
+ max_size: Tuple[int, int, int],
359
+ patch_size: Tuple[int, int, int],
360
+ eps: float = 1e-6,
361
+ ) -> None:
362
+ super().__init__()
363
+
364
+ self.max_size = [size // patch for size, patch in zip(max_size, patch_size)]
365
+ self.patch_size = patch_size
366
+ self.eps = eps
367
+
368
+ self.pos_emb_t = nn.Parameter(torch.zeros(self.max_size[0], hidden_size))
369
+ self.pos_emb_h = nn.Parameter(torch.zeros(self.max_size[1], hidden_size))
370
+ self.pos_emb_w = nn.Parameter(torch.zeros(self.max_size[2], hidden_size))
371
+
372
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
373
+ batch_size, num_channels, num_frames, height, width = hidden_states.shape
374
+ pe_size = [num_frames // self.patch_size[0], height // self.patch_size[1], width // self.patch_size[2]]
375
+
376
+ emb_t = self.pos_emb_t[: pe_size[0]][None, :, None, None, :].repeat(batch_size, 1, pe_size[1], pe_size[2], 1)
377
+ emb_h = self.pos_emb_h[: pe_size[1]][None, None, :, None, :].repeat(batch_size, pe_size[0], 1, pe_size[2], 1)
378
+ emb_w = self.pos_emb_w[: pe_size[2]][None, None, None, :, :].repeat(batch_size, pe_size[0], pe_size[1], 1, 1)
379
+ emb = emb_t + emb_h + emb_w
380
+ emb = emb.flatten(1, 3)
381
+
382
+ norm = torch.linalg.vector_norm(emb, dim=-1, keepdim=True, dtype=torch.float32)
383
+ norm = torch.add(self.eps, norm, alpha=np.sqrt(norm.numel() / emb.numel()))
384
+ return (emb / norm).type_as(hidden_states)
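A quick shape sketch (illustrative sizes): the three learned per-axis tables are broadcast-added and then scaled to roughly unit RMS before being added to the patch tokens.

import torch

pos = CosmosLearnablePositionalEmbed(hidden_size=32, max_size=(4, 16, 16), patch_size=(1, 2, 2))
emb = pos(torch.randn(1, 3, 2, 8, 8))      # (B, C, T, H, W); only T/H/W and patch_size matter here
assert emb.shape == (1, 2 * 4 * 4, 32)     # (B, num_patches, hidden_size)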
385
+
386
+
387
+ class CosmosTransformer3DModel(ModelMixin, ConfigMixin, FromOriginalModelMixin):
388
+ r"""
389
+ A Transformer model for video-like data used in [Cosmos](https://github.com/NVIDIA/Cosmos).
390
+
391
+ Args:
392
+ in_channels (`int`, defaults to `16`):
393
+ The number of channels in the input.
394
+ out_channels (`int`, defaults to `16`):
395
+ The number of channels in the output.
396
+ num_attention_heads (`int`, defaults to `32`):
397
+ The number of heads to use for multi-head attention.
398
+ attention_head_dim (`int`, defaults to `128`):
399
+ The number of channels in each attention head.
400
+ num_layers (`int`, defaults to `28`):
401
+ The number of layers of transformer blocks to use.
402
+ mlp_ratio (`float`, defaults to `4.0`):
403
+ The ratio of the hidden layer size to the input size in the feedforward network.
404
+ text_embed_dim (`int`, defaults to `1024`):
405
+ Input dimension of text embeddings from the text encoder.
406
+ adaln_lora_dim (`int`, defaults to `256`):
407
+ The hidden dimension of the Adaptive LayerNorm LoRA layer.
408
+ max_size (`Tuple[int, int, int]`, defaults to `(128, 240, 240)`):
409
+ The maximum size of the input latent tensors in the temporal, height, and width dimensions.
410
+ patch_size (`Tuple[int, int, int]`, defaults to `(1, 2, 2)`):
411
+ The patch size to use for patchifying the input latent tensors in the temporal, height, and width
412
+ dimensions.
413
+ rope_scale (`Tuple[float, float, float]`, defaults to `(2.0, 1.0, 1.0)`):
414
+ The scaling factor to use for RoPE in the temporal, height, and width dimensions.
415
+ concat_padding_mask (`bool`, defaults to `True`):
416
+ Whether to concatenate the padding mask to the input latent tensors.
417
+ extra_pos_embed_type (`str`, *optional*, defaults to `learnable`):
418
+ The type of extra positional embeddings to use. Can be one of `None` or `learnable`.
419
+ """
420
+
421
+ _supports_gradient_checkpointing = True
422
+ _skip_layerwise_casting_patterns = ["patch_embed", "final_layer", "norm"]
423
+ _no_split_modules = ["CosmosTransformerBlock"]
424
+ _keep_in_fp32_modules = ["learnable_pos_embed"]
425
+
426
+ @register_to_config
427
+ def __init__(
428
+ self,
429
+ in_channels: int = 16,
430
+ out_channels: int = 16,
431
+ num_attention_heads: int = 32,
432
+ attention_head_dim: int = 128,
433
+ num_layers: int = 28,
434
+ mlp_ratio: float = 4.0,
435
+ text_embed_dim: int = 1024,
436
+ adaln_lora_dim: int = 256,
437
+ max_size: Tuple[int, int, int] = (128, 240, 240),
438
+ patch_size: Tuple[int, int, int] = (1, 2, 2),
439
+ rope_scale: Tuple[float, float, float] = (2.0, 1.0, 1.0),
440
+ concat_padding_mask: bool = True,
441
+ extra_pos_embed_type: Optional[str] = "learnable",
442
+ ) -> None:
443
+ super().__init__()
444
+ hidden_size = num_attention_heads * attention_head_dim
445
+
446
+ # 1. Patch Embedding
447
+ patch_embed_in_channels = in_channels + 1 if concat_padding_mask else in_channels
448
+ self.patch_embed = CosmosPatchEmbed(patch_embed_in_channels, hidden_size, patch_size, bias=False)
449
+
450
+ # 2. Positional Embedding
451
+ self.rope = CosmosRotaryPosEmbed(
452
+ hidden_size=attention_head_dim, max_size=max_size, patch_size=patch_size, rope_scale=rope_scale
453
+ )
454
+
455
+ self.learnable_pos_embed = None
456
+ if extra_pos_embed_type == "learnable":
457
+ self.learnable_pos_embed = CosmosLearnablePositionalEmbed(
458
+ hidden_size=hidden_size,
459
+ max_size=max_size,
460
+ patch_size=patch_size,
461
+ )
462
+
463
+ # 3. Time Embedding
464
+ self.time_embed = CosmosEmbedding(hidden_size, hidden_size)
465
+
466
+ # 4. Transformer Blocks
467
+ self.transformer_blocks = nn.ModuleList(
468
+ [
469
+ CosmosTransformerBlock(
470
+ num_attention_heads=num_attention_heads,
471
+ attention_head_dim=attention_head_dim,
472
+ cross_attention_dim=text_embed_dim,
473
+ mlp_ratio=mlp_ratio,
474
+ adaln_lora_dim=adaln_lora_dim,
475
+ qk_norm="rms_norm",
476
+ out_bias=False,
477
+ )
478
+ for _ in range(num_layers)
479
+ ]
480
+ )
481
+
482
+ # 5. Output norm & projection
483
+ self.norm_out = CosmosAdaLayerNorm(hidden_size, adaln_lora_dim)
484
+ self.proj_out = nn.Linear(
485
+ hidden_size, patch_size[0] * patch_size[1] * patch_size[2] * out_channels, bias=False
486
+ )
487
+
488
+ self.gradient_checkpointing = False
489
+
490
+ def forward(
491
+ self,
492
+ hidden_states: torch.Tensor,
493
+ timestep: torch.Tensor,
494
+ encoder_hidden_states: torch.Tensor,
495
+ attention_mask: Optional[torch.Tensor] = None,
496
+ fps: Optional[int] = None,
497
+ condition_mask: Optional[torch.Tensor] = None,
498
+ padding_mask: Optional[torch.Tensor] = None,
499
+ return_dict: bool = True,
500
+ ) -> torch.Tensor:
501
+ batch_size, num_channels, num_frames, height, width = hidden_states.shape
502
+
503
+ # 1. Concatenate padding mask if needed & prepare attention mask
504
+ if condition_mask is not None:
505
+ hidden_states = torch.cat([hidden_states, condition_mask], dim=1)
506
+
507
+ if self.config.concat_padding_mask:
508
+ padding_mask = transforms.functional.resize(
509
+ padding_mask, list(hidden_states.shape[-2:]), interpolation=transforms.InterpolationMode.NEAREST
510
+ )
511
+ hidden_states = torch.cat(
512
+ [hidden_states, padding_mask.unsqueeze(2).repeat(batch_size, 1, num_frames, 1, 1)], dim=1
513
+ )
514
+
515
+ if attention_mask is not None:
516
+ attention_mask = attention_mask.unsqueeze(1).unsqueeze(1) # [B, 1, 1, S]
517
+
518
+ # 2. Generate positional embeddings
519
+ image_rotary_emb = self.rope(hidden_states, fps=fps)
520
+ extra_pos_emb = self.learnable_pos_embed(hidden_states) if self.config.extra_pos_embed_type else None
521
+
522
+ # 3. Patchify input
523
+ p_t, p_h, p_w = self.config.patch_size
524
+ post_patch_num_frames = num_frames // p_t
525
+ post_patch_height = height // p_h
526
+ post_patch_width = width // p_w
527
+ hidden_states = self.patch_embed(hidden_states)
528
+ hidden_states = hidden_states.flatten(1, 3) # [B, T, H, W, C] -> [B, THW, C]
529
+
530
+ # 4. Timestep embeddings
531
+ if timestep.ndim == 1:
532
+ temb, embedded_timestep = self.time_embed(hidden_states, timestep)
533
+ elif timestep.ndim == 5:
534
+ assert timestep.shape == (batch_size, 1, num_frames, 1, 1), (
535
+ f"Expected timestep to have shape [B, 1, T, 1, 1], but got {timestep.shape}"
536
+ )
537
+ timestep = timestep.flatten()
538
+ temb, embedded_timestep = self.time_embed(hidden_states, timestep)
539
+ # We can do this because num_frames == post_patch_num_frames, as p_t is 1
540
+ temb, embedded_timestep = (
541
+ x.view(batch_size, post_patch_num_frames, 1, 1, -1)
542
+ .expand(-1, -1, post_patch_height, post_patch_width, -1)
543
+ .flatten(1, 3)
544
+ for x in (temb, embedded_timestep)
545
+ ) # [BT, C] -> [B, T, 1, 1, C] -> [B, T, H, W, C] -> [B, THW, C]
546
+ else:
547
+ raise ValueError(f"Expected `timestep` to be 1- or 5-dimensional, but got {timestep.ndim} dimensions.")
548
+
549
+ # 5. Transformer blocks
550
+ for block in self.transformer_blocks:
551
+ if torch.is_grad_enabled() and self.gradient_checkpointing:
552
+ hidden_states = self._gradient_checkpointing_func(
553
+ block,
554
+ hidden_states,
555
+ encoder_hidden_states,
556
+ embedded_timestep,
557
+ temb,
558
+ image_rotary_emb,
559
+ extra_pos_emb,
560
+ attention_mask,
561
+ )
562
+ else:
563
+ hidden_states = block(
564
+ hidden_states=hidden_states,
565
+ encoder_hidden_states=encoder_hidden_states,
566
+ embedded_timestep=embedded_timestep,
567
+ temb=temb,
568
+ image_rotary_emb=image_rotary_emb,
569
+ extra_pos_emb=extra_pos_emb,
570
+ attention_mask=attention_mask,
571
+ )
572
+
573
+ # 6. Output norm & projection & unpatchify
574
+ hidden_states = self.norm_out(hidden_states, embedded_timestep, temb)
575
+ hidden_states = self.proj_out(hidden_states)
576
+ hidden_states = hidden_states.unflatten(2, (p_h, p_w, p_t, -1))
577
+ hidden_states = hidden_states.unflatten(1, (post_patch_num_frames, post_patch_height, post_patch_width))
578
+ # NOTE: The permutation order here is not the inverse operation of what happens when patching as usually expected.
579
+ # It might be a source of confusion to the reader, but this is correct
580
+ hidden_states = hidden_states.permute(0, 7, 1, 6, 2, 4, 3, 5)
581
+ hidden_states = hidden_states.flatten(6, 7).flatten(4, 5).flatten(2, 3)
582
+
583
+ if not return_dict:
584
+ return (hidden_states,)
585
+
586
+ return Transformer2DModelOutput(sample=hidden_states)
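For orientation, a hedged end-to-end sketch with a deliberately tiny, non-default configuration (torchvision is required for the padding-mask resize); it only illustrates the call signature and the unpatchified output shape.

import torch

model = CosmosTransformer3DModel(
    in_channels=2, out_channels=2, num_attention_heads=2, attention_head_dim=16,
    num_layers=1, mlp_ratio=2.0, text_embed_dim=8, adaln_lora_dim=4,
    max_size=(4, 16, 16), patch_size=(1, 2, 2),
)
video = torch.randn(1, 2, 2, 8, 8)             # (B, C, T, H, W) latents
text = torch.randn(1, 6, 8)                    # (B, seq_len, text_embed_dim)
timestep = torch.tensor([500.0])
padding_mask = torch.zeros(1, 1, 8, 8)         # resized to the latent H x W internally
out = model(video, timestep, text, padding_mask=padding_mask, return_dict=False)[0]
assert out.shape == video.shape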
exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/models/transformers/transformer_easyanimate.py ADDED
@@ -0,0 +1,527 @@
1
+ # Copyright 2025 The EasyAnimate team and The HuggingFace Team.
2
+ # All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ from typing import List, Optional, Tuple, Union
17
+
18
+ import torch
19
+ import torch.nn.functional as F
20
+ from torch import nn
21
+
22
+ from ...configuration_utils import ConfigMixin, register_to_config
23
+ from ...utils import logging
24
+ from ...utils.torch_utils import maybe_allow_in_graph
25
+ from ..attention import Attention, FeedForward
26
+ from ..embeddings import TimestepEmbedding, Timesteps, get_3d_rotary_pos_embed
27
+ from ..modeling_outputs import Transformer2DModelOutput
28
+ from ..modeling_utils import ModelMixin
29
+ from ..normalization import AdaLayerNorm, FP32LayerNorm, RMSNorm
30
+
31
+
32
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
33
+
34
+
35
+ class EasyAnimateLayerNormZero(nn.Module):
36
+ def __init__(
37
+ self,
38
+ conditioning_dim: int,
39
+ embedding_dim: int,
40
+ elementwise_affine: bool = True,
41
+ eps: float = 1e-5,
42
+ bias: bool = True,
43
+ norm_type: str = "fp32_layer_norm",
44
+ ) -> None:
45
+ super().__init__()
46
+
47
+ self.silu = nn.SiLU()
48
+ self.linear = nn.Linear(conditioning_dim, 6 * embedding_dim, bias=bias)
49
+
50
+ if norm_type == "layer_norm":
51
+ self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=elementwise_affine, eps=eps)
52
+ elif norm_type == "fp32_layer_norm":
53
+ self.norm = FP32LayerNorm(embedding_dim, elementwise_affine=elementwise_affine, eps=eps)
54
+ else:
55
+ raise ValueError(
56
+ f"Unsupported `norm_type` ({norm_type}) provided. Supported ones are: 'layer_norm', 'fp32_layer_norm'."
57
+ )
58
+
59
+ def forward(
60
+ self, hidden_states: torch.Tensor, encoder_hidden_states: torch.Tensor, temb: torch.Tensor
61
+ ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
62
+ shift, scale, gate, enc_shift, enc_scale, enc_gate = self.linear(self.silu(temb)).chunk(6, dim=1)
63
+ hidden_states = self.norm(hidden_states) * (1 + scale.unsqueeze(1)) + shift.unsqueeze(1)
64
+ encoder_hidden_states = self.norm(encoder_hidden_states) * (1 + enc_scale.unsqueeze(1)) + enc_shift.unsqueeze(
65
+ 1
66
+ )
67
+ return hidden_states, encoder_hidden_states, gate, enc_gate
68
+
69
+
70
+ class EasyAnimateRotaryPosEmbed(nn.Module):
71
+ def __init__(self, patch_size: int, rope_dim: List[int]) -> None:
72
+ super().__init__()
73
+
74
+ self.patch_size = patch_size
75
+ self.rope_dim = rope_dim
76
+
77
+ def get_resize_crop_region_for_grid(self, src, tgt_width, tgt_height):
78
+ tw = tgt_width
79
+ th = tgt_height
80
+ h, w = src
81
+ r = h / w
82
+ if r > (th / tw):
83
+ resize_height = th
84
+ resize_width = int(round(th / h * w))
85
+ else:
86
+ resize_width = tw
87
+ resize_height = int(round(tw / w * h))
88
+
89
+ crop_top = int(round((th - resize_height) / 2.0))
90
+ crop_left = int(round((tw - resize_width) / 2.0))
91
+
92
+ return (crop_top, crop_left), (crop_top + resize_height, crop_left + resize_width)
93
+
94
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
95
+ bs, c, num_frames, grid_height, grid_width = hidden_states.size()
96
+ grid_height = grid_height // self.patch_size
97
+ grid_width = grid_width // self.patch_size
98
+ base_size_width = 90 // self.patch_size
99
+ base_size_height = 60 // self.patch_size
100
+
101
+ grid_crops_coords = self.get_resize_crop_region_for_grid(
102
+ (grid_height, grid_width), base_size_width, base_size_height
103
+ )
104
+ image_rotary_emb = get_3d_rotary_pos_embed(
105
+ self.rope_dim,
106
+ grid_crops_coords,
107
+ grid_size=(grid_height, grid_width),
108
+ temporal_size=hidden_states.size(2),
109
+ use_real=True,
110
+ )
111
+ return image_rotary_emb
112
+
113
+
114
+ class EasyAnimateAttnProcessor2_0:
115
+ r"""
116
+ Processor for implementing scaled dot-product attention (enabled by default if you're using PyTorch 2.0). This is
117
+ used in the EasyAnimateTransformer3DModel model.
118
+ """
119
+
120
+ def __init__(self):
121
+ if not hasattr(F, "scaled_dot_product_attention"):
122
+ raise ImportError(
123
+ "EasyAnimateAttnProcessor2_0 requires PyTorch 2.0 or above. To use it, please install PyTorch 2.0."
124
+ )
125
+
126
+ def __call__(
127
+ self,
128
+ attn: Attention,
129
+ hidden_states: torch.Tensor,
130
+ encoder_hidden_states: torch.Tensor,
131
+ attention_mask: Optional[torch.Tensor] = None,
132
+ image_rotary_emb: Optional[torch.Tensor] = None,
133
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
134
+ if attn.add_q_proj is None and encoder_hidden_states is not None:
135
+ hidden_states = torch.cat([encoder_hidden_states, hidden_states], dim=1)
136
+
137
+ # 1. QKV projections
138
+ query = attn.to_q(hidden_states)
139
+ key = attn.to_k(hidden_states)
140
+ value = attn.to_v(hidden_states)
141
+
142
+ query = query.unflatten(2, (attn.heads, -1)).transpose(1, 2)
143
+ key = key.unflatten(2, (attn.heads, -1)).transpose(1, 2)
144
+ value = value.unflatten(2, (attn.heads, -1)).transpose(1, 2)
145
+
146
+ # 2. QK normalization
147
+ if attn.norm_q is not None:
148
+ query = attn.norm_q(query)
149
+ if attn.norm_k is not None:
150
+ key = attn.norm_k(key)
151
+
152
+ # 3. Encoder condition QKV projection and normalization
153
+ if attn.add_q_proj is not None and encoder_hidden_states is not None:
154
+ encoder_query = attn.add_q_proj(encoder_hidden_states)
155
+ encoder_key = attn.add_k_proj(encoder_hidden_states)
156
+ encoder_value = attn.add_v_proj(encoder_hidden_states)
157
+
158
+ encoder_query = encoder_query.unflatten(2, (attn.heads, -1)).transpose(1, 2)
159
+ encoder_key = encoder_key.unflatten(2, (attn.heads, -1)).transpose(1, 2)
160
+ encoder_value = encoder_value.unflatten(2, (attn.heads, -1)).transpose(1, 2)
161
+
162
+ if attn.norm_added_q is not None:
163
+ encoder_query = attn.norm_added_q(encoder_query)
164
+ if attn.norm_added_k is not None:
165
+ encoder_key = attn.norm_added_k(encoder_key)
166
+
167
+ query = torch.cat([encoder_query, query], dim=2)
168
+ key = torch.cat([encoder_key, key], dim=2)
169
+ value = torch.cat([encoder_value, value], dim=2)
170
+
171
+ if image_rotary_emb is not None:
172
+ from ..embeddings import apply_rotary_emb
173
+
174
+ query[:, :, encoder_hidden_states.shape[1] :] = apply_rotary_emb(
175
+ query[:, :, encoder_hidden_states.shape[1] :], image_rotary_emb
176
+ )
177
+ if not attn.is_cross_attention:
178
+ key[:, :, encoder_hidden_states.shape[1] :] = apply_rotary_emb(
179
+ key[:, :, encoder_hidden_states.shape[1] :], image_rotary_emb
180
+ )
181
+
182
+ # 5. Attention
183
+ hidden_states = F.scaled_dot_product_attention(
184
+ query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False
185
+ )
186
+ hidden_states = hidden_states.transpose(1, 2).flatten(2, 3)
187
+ hidden_states = hidden_states.to(query.dtype)
188
+
189
+ # 6. Output projection
190
+ if encoder_hidden_states is not None:
191
+ encoder_hidden_states, hidden_states = (
192
+ hidden_states[:, : encoder_hidden_states.shape[1]],
193
+ hidden_states[:, encoder_hidden_states.shape[1] :],
194
+ )
195
+
196
+ if getattr(attn, "to_out", None) is not None:
197
+ hidden_states = attn.to_out[0](hidden_states)
198
+ hidden_states = attn.to_out[1](hidden_states)
199
+
200
+ if getattr(attn, "to_add_out", None) is not None:
201
+ encoder_hidden_states = attn.to_add_out(encoder_hidden_states)
202
+ else:
203
+ if getattr(attn, "to_out", None) is not None:
204
+ hidden_states = attn.to_out[0](hidden_states)
205
+ hidden_states = attn.to_out[1](hidden_states)
206
+
207
+ return hidden_states, encoder_hidden_states
208
+
209
+
210
+ @maybe_allow_in_graph
211
+ class EasyAnimateTransformerBlock(nn.Module):
212
+ def __init__(
213
+ self,
214
+ dim: int,
215
+ num_attention_heads: int,
216
+ attention_head_dim: int,
217
+ time_embed_dim: int,
218
+ dropout: float = 0.0,
219
+ activation_fn: str = "gelu-approximate",
220
+ norm_elementwise_affine: bool = True,
221
+ norm_eps: float = 1e-6,
222
+ final_dropout: bool = True,
223
+ ff_inner_dim: Optional[int] = None,
224
+ ff_bias: bool = True,
225
+ qk_norm: bool = True,
226
+ after_norm: bool = False,
227
+ norm_type: str = "fp32_layer_norm",
228
+ is_mmdit_block: bool = True,
229
+ ):
230
+ super().__init__()
231
+
232
+ # Attention Part
233
+ self.norm1 = EasyAnimateLayerNormZero(
234
+ time_embed_dim, dim, norm_elementwise_affine, norm_eps, norm_type=norm_type, bias=True
235
+ )
236
+
237
+ self.attn1 = Attention(
238
+ query_dim=dim,
239
+ dim_head=attention_head_dim,
240
+ heads=num_attention_heads,
241
+ qk_norm="layer_norm" if qk_norm else None,
242
+ eps=1e-6,
243
+ bias=True,
244
+ added_proj_bias=True,
245
+ added_kv_proj_dim=dim if is_mmdit_block else None,
246
+ context_pre_only=False if is_mmdit_block else None,
247
+ processor=EasyAnimateAttnProcessor2_0(),
248
+ )
249
+
250
+ # FFN Part
251
+ self.norm2 = EasyAnimateLayerNormZero(
252
+ time_embed_dim, dim, norm_elementwise_affine, norm_eps, norm_type=norm_type, bias=True
253
+ )
254
+ self.ff = FeedForward(
255
+ dim,
256
+ dropout=dropout,
257
+ activation_fn=activation_fn,
258
+ final_dropout=final_dropout,
259
+ inner_dim=ff_inner_dim,
260
+ bias=ff_bias,
261
+ )
262
+
263
+ self.txt_ff = None
264
+ if is_mmdit_block:
265
+ self.txt_ff = FeedForward(
266
+ dim,
267
+ dropout=dropout,
268
+ activation_fn=activation_fn,
269
+ final_dropout=final_dropout,
270
+ inner_dim=ff_inner_dim,
271
+ bias=ff_bias,
272
+ )
273
+
274
+ self.norm3 = None
275
+ if after_norm:
276
+ self.norm3 = FP32LayerNorm(dim, elementwise_affine=norm_elementwise_affine, eps=norm_eps)
277
+
278
+ def forward(
279
+ self,
280
+ hidden_states: torch.Tensor,
281
+ encoder_hidden_states: torch.Tensor,
282
+ temb: torch.Tensor,
283
+ image_rotary_emb: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
284
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
285
+ # 1. Attention
286
+ norm_hidden_states, norm_encoder_hidden_states, gate_msa, enc_gate_msa = self.norm1(
287
+ hidden_states, encoder_hidden_states, temb
288
+ )
289
+ attn_hidden_states, attn_encoder_hidden_states = self.attn1(
290
+ hidden_states=norm_hidden_states,
291
+ encoder_hidden_states=norm_encoder_hidden_states,
292
+ image_rotary_emb=image_rotary_emb,
293
+ )
294
+ hidden_states = hidden_states + gate_msa.unsqueeze(1) * attn_hidden_states
295
+ encoder_hidden_states = encoder_hidden_states + enc_gate_msa.unsqueeze(1) * attn_encoder_hidden_states
296
+
297
+ # 2. Feed-forward
298
+ norm_hidden_states, norm_encoder_hidden_states, gate_ff, enc_gate_ff = self.norm2(
299
+ hidden_states, encoder_hidden_states, temb
300
+ )
301
+ if self.norm3 is not None:
302
+ norm_hidden_states = self.norm3(self.ff(norm_hidden_states))
303
+ if self.txt_ff is not None:
304
+ norm_encoder_hidden_states = self.norm3(self.txt_ff(norm_encoder_hidden_states))
305
+ else:
306
+ norm_encoder_hidden_states = self.norm3(self.ff(norm_encoder_hidden_states))
307
+ else:
308
+ norm_hidden_states = self.ff(norm_hidden_states)
309
+ if self.txt_ff is not None:
310
+ norm_encoder_hidden_states = self.txt_ff(norm_encoder_hidden_states)
311
+ else:
312
+ norm_encoder_hidden_states = self.ff(norm_encoder_hidden_states)
313
+ hidden_states = hidden_states + gate_ff.unsqueeze(1) * norm_hidden_states
314
+ encoder_hidden_states = encoder_hidden_states + enc_gate_ff.unsqueeze(1) * norm_encoder_hidden_states
315
+ return hidden_states, encoder_hidden_states
316
+
317
+
318
+ class EasyAnimateTransformer3DModel(ModelMixin, ConfigMixin):
319
+ """
320
+ A Transformer model for video-like data in [EasyAnimate](https://github.com/aigc-apps/EasyAnimate).
321
+
322
+ Parameters:
323
+ num_attention_heads (`int`, defaults to `48`):
324
+ The number of heads to use for multi-head attention.
325
+ attention_head_dim (`int`, defaults to `64`):
326
+ The number of channels in each head.
327
+ in_channels (`int`, defaults to `16`):
328
+ The number of channels in the input.
329
+ out_channels (`int`, *optional*, defaults to `16`):
330
+ The number of channels in the output.
331
+ patch_size (`int`, defaults to `2`):
332
+ The size of the patches to use in the patch embedding layer.
333
+ sample_width (`int`, defaults to `90`):
334
+ The width of the input latents.
335
+ sample_height (`int`, defaults to `60`):
336
+ The height of the input latents.
337
+ activation_fn (`str`, defaults to `"gelu-approximate"`):
338
+ Activation function to use in feed-forward.
339
+ timestep_activation_fn (`str`, defaults to `"silu"`):
340
+ Activation function to use when generating the timestep embeddings.
341
+ num_layers (`int`, defaults to `48`):
342
+ The number of layers of Transformer blocks to use.
343
+ mmdit_layers (`int`, defaults to `48`):
344
+ The number of layers of Multi Modal Transformer blocks to use.
345
+ dropout (`float`, defaults to `0.0`):
346
+ The dropout probability to use.
347
+ time_embed_dim (`int`, defaults to `512`):
348
+ Output dimension of timestep embeddings.
349
+ text_embed_dim (`int`, defaults to `3584`):
350
+ Input dimension of text embeddings from the text encoder.
351
+ norm_eps (`float`, defaults to `1e-5`):
352
+ The epsilon value to use in normalization layers.
353
+ norm_elementwise_affine (`bool`, defaults to `True`):
354
+ Whether to use elementwise affine in normalization layers.
355
+ flip_sin_to_cos (`bool`, defaults to `True`):
356
+ Whether to flip the sin to cos in the time embedding.
357
+ time_position_encoding_type (`str`, defaults to `3d_rope`):
358
+ Type of time position encoding.
359
+ after_norm (`bool`, defaults to `False`):
360
+ Flag to apply normalization after.
361
+ resize_inpaint_mask_directly (`bool`, defaults to `True`):
362
+ Flag to resize inpaint mask directly.
363
+ enable_text_attention_mask (`bool`, defaults to `True`):
364
+ Flag to enable text attention mask.
365
+ add_noise_in_inpaint_model (`bool`, defaults to `True`):
366
+ Flag to add noise in inpaint model.
367
+ """
368
+
369
+ _supports_gradient_checkpointing = True
370
+ _no_split_modules = ["EasyAnimateTransformerBlock"]
371
+ _skip_layerwise_casting_patterns = ["^proj$", "norm", "^proj_out$"]
372
+
373
+ @register_to_config
374
+ def __init__(
375
+ self,
376
+ num_attention_heads: int = 48,
377
+ attention_head_dim: int = 64,
378
+ in_channels: Optional[int] = None,
379
+ out_channels: Optional[int] = None,
380
+ patch_size: Optional[int] = None,
381
+ sample_width: int = 90,
382
+ sample_height: int = 60,
383
+ activation_fn: str = "gelu-approximate",
384
+ timestep_activation_fn: str = "silu",
385
+ freq_shift: int = 0,
386
+ num_layers: int = 48,
387
+ mmdit_layers: int = 48,
388
+ dropout: float = 0.0,
389
+ time_embed_dim: int = 512,
390
+ add_norm_text_encoder: bool = False,
391
+ text_embed_dim: int = 3584,
392
+ text_embed_dim_t5: int = None,
393
+ norm_eps: float = 1e-5,
394
+ norm_elementwise_affine: bool = True,
395
+ flip_sin_to_cos: bool = True,
396
+ time_position_encoding_type: str = "3d_rope",
397
+ after_norm=False,
398
+ resize_inpaint_mask_directly: bool = True,
399
+ enable_text_attention_mask: bool = True,
400
+ add_noise_in_inpaint_model: bool = True,
401
+ ):
402
+ super().__init__()
403
+ inner_dim = num_attention_heads * attention_head_dim
404
+
405
+ # 1. Timestep embedding
406
+ self.time_proj = Timesteps(inner_dim, flip_sin_to_cos, freq_shift)
407
+ self.time_embedding = TimestepEmbedding(inner_dim, time_embed_dim, timestep_activation_fn)
408
+ self.rope_embedding = EasyAnimateRotaryPosEmbed(patch_size, attention_head_dim)
409
+
410
+ # 2. Patch embedding
411
+ self.proj = nn.Conv2d(
412
+ in_channels, inner_dim, kernel_size=(patch_size, patch_size), stride=patch_size, bias=True
413
+ )
414
+
415
+ # 3. Text refined embedding
416
+ self.text_proj = None
417
+ self.text_proj_t5 = None
418
+ if not add_norm_text_encoder:
419
+ self.text_proj = nn.Linear(text_embed_dim, inner_dim)
420
+ if text_embed_dim_t5 is not None:
421
+ self.text_proj_t5 = nn.Linear(text_embed_dim_t5, inner_dim)
422
+ else:
423
+ self.text_proj = nn.Sequential(
424
+ RMSNorm(text_embed_dim, 1e-6, elementwise_affine=True), nn.Linear(text_embed_dim, inner_dim)
425
+ )
426
+ if text_embed_dim_t5 is not None:
427
+ self.text_proj_t5 = nn.Sequential(
428
+ RMSNorm(text_embed_dim, 1e-6, elementwise_affine=True), nn.Linear(text_embed_dim_t5, inner_dim)
429
+ )
430
+
431
+ # 4. Transformer blocks
432
+ self.transformer_blocks = nn.ModuleList(
433
+ [
434
+ EasyAnimateTransformerBlock(
435
+ dim=inner_dim,
436
+ num_attention_heads=num_attention_heads,
437
+ attention_head_dim=attention_head_dim,
438
+ time_embed_dim=time_embed_dim,
439
+ dropout=dropout,
440
+ activation_fn=activation_fn,
441
+ norm_elementwise_affine=norm_elementwise_affine,
442
+ norm_eps=norm_eps,
443
+ after_norm=after_norm,
444
+ is_mmdit_block=True if _ < mmdit_layers else False,
445
+ )
446
+ for _ in range(num_layers)
447
+ ]
448
+ )
449
+ self.norm_final = nn.LayerNorm(inner_dim, norm_eps, norm_elementwise_affine)
450
+
451
+ # 5. Output norm & projection
452
+ self.norm_out = AdaLayerNorm(
453
+ embedding_dim=time_embed_dim,
454
+ output_dim=2 * inner_dim,
455
+ norm_elementwise_affine=norm_elementwise_affine,
456
+ norm_eps=norm_eps,
457
+ chunk_dim=1,
458
+ )
459
+ self.proj_out = nn.Linear(inner_dim, patch_size * patch_size * out_channels)
460
+
461
+ self.gradient_checkpointing = False
462
+
463
+ def forward(
464
+ self,
465
+ hidden_states: torch.Tensor,
466
+ timestep: torch.Tensor,
467
+ timestep_cond: Optional[torch.Tensor] = None,
468
+ encoder_hidden_states: Optional[torch.Tensor] = None,
469
+ encoder_hidden_states_t5: Optional[torch.Tensor] = None,
470
+ inpaint_latents: Optional[torch.Tensor] = None,
471
+ control_latents: Optional[torch.Tensor] = None,
472
+ return_dict: bool = True,
473
+ ) -> Union[Tuple[torch.Tensor], Transformer2DModelOutput]:
474
+ batch_size, channels, video_length, height, width = hidden_states.size()
475
+ p = self.config.patch_size
476
+ post_patch_height = height // p
477
+ post_patch_width = width // p
478
+
479
+ # 1. Time embedding
480
+ temb = self.time_proj(timestep).to(dtype=hidden_states.dtype)
481
+ temb = self.time_embedding(temb, timestep_cond)
482
+ image_rotary_emb = self.rope_embedding(hidden_states)
483
+
484
+ # 2. Patch embedding
485
+ if inpaint_latents is not None:
486
+ hidden_states = torch.concat([hidden_states, inpaint_latents], 1)
487
+ if control_latents is not None:
488
+ hidden_states = torch.concat([hidden_states, control_latents], 1)
489
+
490
+ hidden_states = hidden_states.permute(0, 2, 1, 3, 4).flatten(0, 1) # [B, C, F, H, W] -> [BF, C, H, W]
491
+ hidden_states = self.proj(hidden_states)
492
+ hidden_states = hidden_states.unflatten(0, (batch_size, -1)).permute(
493
+ 0, 2, 1, 3, 4
494
+ ) # [BF, C, H, W] -> [B, F, C, H, W]
495
+ hidden_states = hidden_states.flatten(2, 4).transpose(1, 2) # [B, F, C, H, W] -> [B, FHW, C]
496
+
497
+ # 3. Text embedding
498
+ encoder_hidden_states = self.text_proj(encoder_hidden_states)
499
+ if encoder_hidden_states_t5 is not None:
500
+ encoder_hidden_states_t5 = self.text_proj_t5(encoder_hidden_states_t5)
501
+ encoder_hidden_states = torch.cat([encoder_hidden_states, encoder_hidden_states_t5], dim=1).contiguous()
502
+
503
+ # 4. Transformer blocks
504
+ for block in self.transformer_blocks:
505
+ if torch.is_grad_enabled() and self.gradient_checkpointing:
506
+ hidden_states, encoder_hidden_states = self._gradient_checkpointing_func(
507
+ block, hidden_states, encoder_hidden_states, temb, image_rotary_emb
508
+ )
509
+ else:
510
+ hidden_states, encoder_hidden_states = block(
511
+ hidden_states, encoder_hidden_states, temb, image_rotary_emb
512
+ )
513
+
514
+ hidden_states = self.norm_final(hidden_states)
515
+
516
+ # 5. Output norm & projection
517
+ hidden_states = self.norm_out(hidden_states, temb=temb)
518
+ hidden_states = self.proj_out(hidden_states)
519
+
520
+ # 6. Unpatchify
521
+ p = self.config.patch_size
522
+ output = hidden_states.reshape(batch_size, video_length, post_patch_height, post_patch_width, channels, p, p)
523
+ output = output.permute(0, 4, 1, 2, 5, 3, 6).flatten(5, 6).flatten(3, 4)
524
+
525
+ if not return_dict:
526
+ return (output,)
527
+ return Transformer2DModelOutput(sample=output)
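For orientation, a hedged single-call sketch with a deliberately tiny, non-default configuration; it only illustrates the call signature and that the output is unpatchified back to the latent shape (out_channels equals in_channels here, as the unpatchify reshape assumes).

import torch

model = EasyAnimateTransformer3DModel(
    num_attention_heads=2, attention_head_dim=16, in_channels=4, out_channels=4,
    patch_size=2, num_layers=1, mmdit_layers=1, time_embed_dim=32, text_embed_dim=8,
)
latents = torch.randn(1, 4, 2, 8, 8)           # (B, C, T, H, W)
text = torch.randn(1, 6, 8)                    # (B, seq_len, text_embed_dim)
timestep = torch.tensor([500])
out = model(latents, timestep, encoder_hidden_states=text, return_dict=False)[0]
assert out.shape == latents.shape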
exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/models/transformers/transformer_flux.py ADDED
@@ -0,0 +1,776 @@
1
+ # Copyright 2025 Black Forest Labs, The HuggingFace Team and The InstantX Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import inspect
16
+ from typing import Any, Dict, List, Optional, Tuple, Union
17
+
18
+ import numpy as np
19
+ import torch
20
+ import torch.nn as nn
21
+ import torch.nn.functional as F
22
+
23
+ from ...configuration_utils import ConfigMixin, register_to_config
24
+ from ...loaders import FluxTransformer2DLoadersMixin, FromOriginalModelMixin, PeftAdapterMixin
25
+ from ...utils import USE_PEFT_BACKEND, logging, scale_lora_layers, unscale_lora_layers
26
+ from ...utils.torch_utils import maybe_allow_in_graph
27
+ from ..attention import AttentionMixin, AttentionModuleMixin, FeedForward
28
+ from ..attention_dispatch import dispatch_attention_fn
29
+ from ..cache_utils import CacheMixin
30
+ from ..embeddings import (
31
+ CombinedTimestepGuidanceTextProjEmbeddings,
32
+ CombinedTimestepTextProjEmbeddings,
33
+ apply_rotary_emb,
34
+ get_1d_rotary_pos_embed,
35
+ )
36
+ from ..modeling_outputs import Transformer2DModelOutput
37
+ from ..modeling_utils import ModelMixin
38
+ from ..normalization import AdaLayerNormContinuous, AdaLayerNormZero, AdaLayerNormZeroSingle
39
+
40
+
41
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
42
+
43
+
44
+ def _get_projections(attn: "FluxAttention", hidden_states, encoder_hidden_states=None):
45
+ query = attn.to_q(hidden_states)
46
+ key = attn.to_k(hidden_states)
47
+ value = attn.to_v(hidden_states)
48
+
49
+ encoder_query = encoder_key = encoder_value = None
50
+ if encoder_hidden_states is not None and attn.added_kv_proj_dim is not None:
51
+ encoder_query = attn.add_q_proj(encoder_hidden_states)
52
+ encoder_key = attn.add_k_proj(encoder_hidden_states)
53
+ encoder_value = attn.add_v_proj(encoder_hidden_states)
54
+
55
+ return query, key, value, encoder_query, encoder_key, encoder_value
56
+
57
+
58
+ def _get_fused_projections(attn: "FluxAttention", hidden_states, encoder_hidden_states=None):
59
+ query, key, value = attn.to_qkv(hidden_states).chunk(3, dim=-1)
60
+
61
+ encoder_query = encoder_key = encoder_value = (None,)
62
+ if encoder_hidden_states is not None and hasattr(attn, "to_added_qkv"):
63
+ encoder_query, encoder_key, encoder_value = attn.to_added_qkv(encoder_hidden_states).chunk(3, dim=-1)
64
+
65
+ return query, key, value, encoder_query, encoder_key, encoder_value
66
+
67
+
68
+ def _get_qkv_projections(attn: "FluxAttention", hidden_states, encoder_hidden_states=None):
69
+ if attn.fused_projections:
70
+ return _get_fused_projections(attn, hidden_states, encoder_hidden_states)
71
+ return _get_projections(attn, hidden_states, encoder_hidden_states)
72
+
73
+
74
+ class FluxAttnProcessor:
75
+ _attention_backend = None
76
+
77
+ def __init__(self):
78
+ if not hasattr(F, "scaled_dot_product_attention"):
79
+ raise ImportError(f"{self.__class__.__name__} requires PyTorch 2.0. Please upgrade your pytorch version.")
80
+
81
+ def __call__(
82
+ self,
83
+ attn: "FluxAttention",
84
+ hidden_states: torch.Tensor,
85
+ encoder_hidden_states: torch.Tensor = None,
86
+ attention_mask: Optional[torch.Tensor] = None,
87
+ image_rotary_emb: Optional[torch.Tensor] = None,
88
+ ) -> torch.Tensor:
89
+ query, key, value, encoder_query, encoder_key, encoder_value = _get_qkv_projections(
90
+ attn, hidden_states, encoder_hidden_states
91
+ )
92
+
93
+ query = query.unflatten(-1, (attn.heads, -1))
94
+ key = key.unflatten(-1, (attn.heads, -1))
95
+ value = value.unflatten(-1, (attn.heads, -1))
96
+
97
+ query = attn.norm_q(query)
98
+ key = attn.norm_k(key)
99
+
100
+ if attn.added_kv_proj_dim is not None:
101
+ encoder_query = encoder_query.unflatten(-1, (attn.heads, -1))
102
+ encoder_key = encoder_key.unflatten(-1, (attn.heads, -1))
103
+ encoder_value = encoder_value.unflatten(-1, (attn.heads, -1))
104
+
105
+ encoder_query = attn.norm_added_q(encoder_query)
106
+ encoder_key = attn.norm_added_k(encoder_key)
107
+
108
+ query = torch.cat([encoder_query, query], dim=1)
109
+ key = torch.cat([encoder_key, key], dim=1)
110
+ value = torch.cat([encoder_value, value], dim=1)
111
+
112
+ if image_rotary_emb is not None:
113
+ query = apply_rotary_emb(query, image_rotary_emb, sequence_dim=1)
114
+ key = apply_rotary_emb(key, image_rotary_emb, sequence_dim=1)
115
+
116
+ hidden_states = dispatch_attention_fn(
117
+ query, key, value, attn_mask=attention_mask, backend=self._attention_backend
118
+ )
119
+ hidden_states = hidden_states.flatten(2, 3)
120
+ hidden_states = hidden_states.to(query.dtype)
121
+
122
+ if encoder_hidden_states is not None:
123
+ encoder_hidden_states, hidden_states = hidden_states.split_with_sizes(
124
+ [encoder_hidden_states.shape[1], hidden_states.shape[1] - encoder_hidden_states.shape[1]], dim=1
125
+ )
126
+ hidden_states = attn.to_out[0](hidden_states)
127
+ hidden_states = attn.to_out[1](hidden_states)
128
+ encoder_hidden_states = attn.to_add_out(encoder_hidden_states)
129
+
130
+ return hidden_states, encoder_hidden_states
131
+ else:
132
+ return hidden_states
133
+
134
+
135
+ class FluxIPAdapterAttnProcessor(torch.nn.Module):
136
+ """Flux Attention processor for IP-Adapter."""
137
+
138
+ _attention_backend = None
139
+
140
+ def __init__(
141
+ self, hidden_size: int, cross_attention_dim: int, num_tokens=(4,), scale=1.0, device=None, dtype=None
142
+ ):
143
+ super().__init__()
144
+
145
+ if not hasattr(F, "scaled_dot_product_attention"):
146
+ raise ImportError(
147
+ f"{self.__class__.__name__} requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0."
148
+ )
149
+
150
+ self.hidden_size = hidden_size
151
+ self.cross_attention_dim = cross_attention_dim
152
+
153
+ if not isinstance(num_tokens, (tuple, list)):
154
+ num_tokens = [num_tokens]
155
+
156
+ if not isinstance(scale, list):
157
+ scale = [scale] * len(num_tokens)
158
+ if len(scale) != len(num_tokens):
159
+ raise ValueError("`scale` should be a list of integers with the same length as `num_tokens`.")
160
+ self.scale = scale
161
+
162
+ self.to_k_ip = nn.ModuleList(
163
+ [
164
+ nn.Linear(cross_attention_dim, hidden_size, bias=True, device=device, dtype=dtype)
165
+ for _ in range(len(num_tokens))
166
+ ]
167
+ )
168
+ self.to_v_ip = nn.ModuleList(
169
+ [
170
+ nn.Linear(cross_attention_dim, hidden_size, bias=True, device=device, dtype=dtype)
171
+ for _ in range(len(num_tokens))
172
+ ]
173
+ )
174
+
175
+ def __call__(
176
+ self,
177
+ attn: "FluxAttention",
178
+ hidden_states: torch.Tensor,
179
+ encoder_hidden_states: torch.Tensor = None,
180
+ attention_mask: Optional[torch.Tensor] = None,
181
+ image_rotary_emb: Optional[torch.Tensor] = None,
182
+ ip_hidden_states: Optional[List[torch.Tensor]] = None,
183
+ ip_adapter_masks: Optional[torch.Tensor] = None,
184
+ ) -> torch.Tensor:
185
+ batch_size = hidden_states.shape[0]
186
+
187
+ query, key, value, encoder_query, encoder_key, encoder_value = _get_qkv_projections(
188
+ attn, hidden_states, encoder_hidden_states
189
+ )
190
+
191
+ query = query.unflatten(-1, (attn.heads, -1))
192
+ key = key.unflatten(-1, (attn.heads, -1))
193
+ value = value.unflatten(-1, (attn.heads, -1))
194
+
195
+ query = attn.norm_q(query)
196
+ key = attn.norm_k(key)
197
+ ip_query = query
198
+
199
+ if encoder_hidden_states is not None:
200
+ encoder_query = encoder_query.unflatten(-1, (attn.heads, -1))
201
+ encoder_key = encoder_key.unflatten(-1, (attn.heads, -1))
202
+ encoder_value = encoder_value.unflatten(-1, (attn.heads, -1))
203
+
204
+ encoder_query = attn.norm_added_q(encoder_query)
205
+ encoder_key = attn.norm_added_k(encoder_key)
206
+
207
+ query = torch.cat([encoder_query, query], dim=1)
208
+ key = torch.cat([encoder_key, key], dim=1)
209
+ value = torch.cat([encoder_value, value], dim=1)
210
+
211
+ if image_rotary_emb is not None:
212
+ query = apply_rotary_emb(query, image_rotary_emb, sequence_dim=1)
213
+ key = apply_rotary_emb(key, image_rotary_emb, sequence_dim=1)
214
+
215
+ hidden_states = dispatch_attention_fn(
216
+ query,
217
+ key,
218
+ value,
219
+ attn_mask=attention_mask,
220
+ dropout_p=0.0,
221
+ is_causal=False,
222
+ backend=self._attention_backend,
223
+ )
224
+ hidden_states = hidden_states.flatten(2, 3)
225
+ hidden_states = hidden_states.to(query.dtype)
226
+
227
+ if encoder_hidden_states is not None:
228
+ encoder_hidden_states, hidden_states = hidden_states.split_with_sizes(
229
+ [encoder_hidden_states.shape[1], hidden_states.shape[1] - encoder_hidden_states.shape[1]], dim=1
230
+ )
231
+ hidden_states = attn.to_out[0](hidden_states)
232
+ hidden_states = attn.to_out[1](hidden_states)
233
+ encoder_hidden_states = attn.to_add_out(encoder_hidden_states)
234
+
235
+ # IP-adapter
236
+ ip_attn_output = torch.zeros_like(hidden_states)
237
+
238
+ for current_ip_hidden_states, scale, to_k_ip, to_v_ip in zip(
239
+ ip_hidden_states, self.scale, self.to_k_ip, self.to_v_ip
240
+ ):
241
+ ip_key = to_k_ip(current_ip_hidden_states)
242
+ ip_value = to_v_ip(current_ip_hidden_states)
243
+
244
+ ip_key = ip_key.view(batch_size, -1, attn.heads, attn.head_dim)
245
+ ip_value = ip_value.view(batch_size, -1, attn.heads, attn.head_dim)
246
+
247
+ current_ip_hidden_states = dispatch_attention_fn(
248
+ ip_query,
249
+ ip_key,
250
+ ip_value,
251
+ attn_mask=None,
252
+ dropout_p=0.0,
253
+ is_causal=False,
254
+ backend=self._attention_backend,
255
+ )
256
+ current_ip_hidden_states = current_ip_hidden_states.reshape(batch_size, -1, attn.heads * attn.head_dim)
257
+ current_ip_hidden_states = current_ip_hidden_states.to(ip_query.dtype)
258
+ ip_attn_output += scale * current_ip_hidden_states
259
+
260
+ return hidden_states, encoder_hidden_states, ip_attn_output
261
+ else:
262
+ return hidden_states
263
+
264
+
265
+ class FluxAttention(torch.nn.Module, AttentionModuleMixin):
266
+ _default_processor_cls = FluxAttnProcessor
267
+ _available_processors = [
268
+ FluxAttnProcessor,
269
+ FluxIPAdapterAttnProcessor,
270
+ ]
271
+
272
+ def __init__(
273
+ self,
274
+ query_dim: int,
275
+ heads: int = 8,
276
+ dim_head: int = 64,
277
+ dropout: float = 0.0,
278
+ bias: bool = False,
279
+ added_kv_proj_dim: Optional[int] = None,
280
+ added_proj_bias: Optional[bool] = True,
281
+ out_bias: bool = True,
282
+ eps: float = 1e-5,
283
+ out_dim: int = None,
284
+ context_pre_only: Optional[bool] = None,
285
+ pre_only: bool = False,
286
+ elementwise_affine: bool = True,
287
+ processor=None,
288
+ ):
289
+ super().__init__()
290
+
291
+ self.head_dim = dim_head
292
+ self.inner_dim = out_dim if out_dim is not None else dim_head * heads
293
+ self.query_dim = query_dim
294
+ self.use_bias = bias
295
+ self.dropout = dropout
296
+ self.out_dim = out_dim if out_dim is not None else query_dim
297
+ self.context_pre_only = context_pre_only
298
+ self.pre_only = pre_only
299
+ self.heads = out_dim // dim_head if out_dim is not None else heads
300
+ self.added_kv_proj_dim = added_kv_proj_dim
301
+ self.added_proj_bias = added_proj_bias
302
+
303
+ self.norm_q = torch.nn.RMSNorm(dim_head, eps=eps, elementwise_affine=elementwise_affine)
304
+ self.norm_k = torch.nn.RMSNorm(dim_head, eps=eps, elementwise_affine=elementwise_affine)
305
+ self.to_q = torch.nn.Linear(query_dim, self.inner_dim, bias=bias)
306
+ self.to_k = torch.nn.Linear(query_dim, self.inner_dim, bias=bias)
307
+ self.to_v = torch.nn.Linear(query_dim, self.inner_dim, bias=bias)
308
+
309
+ if not self.pre_only:
310
+ self.to_out = torch.nn.ModuleList([])
311
+ self.to_out.append(torch.nn.Linear(self.inner_dim, self.out_dim, bias=out_bias))
312
+ self.to_out.append(torch.nn.Dropout(dropout))
313
+
314
+ if added_kv_proj_dim is not None:
315
+ self.norm_added_q = torch.nn.RMSNorm(dim_head, eps=eps)
316
+ self.norm_added_k = torch.nn.RMSNorm(dim_head, eps=eps)
317
+ self.add_q_proj = torch.nn.Linear(added_kv_proj_dim, self.inner_dim, bias=added_proj_bias)
318
+ self.add_k_proj = torch.nn.Linear(added_kv_proj_dim, self.inner_dim, bias=added_proj_bias)
319
+ self.add_v_proj = torch.nn.Linear(added_kv_proj_dim, self.inner_dim, bias=added_proj_bias)
320
+ self.to_add_out = torch.nn.Linear(self.inner_dim, query_dim, bias=out_bias)
321
+
322
+ if processor is None:
323
+ processor = self._default_processor_cls()
324
+ self.set_processor(processor)
325
+
326
+ def forward(
327
+ self,
328
+ hidden_states: torch.Tensor,
329
+ encoder_hidden_states: Optional[torch.Tensor] = None,
330
+ attention_mask: Optional[torch.Tensor] = None,
331
+ image_rotary_emb: Optional[torch.Tensor] = None,
332
+ **kwargs,
333
+ ) -> torch.Tensor:
334
+ attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys())
335
+ quiet_attn_parameters = {"ip_adapter_masks", "ip_hidden_states"}
336
+ unused_kwargs = [k for k, _ in kwargs.items() if k not in attn_parameters and k not in quiet_attn_parameters]
337
+ if len(unused_kwargs) > 0:
338
+ logger.warning(
339
+ f"joint_attention_kwargs {unused_kwargs} are not expected by {self.processor.__class__.__name__} and will be ignored."
340
+ )
341
+ kwargs = {k: w for k, w in kwargs.items() if k in attn_parameters}
342
+ return self.processor(self, hidden_states, encoder_hidden_states, attention_mask, image_rotary_emb, **kwargs)
343
+
344
+
345
+ @maybe_allow_in_graph
346
+ class FluxSingleTransformerBlock(nn.Module):
347
+ def __init__(self, dim: int, num_attention_heads: int, attention_head_dim: int, mlp_ratio: float = 4.0):
348
+ super().__init__()
349
+ self.mlp_hidden_dim = int(dim * mlp_ratio)
350
+
351
+ self.norm = AdaLayerNormZeroSingle(dim)
352
+ self.proj_mlp = nn.Linear(dim, self.mlp_hidden_dim)
353
+ self.act_mlp = nn.GELU(approximate="tanh")
354
+ self.proj_out = nn.Linear(dim + self.mlp_hidden_dim, dim)
355
+
356
+ self.attn = FluxAttention(
357
+ query_dim=dim,
358
+ dim_head=attention_head_dim,
359
+ heads=num_attention_heads,
360
+ out_dim=dim,
361
+ bias=True,
362
+ processor=FluxAttnProcessor(),
363
+ eps=1e-6,
364
+ pre_only=True,
365
+ )
366
+
367
+ def forward(
368
+ self,
369
+ hidden_states: torch.Tensor,
370
+ encoder_hidden_states: torch.Tensor,
371
+ temb: torch.Tensor,
372
+ image_rotary_emb: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
373
+ joint_attention_kwargs: Optional[Dict[str, Any]] = None,
374
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
375
+ text_seq_len = encoder_hidden_states.shape[1]
376
+ hidden_states = torch.cat([encoder_hidden_states, hidden_states], dim=1)
377
+
378
+ residual = hidden_states
379
+ norm_hidden_states, gate = self.norm(hidden_states, emb=temb)
380
+ mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states))
381
+ joint_attention_kwargs = joint_attention_kwargs or {}
382
+ attn_output = self.attn(
383
+ hidden_states=norm_hidden_states,
384
+ image_rotary_emb=image_rotary_emb,
385
+ **joint_attention_kwargs,
386
+ )
387
+
388
+ hidden_states = torch.cat([attn_output, mlp_hidden_states], dim=2)
389
+ gate = gate.unsqueeze(1)
390
+ hidden_states = gate * self.proj_out(hidden_states)
391
+ hidden_states = residual + hidden_states
392
+ if hidden_states.dtype == torch.float16:
393
+ hidden_states = hidden_states.clip(-65504, 65504)
394
+
395
+ encoder_hidden_states, hidden_states = hidden_states[:, :text_seq_len], hidden_states[:, text_seq_len:]
396
+ return encoder_hidden_states, hidden_states
397
+
398
+
399
+ @maybe_allow_in_graph
400
+ class FluxTransformerBlock(nn.Module):
401
+ def __init__(
402
+ self, dim: int, num_attention_heads: int, attention_head_dim: int, qk_norm: str = "rms_norm", eps: float = 1e-6
403
+ ):
404
+ super().__init__()
405
+
406
+ self.norm1 = AdaLayerNormZero(dim)
407
+ self.norm1_context = AdaLayerNormZero(dim)
408
+
409
+ self.attn = FluxAttention(
410
+ query_dim=dim,
411
+ added_kv_proj_dim=dim,
412
+ dim_head=attention_head_dim,
413
+ heads=num_attention_heads,
414
+ out_dim=dim,
415
+ context_pre_only=False,
416
+ bias=True,
417
+ processor=FluxAttnProcessor(),
418
+ eps=eps,
419
+ )
420
+
421
+ self.norm2 = nn.LayerNorm(dim, elementwise_affine=False, eps=1e-6)
422
+ self.ff = FeedForward(dim=dim, dim_out=dim, activation_fn="gelu-approximate")
423
+
424
+ self.norm2_context = nn.LayerNorm(dim, elementwise_affine=False, eps=1e-6)
425
+ self.ff_context = FeedForward(dim=dim, dim_out=dim, activation_fn="gelu-approximate")
426
+
427
+ def forward(
428
+ self,
429
+ hidden_states: torch.Tensor,
430
+ encoder_hidden_states: torch.Tensor,
431
+ temb: torch.Tensor,
432
+ image_rotary_emb: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
433
+ joint_attention_kwargs: Optional[Dict[str, Any]] = None,
434
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
435
+ norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb)
436
+
437
+ norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context(
438
+ encoder_hidden_states, emb=temb
439
+ )
440
+ joint_attention_kwargs = joint_attention_kwargs or {}
441
+
442
+ # Attention.
443
+ attention_outputs = self.attn(
444
+ hidden_states=norm_hidden_states,
445
+ encoder_hidden_states=norm_encoder_hidden_states,
446
+ image_rotary_emb=image_rotary_emb,
447
+ **joint_attention_kwargs,
448
+ )
449
+
450
+ if len(attention_outputs) == 2:
451
+ attn_output, context_attn_output = attention_outputs
452
+ elif len(attention_outputs) == 3:
453
+ attn_output, context_attn_output, ip_attn_output = attention_outputs
454
+
455
+ # Process attention outputs for the `hidden_states`.
456
+ attn_output = gate_msa.unsqueeze(1) * attn_output
457
+ hidden_states = hidden_states + attn_output
458
+
459
+ norm_hidden_states = self.norm2(hidden_states)
460
+ norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
461
+
462
+ ff_output = self.ff(norm_hidden_states)
463
+ ff_output = gate_mlp.unsqueeze(1) * ff_output
464
+
465
+ hidden_states = hidden_states + ff_output
466
+ if len(attention_outputs) == 3:
467
+ hidden_states = hidden_states + ip_attn_output
468
+
469
+ # Process attention outputs for the `encoder_hidden_states`.
470
+ context_attn_output = c_gate_msa.unsqueeze(1) * context_attn_output
471
+ encoder_hidden_states = encoder_hidden_states + context_attn_output
472
+
473
+ norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states)
474
+ norm_encoder_hidden_states = norm_encoder_hidden_states * (1 + c_scale_mlp[:, None]) + c_shift_mlp[:, None]
475
+
476
+ context_ff_output = self.ff_context(norm_encoder_hidden_states)
477
+ encoder_hidden_states = encoder_hidden_states + c_gate_mlp.unsqueeze(1) * context_ff_output
478
+ if encoder_hidden_states.dtype == torch.float16:
479
+ encoder_hidden_states = encoder_hidden_states.clip(-65504, 65504)
480
+
481
+ return encoder_hidden_states, hidden_states
482
+
483
+
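# Illustrative sketch (not part of this diff): the shift/scale/gate (adaLN-Zero) pattern
# used by the blocks above, on toy tensors. The conditioning vector yields a per-sample
# shift, scale and gate; the gate scales the residual update.
import torch
import torch.nn.functional as F

B, S, D = 2, 5, 8
x = torch.randn(B, S, D)
shift, scale, gate = torch.randn(B, D), torch.randn(B, D), torch.randn(B, D)
norm_x = F.layer_norm(x, (D,))
modulated = norm_x * (1 + scale[:, None]) + shift[:, None]
x = x + gate[:, None] * modulated  # `modulated` stands in for the attention / MLP output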
484
+ class FluxPosEmbed(nn.Module):
485
+ # modified from https://github.com/black-forest-labs/flux/blob/c00d7c60b085fce8058b9df845e036090873f2ce/src/flux/modules/layers.py#L11
486
+ def __init__(self, theta: int, axes_dim: List[int]):
487
+ super().__init__()
488
+ self.theta = theta
489
+ self.axes_dim = axes_dim
490
+
491
+ def forward(self, ids: torch.Tensor) -> torch.Tensor:
492
+ n_axes = ids.shape[-1]
493
+ cos_out = []
494
+ sin_out = []
495
+ pos = ids.float()
496
+ is_mps = ids.device.type == "mps"
497
+ is_npu = ids.device.type == "npu"
498
+ freqs_dtype = torch.float32 if (is_mps or is_npu) else torch.float64
499
+ for i in range(n_axes):
500
+ cos, sin = get_1d_rotary_pos_embed(
501
+ self.axes_dim[i],
502
+ pos[:, i],
503
+ theta=self.theta,
504
+ repeat_interleave_real=True,
505
+ use_real=True,
506
+ freqs_dtype=freqs_dtype,
507
+ )
508
+ cos_out.append(cos)
509
+ sin_out.append(sin)
510
+ freqs_cos = torch.cat(cos_out, dim=-1).to(ids.device)
511
+ freqs_sin = torch.cat(sin_out, dim=-1).to(ids.device)
512
+ return freqs_cos, freqs_sin
513
+
514
+
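# Illustrative sketch (not part of this diff): the position ids FluxPosEmbed consumes,
# built the way the Flux pipelines are assumed to build them: all-zero ids for text
# tokens and (0, row, col) ids for image tokens.
import torch

text_len, h, w = 4, 3, 3
txt_ids = torch.zeros(text_len, 3)
img_ids = torch.zeros(h, w, 3)
img_ids[..., 1] = torch.arange(h)[:, None]
img_ids[..., 2] = torch.arange(w)[None, :]
ids = torch.cat([txt_ids, img_ids.reshape(h * w, 3)], dim=0)  # (text_len + h*w, 3)
# FluxPosEmbed(theta=10000, axes_dim=[16, 56, 56])(ids) should return per-token cos/sin
# tables whose last dimension is sum(axes_dim) = 128, one slice per axis.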
515
+ class FluxTransformer2DModel(
516
+ ModelMixin,
517
+ ConfigMixin,
518
+ PeftAdapterMixin,
519
+ FromOriginalModelMixin,
520
+ FluxTransformer2DLoadersMixin,
521
+ CacheMixin,
522
+ AttentionMixin,
523
+ ):
524
+ """
525
+ The Transformer model introduced in Flux.
526
+
527
+ Reference: https://blackforestlabs.ai/announcing-black-forest-labs/
528
+
529
+ Args:
530
+ patch_size (`int`, defaults to `1`):
531
+ Patch size to turn the input data into small patches.
532
+ in_channels (`int`, defaults to `64`):
533
+ The number of channels in the input.
534
+ out_channels (`int`, *optional*, defaults to `None`):
535
+ The number of channels in the output. If not specified, it defaults to `in_channels`.
536
+ num_layers (`int`, defaults to `19`):
537
+ The number of layers of dual stream DiT blocks to use.
538
+ num_single_layers (`int`, defaults to `38`):
539
+ The number of layers of single stream DiT blocks to use.
540
+ attention_head_dim (`int`, defaults to `128`):
541
+ The number of dimensions to use for each attention head.
542
+ num_attention_heads (`int`, defaults to `24`):
543
+ The number of attention heads to use.
544
+ joint_attention_dim (`int`, defaults to `4096`):
545
+ The number of dimensions to use for the joint attention (embedding/channel dimension of
546
+ `encoder_hidden_states`).
547
+ pooled_projection_dim (`int`, defaults to `768`):
548
+ The number of dimensions to use for the pooled projection.
549
+ guidance_embeds (`bool`, defaults to `False`):
550
+ Whether to use guidance embeddings for guidance-distilled variant of the model.
551
+ axes_dims_rope (`Tuple[int]`, defaults to `(16, 56, 56)`):
552
+ The dimensions to use for the rotary positional embeddings.
553
+ """
554
+
555
+ _supports_gradient_checkpointing = True
556
+ _no_split_modules = ["FluxTransformerBlock", "FluxSingleTransformerBlock"]
557
+ _skip_layerwise_casting_patterns = ["pos_embed", "norm"]
558
+ _repeated_blocks = ["FluxTransformerBlock", "FluxSingleTransformerBlock"]
559
+
560
+ @register_to_config
561
+ def __init__(
562
+ self,
563
+ patch_size: int = 1,
564
+ in_channels: int = 64,
565
+ out_channels: Optional[int] = None,
566
+ num_layers: int = 19,
567
+ num_single_layers: int = 38,
568
+ attention_head_dim: int = 128,
569
+ num_attention_heads: int = 24,
570
+ joint_attention_dim: int = 4096,
571
+ pooled_projection_dim: int = 768,
572
+ guidance_embeds: bool = False,
573
+ axes_dims_rope: Tuple[int, int, int] = (16, 56, 56),
574
+ ):
575
+ super().__init__()
576
+ self.out_channels = out_channels or in_channels
577
+ self.inner_dim = num_attention_heads * attention_head_dim
578
+
579
+ self.pos_embed = FluxPosEmbed(theta=10000, axes_dim=axes_dims_rope)
580
+
581
+ text_time_guidance_cls = (
582
+ CombinedTimestepGuidanceTextProjEmbeddings if guidance_embeds else CombinedTimestepTextProjEmbeddings
583
+ )
584
+ self.time_text_embed = text_time_guidance_cls(
585
+ embedding_dim=self.inner_dim, pooled_projection_dim=pooled_projection_dim
586
+ )
587
+
588
+ self.context_embedder = nn.Linear(joint_attention_dim, self.inner_dim)
589
+ self.x_embedder = nn.Linear(in_channels, self.inner_dim)
590
+
591
+ self.transformer_blocks = nn.ModuleList(
592
+ [
593
+ FluxTransformerBlock(
594
+ dim=self.inner_dim,
595
+ num_attention_heads=num_attention_heads,
596
+ attention_head_dim=attention_head_dim,
597
+ )
598
+ for _ in range(num_layers)
599
+ ]
600
+ )
601
+
602
+ self.single_transformer_blocks = nn.ModuleList(
603
+ [
604
+ FluxSingleTransformerBlock(
605
+ dim=self.inner_dim,
606
+ num_attention_heads=num_attention_heads,
607
+ attention_head_dim=attention_head_dim,
608
+ )
609
+ for _ in range(num_single_layers)
610
+ ]
611
+ )
612
+
613
+ self.norm_out = AdaLayerNormContinuous(self.inner_dim, self.inner_dim, elementwise_affine=False, eps=1e-6)
614
+ self.proj_out = nn.Linear(self.inner_dim, patch_size * patch_size * self.out_channels, bias=True)
615
+
616
+ self.gradient_checkpointing = False
617
+
618
+ def forward(
619
+ self,
620
+ hidden_states: torch.Tensor,
621
+ encoder_hidden_states: torch.Tensor = None,
622
+ pooled_projections: torch.Tensor = None,
623
+ timestep: torch.LongTensor = None,
624
+ img_ids: torch.Tensor = None,
625
+ txt_ids: torch.Tensor = None,
626
+ guidance: torch.Tensor = None,
627
+ joint_attention_kwargs: Optional[Dict[str, Any]] = None,
628
+ controlnet_block_samples=None,
629
+ controlnet_single_block_samples=None,
630
+ return_dict: bool = True,
631
+ controlnet_blocks_repeat: bool = False,
632
+ ) -> Union[torch.Tensor, Transformer2DModelOutput]:
633
+ """
634
+ The [`FluxTransformer2DModel`] forward method.
635
+
636
+ Args:
637
+ hidden_states (`torch.Tensor` of shape `(batch_size, image_sequence_length, in_channels)`):
638
+ Input `hidden_states`.
639
+ encoder_hidden_states (`torch.Tensor` of shape `(batch_size, text_sequence_length, joint_attention_dim)`):
640
+ Conditional embeddings (embeddings computed from the input conditions such as prompts) to use.
641
+ pooled_projections (`torch.Tensor` of shape `(batch_size, projection_dim)`): Embeddings projected
642
+ from the embeddings of input conditions.
643
+ timestep ( `torch.LongTensor`):
644
+ Used to indicate denoising step.
645
+ block_controlnet_hidden_states: (`list` of `torch.Tensor`):
646
+ A list of tensors that if specified are added to the residuals of transformer blocks.
647
+ joint_attention_kwargs (`dict`, *optional*):
648
+ A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
649
+ `self.processor` in
650
+ [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
651
+ return_dict (`bool`, *optional*, defaults to `True`):
652
+ Whether or not to return a [`~models.transformer_2d.Transformer2DModelOutput`] instead of a plain
653
+ tuple.
654
+
655
+ Returns:
656
+ If `return_dict` is True, an [`~models.transformer_2d.Transformer2DModelOutput`] is returned, otherwise a
657
+ `tuple` where the first element is the sample tensor.
658
+ """
659
+ if joint_attention_kwargs is not None:
660
+ joint_attention_kwargs = joint_attention_kwargs.copy()
661
+ lora_scale = joint_attention_kwargs.pop("scale", 1.0)
662
+ else:
663
+ lora_scale = 1.0
664
+
665
+ if USE_PEFT_BACKEND:
666
+ # weight the lora layers by setting `lora_scale` for each PEFT layer
667
+ scale_lora_layers(self, lora_scale)
668
+ else:
669
+ if joint_attention_kwargs is not None and joint_attention_kwargs.get("scale", None) is not None:
670
+ logger.warning(
671
+ "Passing `scale` via `joint_attention_kwargs` when not using the PEFT backend is ineffective."
672
+ )
673
+
674
+ hidden_states = self.x_embedder(hidden_states)
675
+
676
+ timestep = timestep.to(hidden_states.dtype) * 1000
677
+ if guidance is not None:
678
+ guidance = guidance.to(hidden_states.dtype) * 1000
679
+
680
+ temb = (
681
+ self.time_text_embed(timestep, pooled_projections)
682
+ if guidance is None
683
+ else self.time_text_embed(timestep, guidance, pooled_projections)
684
+ )
685
+ encoder_hidden_states = self.context_embedder(encoder_hidden_states)
686
+
687
+ if txt_ids.ndim == 3:
688
+ logger.warning(
689
+ "Passing `txt_ids` 3d torch.Tensor is deprecated."
690
+ "Please remove the batch dimension and pass it as a 2d torch Tensor"
691
+ )
692
+ txt_ids = txt_ids[0]
693
+ if img_ids.ndim == 3:
694
+ logger.warning(
695
+ "Passing `img_ids` 3d torch.Tensor is deprecated."
696
+ "Please remove the batch dimension and pass it as a 2d torch Tensor"
697
+ )
698
+ img_ids = img_ids[0]
699
+
700
+ ids = torch.cat((txt_ids, img_ids), dim=0)
701
+ image_rotary_emb = self.pos_embed(ids)
702
+
703
+ if joint_attention_kwargs is not None and "ip_adapter_image_embeds" in joint_attention_kwargs:
704
+ ip_adapter_image_embeds = joint_attention_kwargs.pop("ip_adapter_image_embeds")
705
+ ip_hidden_states = self.encoder_hid_proj(ip_adapter_image_embeds)
706
+ joint_attention_kwargs.update({"ip_hidden_states": ip_hidden_states})
707
+
708
+ for index_block, block in enumerate(self.transformer_blocks):
709
+ if torch.is_grad_enabled() and self.gradient_checkpointing:
710
+ encoder_hidden_states, hidden_states = self._gradient_checkpointing_func(
711
+ block,
712
+ hidden_states,
713
+ encoder_hidden_states,
714
+ temb,
715
+ image_rotary_emb,
716
+ joint_attention_kwargs,
717
+ )
718
+
719
+ else:
720
+ encoder_hidden_states, hidden_states = block(
721
+ hidden_states=hidden_states,
722
+ encoder_hidden_states=encoder_hidden_states,
723
+ temb=temb,
724
+ image_rotary_emb=image_rotary_emb,
725
+ joint_attention_kwargs=joint_attention_kwargs,
726
+ )
727
+
728
+ # controlnet residual
729
+ if controlnet_block_samples is not None:
730
+ interval_control = len(self.transformer_blocks) / len(controlnet_block_samples)
731
+ interval_control = int(np.ceil(interval_control))
732
+ # For Xlabs ControlNet.
733
+ if controlnet_blocks_repeat:
734
+ hidden_states = (
735
+ hidden_states + controlnet_block_samples[index_block % len(controlnet_block_samples)]
736
+ )
737
+ else:
738
+ hidden_states = hidden_states + controlnet_block_samples[index_block // interval_control]
739
+
740
+ for index_block, block in enumerate(self.single_transformer_blocks):
741
+ if torch.is_grad_enabled() and self.gradient_checkpointing:
742
+ encoder_hidden_states, hidden_states = self._gradient_checkpointing_func(
743
+ block,
744
+ hidden_states,
745
+ encoder_hidden_states,
746
+ temb,
747
+ image_rotary_emb,
748
+ joint_attention_kwargs,
749
+ )
750
+
751
+ else:
752
+ encoder_hidden_states, hidden_states = block(
753
+ hidden_states=hidden_states,
754
+ encoder_hidden_states=encoder_hidden_states,
755
+ temb=temb,
756
+ image_rotary_emb=image_rotary_emb,
757
+ joint_attention_kwargs=joint_attention_kwargs,
758
+ )
759
+
760
+ # controlnet residual
761
+ if controlnet_single_block_samples is not None:
762
+ interval_control = len(self.single_transformer_blocks) / len(controlnet_single_block_samples)
763
+ interval_control = int(np.ceil(interval_control))
764
+ hidden_states = hidden_states + controlnet_single_block_samples[index_block // interval_control]
765
+
766
+ hidden_states = self.norm_out(hidden_states, temb)
767
+ output = self.proj_out(hidden_states)
768
+
769
+ if USE_PEFT_BACKEND:
770
+ # remove `lora_scale` from each PEFT layer
771
+ unscale_lora_layers(self, lora_scale)
772
+
773
+ if not return_dict:
774
+ return (output,)
775
+
776
+ return Transformer2DModelOutput(sample=output)
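# Illustrative sketch (not part of this diff): a minimal forward pass through a tiny
# FluxTransformer2DModel. The config values are made up for speed and are much smaller
# than the released Flux checkpoints; assumes a recent diffusers that exposes this class.
import torch
from diffusers import FluxTransformer2DModel

model = FluxTransformer2DModel(
    patch_size=1,
    in_channels=8,
    num_layers=1,
    num_single_layers=1,
    attention_head_dim=32,
    num_attention_heads=4,
    joint_attention_dim=32,
    pooled_projection_dim=16,
    guidance_embeds=False,
    axes_dims_rope=(8, 12, 12),  # must sum to attention_head_dim
)
batch, img_len, txt_len = 1, 16, 6
out = model(
    hidden_states=torch.randn(batch, img_len, 8),
    encoder_hidden_states=torch.randn(batch, txt_len, 32),
    pooled_projections=torch.randn(batch, 16),
    timestep=torch.tensor([1.0]),
    img_ids=torch.zeros(img_len, 3),
    txt_ids=torch.zeros(txt_len, 3),
)
assert out.sample.shape == (batch, img_len, 8)  # patch_size**2 * out_channels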
exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/models/transformers/transformer_hidream_image.py ADDED
@@ -0,0 +1,942 @@
1
+ from typing import Any, Dict, List, Optional, Tuple, Union
2
+
3
+ import torch
4
+ import torch.nn as nn
5
+ import torch.nn.functional as F
6
+
7
+ from ...configuration_utils import ConfigMixin, register_to_config
8
+ from ...loaders import FromOriginalModelMixin, PeftAdapterMixin
9
+ from ...models.modeling_outputs import Transformer2DModelOutput
10
+ from ...models.modeling_utils import ModelMixin
11
+ from ...utils import USE_PEFT_BACKEND, deprecate, logging, scale_lora_layers, unscale_lora_layers
12
+ from ...utils.torch_utils import maybe_allow_in_graph
13
+ from ..attention import Attention
14
+ from ..embeddings import TimestepEmbedding, Timesteps
15
+
16
+
17
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
18
+
19
+
20
+ class HiDreamImageFeedForwardSwiGLU(nn.Module):
21
+ def __init__(
22
+ self,
23
+ dim: int,
24
+ hidden_dim: int,
25
+ multiple_of: int = 256,
26
+ ffn_dim_multiplier: Optional[float] = None,
27
+ ):
28
+ super().__init__()
29
+ hidden_dim = int(2 * hidden_dim / 3)
30
+ # custom dim factor multiplier
31
+ if ffn_dim_multiplier is not None:
32
+ hidden_dim = int(ffn_dim_multiplier * hidden_dim)
33
+ hidden_dim = multiple_of * ((hidden_dim + multiple_of - 1) // multiple_of)
34
+
35
+ self.w1 = nn.Linear(dim, hidden_dim, bias=False)
36
+ self.w2 = nn.Linear(hidden_dim, dim, bias=False)
37
+ self.w3 = nn.Linear(dim, hidden_dim, bias=False)
38
+
39
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
40
+ return self.w2(torch.nn.functional.silu(self.w1(x)) * self.w3(x))
41
+
42
+
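# Illustrative sketch (not part of this diff): the hidden width computed by
# HiDreamImageFeedForwardSwiGLU above, worked through with made-up numbers.
# dim = 2560 and hidden_dim = 4 * dim = 10240 shrink to 2/3, then round up to a multiple of 256.
multiple_of = 256
hidden_dim = int(2 * (4 * 2560) / 3)  # 6826
hidden_dim = multiple_of * ((hidden_dim + multiple_of - 1) // multiple_of)
assert hidden_dim == 6912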
43
+ class HiDreamImagePooledEmbed(nn.Module):
44
+ def __init__(self, text_emb_dim, hidden_size):
45
+ super().__init__()
46
+ self.pooled_embedder = TimestepEmbedding(in_channels=text_emb_dim, time_embed_dim=hidden_size)
47
+
48
+ def forward(self, pooled_embed: torch.Tensor) -> torch.Tensor:
49
+ return self.pooled_embedder(pooled_embed)
50
+
51
+
52
+ class HiDreamImageTimestepEmbed(nn.Module):
53
+ def __init__(self, hidden_size, frequency_embedding_size=256):
54
+ super().__init__()
55
+ self.time_proj = Timesteps(num_channels=frequency_embedding_size, flip_sin_to_cos=True, downscale_freq_shift=0)
56
+ self.timestep_embedder = TimestepEmbedding(in_channels=frequency_embedding_size, time_embed_dim=hidden_size)
57
+
58
+ def forward(self, timesteps: torch.Tensor, wdtype: Optional[torch.dtype] = None):
59
+ t_emb = self.time_proj(timesteps).to(dtype=wdtype)
60
+ t_emb = self.timestep_embedder(t_emb)
61
+ return t_emb
62
+
63
+
64
+ class HiDreamImageOutEmbed(nn.Module):
65
+ def __init__(self, hidden_size, patch_size, out_channels):
66
+ super().__init__()
67
+ self.norm_final = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6)
68
+ self.linear = nn.Linear(hidden_size, patch_size * patch_size * out_channels, bias=True)
69
+ self.adaLN_modulation = nn.Sequential(nn.SiLU(), nn.Linear(hidden_size, 2 * hidden_size, bias=True))
70
+
71
+ def forward(self, hidden_states: torch.Tensor, temb: torch.Tensor) -> torch.Tensor:
72
+ shift, scale = self.adaLN_modulation(temb).chunk(2, dim=1)
73
+ hidden_states = self.norm_final(hidden_states) * (1 + scale.unsqueeze(1)) + shift.unsqueeze(1)
74
+ hidden_states = self.linear(hidden_states)
75
+ return hidden_states
76
+
77
+
78
+ class HiDreamImagePatchEmbed(nn.Module):
79
+ def __init__(
80
+ self,
81
+ patch_size=2,
82
+ in_channels=4,
83
+ out_channels=1024,
84
+ ):
85
+ super().__init__()
86
+ self.patch_size = patch_size
87
+ self.out_channels = out_channels
88
+ self.proj = nn.Linear(in_channels * patch_size * patch_size, out_channels, bias=True)
89
+
90
+ def forward(self, latent):
91
+ latent = self.proj(latent)
92
+ return latent
93
+
94
+
95
+ def rope(pos: torch.Tensor, dim: int, theta: int) -> torch.Tensor:
96
+ assert dim % 2 == 0, "The dimension must be even."
97
+
98
+ is_mps = pos.device.type == "mps"
99
+ is_npu = pos.device.type == "npu"
100
+
101
+ dtype = torch.float32 if (is_mps or is_npu) else torch.float64
102
+
103
+ scale = torch.arange(0, dim, 2, dtype=dtype, device=pos.device) / dim
104
+ omega = 1.0 / (theta**scale)
105
+
106
+ batch_size, seq_length = pos.shape
107
+ out = torch.einsum("...n,d->...nd", pos, omega)
108
+ cos_out = torch.cos(out)
109
+ sin_out = torch.sin(out)
110
+
111
+ stacked_out = torch.stack([cos_out, -sin_out, sin_out, cos_out], dim=-1)
112
+ out = stacked_out.view(batch_size, -1, dim // 2, 2, 2)
113
+ return out.float()
114
+
115
+
116
+ class HiDreamImageEmbedND(nn.Module):
117
+ def __init__(self, theta: int, axes_dim: List[int]):
118
+ super().__init__()
119
+ self.theta = theta
120
+ self.axes_dim = axes_dim
121
+
122
+ def forward(self, ids: torch.Tensor) -> torch.Tensor:
123
+ n_axes = ids.shape[-1]
124
+ emb = torch.cat(
125
+ [rope(ids[..., i], self.axes_dim[i], self.theta) for i in range(n_axes)],
126
+ dim=-3,
127
+ )
128
+ return emb.unsqueeze(2)
129
+
130
+
131
+ def apply_rope(xq: torch.Tensor, xk: torch.Tensor, freqs_cis: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
132
+ xq_ = xq.float().reshape(*xq.shape[:-1], -1, 1, 2)
133
+ xk_ = xk.float().reshape(*xk.shape[:-1], -1, 1, 2)
134
+ xq_out = freqs_cis[..., 0] * xq_[..., 0] + freqs_cis[..., 1] * xq_[..., 1]
135
+ xk_out = freqs_cis[..., 0] * xk_[..., 0] + freqs_cis[..., 1] * xk_[..., 1]
136
+ return xq_out.reshape(*xq.shape).type_as(xq), xk_out.reshape(*xk.shape).type_as(xk)
137
+
138
+
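# Illustrative sketch (not part of this diff): the shape produced by rope() above,
# assuming it is in scope as defined in this module. rope() emits one 2x2 rotation
# matrix per (position, frequency pair), which apply_rope() then applies to q/k.
import torch

pos = torch.tensor([[0.0, 1.0, 2.0]])  # (batch=1, seq=3)
freqs = rope(pos, dim=4, theta=10000)
assert freqs.shape == (1, 3, 2, 2, 2)  # (batch, seq, dim // 2, 2, 2)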
139
+ @maybe_allow_in_graph
140
+ class HiDreamAttention(Attention):
141
+ def __init__(
142
+ self,
143
+ query_dim: int,
144
+ heads: int = 8,
145
+ dim_head: int = 64,
146
+ upcast_attention: bool = False,
147
+ upcast_softmax: bool = False,
148
+ scale_qk: bool = True,
149
+ eps: float = 1e-5,
150
+ processor=None,
151
+ out_dim: int = None,
152
+ single: bool = False,
153
+ ):
154
+ super(Attention, self).__init__()
155
+ self.inner_dim = out_dim if out_dim is not None else dim_head * heads
156
+ self.query_dim = query_dim
157
+ self.upcast_attention = upcast_attention
158
+ self.upcast_softmax = upcast_softmax
159
+ self.out_dim = out_dim if out_dim is not None else query_dim
160
+
161
+ self.scale_qk = scale_qk
162
+ self.scale = dim_head**-0.5 if self.scale_qk else 1.0
163
+
164
+ self.heads = out_dim // dim_head if out_dim is not None else heads
165
+ self.sliceable_head_dim = heads
166
+ self.single = single
167
+
168
+ self.to_q = nn.Linear(query_dim, self.inner_dim)
169
+ self.to_k = nn.Linear(self.inner_dim, self.inner_dim)
170
+ self.to_v = nn.Linear(self.inner_dim, self.inner_dim)
171
+ self.to_out = nn.Linear(self.inner_dim, self.out_dim)
172
+ self.q_rms_norm = nn.RMSNorm(self.inner_dim, eps)
173
+ self.k_rms_norm = nn.RMSNorm(self.inner_dim, eps)
174
+
175
+ if not single:
176
+ self.to_q_t = nn.Linear(query_dim, self.inner_dim)
177
+ self.to_k_t = nn.Linear(self.inner_dim, self.inner_dim)
178
+ self.to_v_t = nn.Linear(self.inner_dim, self.inner_dim)
179
+ self.to_out_t = nn.Linear(self.inner_dim, self.out_dim)
180
+ self.q_rms_norm_t = nn.RMSNorm(self.inner_dim, eps)
181
+ self.k_rms_norm_t = nn.RMSNorm(self.inner_dim, eps)
182
+
183
+ self.set_processor(processor)
184
+
185
+ def forward(
186
+ self,
187
+ norm_hidden_states: torch.Tensor,
188
+ hidden_states_masks: torch.Tensor = None,
189
+ norm_encoder_hidden_states: torch.Tensor = None,
190
+ image_rotary_emb: torch.Tensor = None,
191
+ ) -> torch.Tensor:
192
+ return self.processor(
193
+ self,
194
+ hidden_states=norm_hidden_states,
195
+ hidden_states_masks=hidden_states_masks,
196
+ encoder_hidden_states=norm_encoder_hidden_states,
197
+ image_rotary_emb=image_rotary_emb,
198
+ )
199
+
200
+
201
+ class HiDreamAttnProcessor:
202
+ """Attention processor used typically in processing the SD3-like self-attention projections."""
203
+
204
+ def __call__(
205
+ self,
206
+ attn: HiDreamAttention,
207
+ hidden_states: torch.Tensor,
208
+ hidden_states_masks: Optional[torch.Tensor] = None,
209
+ encoder_hidden_states: Optional[torch.Tensor] = None,
210
+ image_rotary_emb: torch.Tensor = None,
211
+ *args,
212
+ **kwargs,
213
+ ) -> torch.Tensor:
214
+ dtype = hidden_states.dtype
215
+ batch_size = hidden_states.shape[0]
216
+
217
+ query_i = attn.q_rms_norm(attn.to_q(hidden_states)).to(dtype=dtype)
218
+ key_i = attn.k_rms_norm(attn.to_k(hidden_states)).to(dtype=dtype)
219
+ value_i = attn.to_v(hidden_states)
220
+
221
+ inner_dim = key_i.shape[-1]
222
+ head_dim = inner_dim // attn.heads
223
+
224
+ query_i = query_i.view(batch_size, -1, attn.heads, head_dim)
225
+ key_i = key_i.view(batch_size, -1, attn.heads, head_dim)
226
+ value_i = value_i.view(batch_size, -1, attn.heads, head_dim)
227
+ if hidden_states_masks is not None:
228
+ key_i = key_i * hidden_states_masks.view(batch_size, -1, 1, 1)
229
+
230
+ if not attn.single:
231
+ query_t = attn.q_rms_norm_t(attn.to_q_t(encoder_hidden_states)).to(dtype=dtype)
232
+ key_t = attn.k_rms_norm_t(attn.to_k_t(encoder_hidden_states)).to(dtype=dtype)
233
+ value_t = attn.to_v_t(encoder_hidden_states)
234
+
235
+ query_t = query_t.view(batch_size, -1, attn.heads, head_dim)
236
+ key_t = key_t.view(batch_size, -1, attn.heads, head_dim)
237
+ value_t = value_t.view(batch_size, -1, attn.heads, head_dim)
238
+
239
+ num_image_tokens = query_i.shape[1]
240
+ num_text_tokens = query_t.shape[1]
241
+ query = torch.cat([query_i, query_t], dim=1)
242
+ key = torch.cat([key_i, key_t], dim=1)
243
+ value = torch.cat([value_i, value_t], dim=1)
244
+ else:
245
+ query = query_i
246
+ key = key_i
247
+ value = value_i
248
+
249
+ if query.shape[-1] == image_rotary_emb.shape[-3] * 2:
250
+ query, key = apply_rope(query, key, image_rotary_emb)
251
+
252
+ else:
253
+ query_1, query_2 = query.chunk(2, dim=-1)
254
+ key_1, key_2 = key.chunk(2, dim=-1)
255
+ query_1, key_1 = apply_rope(query_1, key_1, image_rotary_emb)
256
+ query = torch.cat([query_1, query_2], dim=-1)
257
+ key = torch.cat([key_1, key_2], dim=-1)
258
+
259
+ hidden_states = F.scaled_dot_product_attention(
260
+ query.transpose(1, 2), key.transpose(1, 2), value.transpose(1, 2), dropout_p=0.0, is_causal=False
261
+ )
262
+
263
+ hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim)
264
+ hidden_states = hidden_states.to(query.dtype)
265
+
266
+ if not attn.single:
267
+ hidden_states_i, hidden_states_t = torch.split(hidden_states, [num_image_tokens, num_text_tokens], dim=1)
268
+ hidden_states_i = attn.to_out(hidden_states_i)
269
+ hidden_states_t = attn.to_out_t(hidden_states_t)
270
+ return hidden_states_i, hidden_states_t
271
+ else:
272
+ hidden_states = attn.to_out(hidden_states)
273
+ return hidden_states
274
+
275
+
276
+ # Modified from https://github.com/deepseek-ai/DeepSeek-V3/blob/main/inference/model.py
277
+ class MoEGate(nn.Module):
278
+ def __init__(
279
+ self,
280
+ embed_dim,
281
+ num_routed_experts=4,
282
+ num_activated_experts=2,
283
+ aux_loss_alpha=0.01,
284
+ _force_inference_output=False,
285
+ ):
286
+ super().__init__()
287
+ self.top_k = num_activated_experts
288
+ self.n_routed_experts = num_routed_experts
289
+
290
+ self.scoring_func = "softmax"
291
+ self.alpha = aux_loss_alpha
292
+ self.seq_aux = False
293
+
294
+ # topk selection algorithm
295
+ self.norm_topk_prob = False
296
+ self.gating_dim = embed_dim
297
+ self.weight = nn.Parameter(torch.randn(self.n_routed_experts, self.gating_dim) / embed_dim**0.5)
298
+
299
+ self._force_inference_output = _force_inference_output
300
+
301
+ def forward(self, hidden_states):
302
+ bsz, seq_len, h = hidden_states.shape
303
+ ### compute gating score
304
+ hidden_states = hidden_states.view(-1, h)
305
+ logits = F.linear(hidden_states, self.weight, None)
306
+ if self.scoring_func == "softmax":
307
+ scores = logits.softmax(dim=-1)
308
+ else:
309
+ raise NotImplementedError(f"insupportable scoring function for MoE gating: {self.scoring_func}")
310
+
311
+ ### select top-k experts
312
+ topk_weight, topk_idx = torch.topk(scores, k=self.top_k, dim=-1, sorted=False)
313
+
314
+ ### norm gate to sum 1
315
+ if self.top_k > 1 and self.norm_topk_prob:
316
+ denominator = topk_weight.sum(dim=-1, keepdim=True) + 1e-20
317
+ topk_weight = topk_weight / denominator
318
+
319
+ ### expert-level computation auxiliary loss
320
+ if self.training and self.alpha > 0.0 and not self._force_inference_output:
321
+ scores_for_aux = scores
322
+ aux_topk = self.top_k
323
+ # always compute aux loss based on the naive greedy topk method
324
+ topk_idx_for_aux_loss = topk_idx.view(bsz, -1)
325
+ if self.seq_aux:
326
+ scores_for_seq_aux = scores_for_aux.view(bsz, seq_len, -1)
327
+ ce = torch.zeros(bsz, self.n_routed_experts, device=hidden_states.device)
328
+ ce.scatter_add_(
329
+ 1, topk_idx_for_aux_loss, torch.ones(bsz, seq_len * aux_topk, device=hidden_states.device)
330
+ ).div_(seq_len * aux_topk / self.n_routed_experts)
331
+ aux_loss = (ce * scores_for_seq_aux.mean(dim=1)).sum(dim=1).mean() * self.alpha
332
+ else:
333
+ mask_ce = F.one_hot(topk_idx_for_aux_loss.view(-1), num_classes=self.n_routed_experts)
334
+ ce = mask_ce.float().mean(0)
335
+
336
+ Pi = scores_for_aux.mean(0)
337
+ fi = ce * self.n_routed_experts
338
+ aux_loss = (Pi * fi).sum() * self.alpha
339
+ else:
340
+ aux_loss = None
341
+ return topk_idx, topk_weight, aux_loss
342
+
343
+
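# Illustrative sketch (not part of this diff): the routing math inside MoEGate above on a
# toy tensor: softmax scores over experts, then top-k expert ids and gate weights per token
# (auxiliary loss omitted).
import torch

tokens, n_experts, top_k = 5, 4, 2
logits = torch.randn(tokens, n_experts)
scores = logits.softmax(dim=-1)
topk_weight, topk_idx = torch.topk(scores, k=top_k, dim=-1, sorted=False)
# topk_idx[t] lists the experts token t is routed to; topk_weight[t] holds their gate values.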
344
+ # Modified from https://github.com/deepseek-ai/DeepSeek-V3/blob/main/inference/model.py
345
+ class MOEFeedForwardSwiGLU(nn.Module):
346
+ def __init__(
347
+ self,
348
+ dim: int,
349
+ hidden_dim: int,
350
+ num_routed_experts: int,
351
+ num_activated_experts: int,
352
+ _force_inference_output: bool = False,
353
+ ):
354
+ super().__init__()
355
+ self.shared_experts = HiDreamImageFeedForwardSwiGLU(dim, hidden_dim // 2)
356
+ self.experts = nn.ModuleList(
357
+ [HiDreamImageFeedForwardSwiGLU(dim, hidden_dim) for i in range(num_routed_experts)]
358
+ )
359
+ self._force_inference_output = _force_inference_output
360
+ self.gate = MoEGate(
361
+ embed_dim=dim,
362
+ num_routed_experts=num_routed_experts,
363
+ num_activated_experts=num_activated_experts,
364
+ _force_inference_output=_force_inference_output,
365
+ )
366
+ self.num_activated_experts = num_activated_experts
367
+
368
+ def forward(self, x):
369
+ wtype = x.dtype
370
+ identity = x
371
+ orig_shape = x.shape
372
+ topk_idx, topk_weight, aux_loss = self.gate(x)
373
+ x = x.view(-1, x.shape[-1])
374
+ flat_topk_idx = topk_idx.view(-1)
375
+ if self.training and not self._force_inference_output:
376
+ x = x.repeat_interleave(self.num_activated_experts, dim=0)
377
+ y = torch.empty_like(x, dtype=wtype)
378
+ for i, expert in enumerate(self.experts):
379
+ y[flat_topk_idx == i] = expert(x[flat_topk_idx == i]).to(dtype=wtype)
380
+ y = (y.view(*topk_weight.shape, -1) * topk_weight.unsqueeze(-1)).sum(dim=1)
381
+ y = y.view(*orig_shape).to(dtype=wtype)
382
+ # y = AddAuxiliaryLoss.apply(y, aux_loss)
383
+ else:
384
+ y = self.moe_infer(x, flat_topk_idx, topk_weight.view(-1, 1)).view(*orig_shape)
385
+ y = y + self.shared_experts(identity)
386
+ return y
387
+
388
+ @torch.no_grad()
389
+ def moe_infer(self, x, flat_expert_indices, flat_expert_weights):
390
+ expert_cache = torch.zeros_like(x)
391
+ idxs = flat_expert_indices.argsort()
392
+ tokens_per_expert = flat_expert_indices.bincount().cpu().numpy().cumsum(0)
393
+ token_idxs = idxs // self.num_activated_experts
394
+ for i, end_idx in enumerate(tokens_per_expert):
395
+ start_idx = 0 if i == 0 else tokens_per_expert[i - 1]
396
+ if start_idx == end_idx:
397
+ continue
398
+ expert = self.experts[i]
399
+ exp_token_idx = token_idxs[start_idx:end_idx]
400
+ expert_tokens = x[exp_token_idx]
401
+ expert_out = expert(expert_tokens)
402
+ expert_out.mul_(flat_expert_weights[idxs[start_idx:end_idx]])
403
+
404
+ # for fp16 and other dtype
405
+ expert_cache = expert_cache.to(expert_out.dtype)
406
+ expert_cache.scatter_reduce_(0, exp_token_idx.view(-1, 1).repeat(1, x.shape[-1]), expert_out, reduce="sum")
407
+ return expert_cache
408
+
409
+
410
+ class TextProjection(nn.Module):
411
+ def __init__(self, in_features, hidden_size):
412
+ super().__init__()
413
+ self.linear = nn.Linear(in_features=in_features, out_features=hidden_size, bias=False)
414
+
415
+ def forward(self, caption):
416
+ hidden_states = self.linear(caption)
417
+ return hidden_states
418
+
419
+
420
+ @maybe_allow_in_graph
421
+ class HiDreamImageSingleTransformerBlock(nn.Module):
422
+ def __init__(
423
+ self,
424
+ dim: int,
425
+ num_attention_heads: int,
426
+ attention_head_dim: int,
427
+ num_routed_experts: int = 4,
428
+ num_activated_experts: int = 2,
429
+ _force_inference_output: bool = False,
430
+ ):
431
+ super().__init__()
432
+ self.num_attention_heads = num_attention_heads
433
+ self.adaLN_modulation = nn.Sequential(nn.SiLU(), nn.Linear(dim, 6 * dim, bias=True))
434
+
435
+ # 1. Attention
436
+ self.norm1_i = nn.LayerNorm(dim, eps=1e-06, elementwise_affine=False)
437
+ self.attn1 = HiDreamAttention(
438
+ query_dim=dim,
439
+ heads=num_attention_heads,
440
+ dim_head=attention_head_dim,
441
+ processor=HiDreamAttnProcessor(),
442
+ single=True,
443
+ )
444
+
445
+ # 3. Feed-forward
446
+ self.norm3_i = nn.LayerNorm(dim, eps=1e-06, elementwise_affine=False)
447
+ if num_routed_experts > 0:
448
+ self.ff_i = MOEFeedForwardSwiGLU(
449
+ dim=dim,
450
+ hidden_dim=4 * dim,
451
+ num_routed_experts=num_routed_experts,
452
+ num_activated_experts=num_activated_experts,
453
+ _force_inference_output=_force_inference_output,
454
+ )
455
+ else:
456
+ self.ff_i = HiDreamImageFeedForwardSwiGLU(dim=dim, hidden_dim=4 * dim)
457
+
458
+ def forward(
459
+ self,
460
+ hidden_states: torch.Tensor,
461
+ hidden_states_masks: Optional[torch.Tensor] = None,
462
+ encoder_hidden_states: Optional[torch.Tensor] = None,
463
+ temb: Optional[torch.Tensor] = None,
464
+ image_rotary_emb: torch.Tensor = None,
465
+ ) -> torch.Tensor:
466
+ wtype = hidden_states.dtype
467
+ shift_msa_i, scale_msa_i, gate_msa_i, shift_mlp_i, scale_mlp_i, gate_mlp_i = self.adaLN_modulation(temb)[
468
+ :, None
469
+ ].chunk(6, dim=-1)
470
+
471
+ # 1. MM-Attention
472
+ norm_hidden_states = self.norm1_i(hidden_states).to(dtype=wtype)
473
+ norm_hidden_states = norm_hidden_states * (1 + scale_msa_i) + shift_msa_i
474
+ attn_output_i = self.attn1(
475
+ norm_hidden_states,
476
+ hidden_states_masks,
477
+ image_rotary_emb=image_rotary_emb,
478
+ )
479
+ hidden_states = gate_msa_i * attn_output_i + hidden_states
480
+
481
+ # 2. Feed-forward
482
+ norm_hidden_states = self.norm3_i(hidden_states).to(dtype=wtype)
483
+ norm_hidden_states = norm_hidden_states * (1 + scale_mlp_i) + shift_mlp_i
484
+ ff_output_i = gate_mlp_i * self.ff_i(norm_hidden_states.to(dtype=wtype))
485
+ hidden_states = ff_output_i + hidden_states
486
+ return hidden_states
487
+
488
+
489
+ @maybe_allow_in_graph
490
+ class HiDreamImageTransformerBlock(nn.Module):
491
+ def __init__(
492
+ self,
493
+ dim: int,
494
+ num_attention_heads: int,
495
+ attention_head_dim: int,
496
+ num_routed_experts: int = 4,
497
+ num_activated_experts: int = 2,
498
+ _force_inference_output: bool = False,
499
+ ):
500
+ super().__init__()
501
+ self.num_attention_heads = num_attention_heads
502
+ self.adaLN_modulation = nn.Sequential(nn.SiLU(), nn.Linear(dim, 12 * dim, bias=True))
503
+
504
+ # 1. Attention
505
+ self.norm1_i = nn.LayerNorm(dim, eps=1e-06, elementwise_affine=False)
506
+ self.norm1_t = nn.LayerNorm(dim, eps=1e-06, elementwise_affine=False)
507
+ self.attn1 = HiDreamAttention(
508
+ query_dim=dim,
509
+ heads=num_attention_heads,
510
+ dim_head=attention_head_dim,
511
+ processor=HiDreamAttnProcessor(),
512
+ single=False,
513
+ )
514
+
515
+ # 3. Feed-forward
516
+ self.norm3_i = nn.LayerNorm(dim, eps=1e-06, elementwise_affine=False)
517
+ if num_routed_experts > 0:
518
+ self.ff_i = MOEFeedForwardSwiGLU(
519
+ dim=dim,
520
+ hidden_dim=4 * dim,
521
+ num_routed_experts=num_routed_experts,
522
+ num_activated_experts=num_activated_experts,
523
+ _force_inference_output=_force_inference_output,
524
+ )
525
+ else:
526
+ self.ff_i = HiDreamImageFeedForwardSwiGLU(dim=dim, hidden_dim=4 * dim)
527
+ self.norm3_t = nn.LayerNorm(dim, eps=1e-06, elementwise_affine=False)
528
+ self.ff_t = HiDreamImageFeedForwardSwiGLU(dim=dim, hidden_dim=4 * dim)
529
+
530
+ def forward(
531
+ self,
532
+ hidden_states: torch.Tensor,
533
+ hidden_states_masks: Optional[torch.Tensor] = None,
534
+ encoder_hidden_states: Optional[torch.Tensor] = None,
535
+ temb: Optional[torch.Tensor] = None,
536
+ image_rotary_emb: torch.Tensor = None,
537
+ ) -> torch.Tensor:
538
+ wtype = hidden_states.dtype
539
+ (
540
+ shift_msa_i,
541
+ scale_msa_i,
542
+ gate_msa_i,
543
+ shift_mlp_i,
544
+ scale_mlp_i,
545
+ gate_mlp_i,
546
+ shift_msa_t,
547
+ scale_msa_t,
548
+ gate_msa_t,
549
+ shift_mlp_t,
550
+ scale_mlp_t,
551
+ gate_mlp_t,
552
+ ) = self.adaLN_modulation(temb)[:, None].chunk(12, dim=-1)
553
+
554
+ # 1. MM-Attention
555
+ norm_hidden_states = self.norm1_i(hidden_states).to(dtype=wtype)
556
+ norm_hidden_states = norm_hidden_states * (1 + scale_msa_i) + shift_msa_i
557
+ norm_encoder_hidden_states = self.norm1_t(encoder_hidden_states).to(dtype=wtype)
558
+ norm_encoder_hidden_states = norm_encoder_hidden_states * (1 + scale_msa_t) + shift_msa_t
559
+
560
+ attn_output_i, attn_output_t = self.attn1(
561
+ norm_hidden_states,
562
+ hidden_states_masks,
563
+ norm_encoder_hidden_states,
564
+ image_rotary_emb=image_rotary_emb,
565
+ )
566
+
567
+ hidden_states = gate_msa_i * attn_output_i + hidden_states
568
+ encoder_hidden_states = gate_msa_t * attn_output_t + encoder_hidden_states
569
+
570
+ # 2. Feed-forward
571
+ norm_hidden_states = self.norm3_i(hidden_states).to(dtype=wtype)
572
+ norm_hidden_states = norm_hidden_states * (1 + scale_mlp_i) + shift_mlp_i
573
+ norm_encoder_hidden_states = self.norm3_t(encoder_hidden_states).to(dtype=wtype)
574
+ norm_encoder_hidden_states = norm_encoder_hidden_states * (1 + scale_mlp_t) + shift_mlp_t
575
+
576
+ ff_output_i = gate_mlp_i * self.ff_i(norm_hidden_states)
577
+ ff_output_t = gate_mlp_t * self.ff_t(norm_encoder_hidden_states)
578
+ hidden_states = ff_output_i + hidden_states
579
+ encoder_hidden_states = ff_output_t + encoder_hidden_states
580
+ return hidden_states, encoder_hidden_states
581
+
582
+
583
+ class HiDreamBlock(nn.Module):
584
+ def __init__(self, block: Union[HiDreamImageTransformerBlock, HiDreamImageSingleTransformerBlock]):
585
+ super().__init__()
586
+ self.block = block
587
+
588
+ def forward(
589
+ self,
590
+ hidden_states: torch.Tensor,
591
+ hidden_states_masks: Optional[torch.Tensor] = None,
592
+ encoder_hidden_states: Optional[torch.Tensor] = None,
593
+ temb: Optional[torch.Tensor] = None,
594
+ image_rotary_emb: torch.Tensor = None,
595
+ ) -> torch.Tensor:
596
+ return self.block(
597
+ hidden_states=hidden_states,
598
+ hidden_states_masks=hidden_states_masks,
599
+ encoder_hidden_states=encoder_hidden_states,
600
+ temb=temb,
601
+ image_rotary_emb=image_rotary_emb,
602
+ )
603
+
604
+
605
+ class HiDreamImageTransformer2DModel(ModelMixin, ConfigMixin, PeftAdapterMixin, FromOriginalModelMixin):
+    _supports_gradient_checkpointing = True
+    _no_split_modules = ["HiDreamImageTransformerBlock", "HiDreamImageSingleTransformerBlock"]
+
+    @register_to_config
+    def __init__(
+        self,
+        patch_size: Optional[int] = None,
+        in_channels: int = 64,
+        out_channels: Optional[int] = None,
+        num_layers: int = 16,
+        num_single_layers: int = 32,
+        attention_head_dim: int = 128,
+        num_attention_heads: int = 20,
+        caption_channels: List[int] = None,
+        text_emb_dim: int = 2048,
+        num_routed_experts: int = 4,
+        num_activated_experts: int = 2,
+        axes_dims_rope: Tuple[int, int] = (32, 32),
+        max_resolution: Tuple[int, int] = (128, 128),
+        llama_layers: List[int] = None,
+        force_inference_output: bool = False,
+    ):
+        super().__init__()
+        self.out_channels = out_channels or in_channels
+        self.inner_dim = num_attention_heads * attention_head_dim
+
+        self.t_embedder = HiDreamImageTimestepEmbed(self.inner_dim)
+        self.p_embedder = HiDreamImagePooledEmbed(text_emb_dim, self.inner_dim)
+        self.x_embedder = HiDreamImagePatchEmbed(
+            patch_size=patch_size,
+            in_channels=in_channels,
+            out_channels=self.inner_dim,
+        )
+        self.pe_embedder = HiDreamImageEmbedND(theta=10000, axes_dim=axes_dims_rope)
+
+        self.double_stream_blocks = nn.ModuleList(
+            [
+                HiDreamBlock(
+                    HiDreamImageTransformerBlock(
+                        dim=self.inner_dim,
+                        num_attention_heads=num_attention_heads,
+                        attention_head_dim=attention_head_dim,
+                        num_routed_experts=num_routed_experts,
+                        num_activated_experts=num_activated_experts,
+                        _force_inference_output=force_inference_output,
+                    )
+                )
+                for _ in range(num_layers)
+            ]
+        )
+
+        self.single_stream_blocks = nn.ModuleList(
+            [
+                HiDreamBlock(
+                    HiDreamImageSingleTransformerBlock(
+                        dim=self.inner_dim,
+                        num_attention_heads=num_attention_heads,
+                        attention_head_dim=attention_head_dim,
+                        num_routed_experts=num_routed_experts,
+                        num_activated_experts=num_activated_experts,
+                        _force_inference_output=force_inference_output,
+                    )
+                )
+                for _ in range(num_single_layers)
+            ]
+        )
+
+        self.final_layer = HiDreamImageOutEmbed(self.inner_dim, patch_size, self.out_channels)
+
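+        # One text projection per transformer block for the per-layer Llama hidden states, plus a final projection
+        # for the T5 embeddings.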
+        caption_channels = [caption_channels[1]] * (num_layers + num_single_layers) + [caption_channels[0]]
+        caption_projection = []
+        for caption_channel in caption_channels:
+            caption_projection.append(TextProjection(in_features=caption_channel, hidden_size=self.inner_dim))
+        self.caption_projection = nn.ModuleList(caption_projection)
+        self.max_seq = max_resolution[0] * max_resolution[1] // (patch_size * patch_size)
+
+        self.gradient_checkpointing = False
+
+    def unpatchify(self, x: torch.Tensor, img_sizes: List[Tuple[int, int]], is_training: bool) -> torch.Tensor:
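+        # Training keeps the padded patch-sequence layout (unless `force_inference_output`); inference reassembles
+        # full-resolution latents per sample from `img_sizes`.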
+        if is_training and not self.config.force_inference_output:
+            B, S, F = x.shape
+            C = F // (self.config.patch_size * self.config.patch_size)
+            x = (
+                x.reshape(B, S, self.config.patch_size, self.config.patch_size, C)
+                .permute(0, 4, 1, 2, 3)
+                .reshape(B, C, S, self.config.patch_size * self.config.patch_size)
+            )
+        else:
+            x_arr = []
+            p1 = self.config.patch_size
+            p2 = self.config.patch_size
+            for i, img_size in enumerate(img_sizes):
+                pH, pW = img_size
+                t = x[i, : pH * pW].reshape(1, pH, pW, -1)
+                F_token = t.shape[-1]
+                C = F_token // (p1 * p2)
+                t = t.reshape(1, pH, pW, p1, p2, C)
+                t = t.permute(0, 5, 1, 3, 2, 4)
+                t = t.reshape(1, C, pH * p1, pW * p2)
+                x_arr.append(t)
+            x = torch.cat(x_arr, dim=0)
+        return x
+
+    def patchify(self, hidden_states):
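+        # Convert latents into patch tokens. Non-square latents are padded to `self.max_seq` tokens and a 0/1
+        # validity mask is returned; square latents return `hidden_states_masks=None`.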
+        batch_size, channels, height, width = hidden_states.shape
+        patch_size = self.config.patch_size
+        patch_height, patch_width = height // patch_size, width // patch_size
+        device = hidden_states.device
+        dtype = hidden_states.dtype
+
+        # create img_sizes
+        img_sizes = torch.tensor([patch_height, patch_width], dtype=torch.int64, device=device).reshape(-1)
+        img_sizes = img_sizes.unsqueeze(0).repeat(batch_size, 1)
+
+        # create hidden_states_masks
+        if hidden_states.shape[-2] != hidden_states.shape[-1]:
+            hidden_states_masks = torch.zeros((batch_size, self.max_seq), dtype=dtype, device=device)
+            hidden_states_masks[:, : patch_height * patch_width] = 1.0
+        else:
+            hidden_states_masks = None
+
+        # create img_ids
+        img_ids = torch.zeros(patch_height, patch_width, 3, device=device)
+        row_indices = torch.arange(patch_height, device=device)[:, None]
+        col_indices = torch.arange(patch_width, device=device)[None, :]
+        img_ids[..., 1] = img_ids[..., 1] + row_indices
+        img_ids[..., 2] = img_ids[..., 2] + col_indices
+        img_ids = img_ids.reshape(patch_height * patch_width, -1)
+
+        if hidden_states.shape[-2] != hidden_states.shape[-1]:
+            # Handle non-square latents
+            img_ids_pad = torch.zeros(self.max_seq, 3, device=device)
+            img_ids_pad[: patch_height * patch_width, :] = img_ids
+            img_ids = img_ids_pad.unsqueeze(0).repeat(batch_size, 1, 1)
+        else:
+            img_ids = img_ids.unsqueeze(0).repeat(batch_size, 1, 1)
+
+        # patchify hidden_states
+        if hidden_states.shape[-2] != hidden_states.shape[-1]:
+            # Handle non-square latents
+            out = torch.zeros(
+                (batch_size, channels, self.max_seq, patch_size * patch_size),
+                dtype=dtype,
+                device=device,
+            )
+            hidden_states = hidden_states.reshape(
+                batch_size, channels, patch_height, patch_size, patch_width, patch_size
+            )
+            hidden_states = hidden_states.permute(0, 1, 2, 4, 3, 5)
+            hidden_states = hidden_states.reshape(
+                batch_size, channels, patch_height * patch_width, patch_size * patch_size
+            )
+            out[:, :, 0 : patch_height * patch_width] = hidden_states
+            hidden_states = out
+            hidden_states = hidden_states.permute(0, 2, 3, 1).reshape(
+                batch_size, self.max_seq, patch_size * patch_size * channels
+            )
+
+        else:
+            # Handle square latents
+            hidden_states = hidden_states.reshape(
+                batch_size, channels, patch_height, patch_size, patch_width, patch_size
+            )
+            hidden_states = hidden_states.permute(0, 2, 4, 3, 5, 1)
+            hidden_states = hidden_states.reshape(
+                batch_size, patch_height * patch_width, patch_size * patch_size * channels
+            )
+
+        return hidden_states, hidden_states_masks, img_sizes, img_ids
+
+    def forward(
+        self,
+        hidden_states: torch.Tensor,
+        timesteps: torch.LongTensor = None,
+        encoder_hidden_states_t5: torch.Tensor = None,
+        encoder_hidden_states_llama3: torch.Tensor = None,
+        pooled_embeds: torch.Tensor = None,
+        img_ids: Optional[torch.Tensor] = None,
+        img_sizes: Optional[List[Tuple[int, int]]] = None,
+        hidden_states_masks: Optional[torch.Tensor] = None,
+        attention_kwargs: Optional[Dict[str, Any]] = None,
+        return_dict: bool = True,
+        **kwargs,
+    ):
+        encoder_hidden_states = kwargs.get("encoder_hidden_states", None)
+
+        if encoder_hidden_states is not None:
+            deprecation_message = "The `encoder_hidden_states` argument is deprecated. Please use `encoder_hidden_states_t5` and `encoder_hidden_states_llama3` instead."
+            deprecate("encoder_hidden_states", "0.35.0", deprecation_message)
+            encoder_hidden_states_t5 = encoder_hidden_states[0]
+            encoder_hidden_states_llama3 = encoder_hidden_states[1]
+
+        if img_ids is not None and img_sizes is not None and hidden_states_masks is None:
+            deprecation_message = (
+                "Passing `img_ids` and `img_sizes` with unpatchified `hidden_states` is deprecated and will be ignored."
+            )
+            deprecate("img_ids", "0.35.0", deprecation_message)
+
+        if hidden_states_masks is not None and (img_ids is None or img_sizes is None):
+            raise ValueError("if `hidden_states_masks` is passed, `img_ids` and `img_sizes` must also be passed.")
+        elif hidden_states_masks is not None and hidden_states.ndim != 3:
+            raise ValueError(
+                "if `hidden_states_masks` is passed, `hidden_states` must be a 3D tensor with shape (batch_size, patch_height * patch_width, patch_size * patch_size * channels)"
+            )
+
+        if attention_kwargs is not None:
+            attention_kwargs = attention_kwargs.copy()
+            lora_scale = attention_kwargs.pop("scale", 1.0)
+        else:
+            lora_scale = 1.0
+
+        if USE_PEFT_BACKEND:
+            # weight the lora layers by setting `lora_scale` for each PEFT layer
+            scale_lora_layers(self, lora_scale)
+        else:
+            if attention_kwargs is not None and attention_kwargs.get("scale", None) is not None:
+                logger.warning(
+                    "Passing `scale` via `attention_kwargs` when not using the PEFT backend is ineffective."
+                )
+
+        # spatial forward
+        batch_size = hidden_states.shape[0]
+        hidden_states_type = hidden_states.dtype
+
+        # Patchify the input
+        if hidden_states_masks is None:
+            hidden_states, hidden_states_masks, img_sizes, img_ids = self.patchify(hidden_states)
+
+        # Embed the hidden states
+        hidden_states = self.x_embedder(hidden_states)
+
+        # 0. time
+        timesteps = self.t_embedder(timesteps, hidden_states_type)
+        p_embedder = self.p_embedder(pooled_embeds)
+        temb = timesteps + p_embedder
+
+        encoder_hidden_states = [encoder_hidden_states_llama3[k] for k in self.config.llama_layers]
+
+        if self.caption_projection is not None:
+            new_encoder_hidden_states = []
+            for i, enc_hidden_state in enumerate(encoder_hidden_states):
+                enc_hidden_state = self.caption_projection[i](enc_hidden_state)
+                enc_hidden_state = enc_hidden_state.view(batch_size, -1, hidden_states.shape[-1])
+                new_encoder_hidden_states.append(enc_hidden_state)
+            encoder_hidden_states = new_encoder_hidden_states
+            encoder_hidden_states_t5 = self.caption_projection[-1](encoder_hidden_states_t5)
+            encoder_hidden_states_t5 = encoder_hidden_states_t5.view(batch_size, -1, hidden_states.shape[-1])
+            encoder_hidden_states.append(encoder_hidden_states_t5)
+
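+        # Text tokens use all-zero position ids; rotary embeddings are built over the image ids followed by the
+        # text ids.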
+        txt_ids = torch.zeros(
+            batch_size,
+            encoder_hidden_states[-1].shape[1]
+            + encoder_hidden_states[-2].shape[1]
+            + encoder_hidden_states[0].shape[1],
+            3,
+            device=img_ids.device,
+            dtype=img_ids.dtype,
+        )
+        ids = torch.cat((img_ids, txt_ids), dim=1)
+        image_rotary_emb = self.pe_embedder(ids)
+
+        # 2. Blocks
+        block_id = 0
+        initial_encoder_hidden_states = torch.cat([encoder_hidden_states[-1], encoder_hidden_states[-2]], dim=1)
+        initial_encoder_hidden_states_seq_len = initial_encoder_hidden_states.shape[1]
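+        # Each double-stream block sees the shared text tokens (T5 + last selected Llama layer) concatenated with
+        # its own per-layer Llama tokens; only the shared prefix is carried over to the next block.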
+        for bid, block in enumerate(self.double_stream_blocks):
+            cur_llama31_encoder_hidden_states = encoder_hidden_states[block_id]
+            cur_encoder_hidden_states = torch.cat(
+                [initial_encoder_hidden_states, cur_llama31_encoder_hidden_states], dim=1
+            )
+            if torch.is_grad_enabled() and self.gradient_checkpointing:
+                hidden_states, initial_encoder_hidden_states = self._gradient_checkpointing_func(
+                    block,
+                    hidden_states,
+                    hidden_states_masks,
+                    cur_encoder_hidden_states,
+                    temb,
+                    image_rotary_emb,
+                )
+            else:
+                hidden_states, initial_encoder_hidden_states = block(
+                    hidden_states=hidden_states,
+                    hidden_states_masks=hidden_states_masks,
+                    encoder_hidden_states=cur_encoder_hidden_states,
+                    temb=temb,
+                    image_rotary_emb=image_rotary_emb,
+                )
+            initial_encoder_hidden_states = initial_encoder_hidden_states[:, :initial_encoder_hidden_states_seq_len]
+            block_id += 1
+
+        image_tokens_seq_len = hidden_states.shape[1]
+        hidden_states = torch.cat([hidden_states, initial_encoder_hidden_states], dim=1)
+        hidden_states_seq_len = hidden_states.shape[1]
+        if hidden_states_masks is not None:
+            encoder_attention_mask_ones = torch.ones(
+                (batch_size, initial_encoder_hidden_states.shape[1] + cur_llama31_encoder_hidden_states.shape[1]),
+                device=hidden_states_masks.device,
+                dtype=hidden_states_masks.dtype,
+            )
+            hidden_states_masks = torch.cat([hidden_states_masks, encoder_attention_mask_ones], dim=1)
+
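+        # Single-stream blocks operate on the fused image + text sequence; each block temporarily appends its
+        # per-layer Llama tokens and the sequence is trimmed back to `hidden_states_seq_len` afterwards.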
+        for bid, block in enumerate(self.single_stream_blocks):
+            cur_llama31_encoder_hidden_states = encoder_hidden_states[block_id]
+            hidden_states = torch.cat([hidden_states, cur_llama31_encoder_hidden_states], dim=1)
+            if torch.is_grad_enabled() and self.gradient_checkpointing:
+                hidden_states = self._gradient_checkpointing_func(
+                    block,
+                    hidden_states,
+                    hidden_states_masks,
+                    None,
+                    temb,
+                    image_rotary_emb,
+                )
+            else:
+                hidden_states = block(
+                    hidden_states=hidden_states,
+                    hidden_states_masks=hidden_states_masks,
+                    encoder_hidden_states=None,
+                    temb=temb,
+                    image_rotary_emb=image_rotary_emb,
+                )
+            hidden_states = hidden_states[:, :hidden_states_seq_len]
+            block_id += 1
+
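+        # Keep only the image tokens, project them back to patch pixels, and unpatchify into latent images.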
+        hidden_states = hidden_states[:, :image_tokens_seq_len, ...]
+        output = self.final_layer(hidden_states, temb)
+        output = self.unpatchify(output, img_sizes, self.training)
+        if hidden_states_masks is not None:
+            hidden_states_masks = hidden_states_masks[:, :image_tokens_seq_len]
+
+        if USE_PEFT_BACKEND:
+            # remove `lora_scale` from each PEFT layer
+            unscale_lora_layers(self, lora_scale)
+
+        if not return_dict:
+            return (output,)
+        return Transformer2DModelOutput(sample=output)