SuperCS committed on
Commit
4c25524
·
verified ·
1 Parent(s): eddf9bb

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes. See raw diff
Files changed (50) hide show
  1. exp_code/1_benchmark/DiffSynth-Studio/diffsynth.egg-info/PKG-INFO +10 -0
  2. exp_code/1_benchmark/DiffSynth-Studio/diffsynth.egg-info/SOURCES.txt +243 -0
  3. exp_code/1_benchmark/DiffSynth-Studio/diffsynth.egg-info/dependency_links.txt +1 -0
  4. exp_code/1_benchmark/DiffSynth-Studio/diffsynth.egg-info/requires.txt +16 -0
  5. exp_code/1_benchmark/DiffSynth-Studio/diffsynth.egg-info/top_level.txt +1 -0
  6. exp_code/1_benchmark/DiffSynth-Studio/diffsynth/models/__pycache__/sdxl_controlnet.cpython-311.pyc +0 -0
  7. exp_code/1_benchmark/DiffSynth-Studio/diffsynth/models/__pycache__/sdxl_ipadapter.cpython-311.pyc +0 -0
  8. exp_code/1_benchmark/DiffSynth-Studio/diffsynth/models/__pycache__/sdxl_motion.cpython-311.pyc +0 -0
  9. exp_code/1_benchmark/DiffSynth-Studio/diffsynth/models/__pycache__/sdxl_text_encoder.cpython-311.pyc +0 -0
  10. exp_code/1_benchmark/DiffSynth-Studio/diffsynth/models/__pycache__/sdxl_vae_decoder.cpython-311.pyc +0 -0
  11. exp_code/1_benchmark/DiffSynth-Studio/diffsynth/models/__pycache__/sdxl_vae_encoder.cpython-311.pyc +0 -0
  12. exp_code/1_benchmark/DiffSynth-Studio/diffsynth/models/__pycache__/step1x_connector.cpython-311.pyc +0 -0
  13. exp_code/1_benchmark/DiffSynth-Studio/diffsynth/models/__pycache__/stepvideo_dit.cpython-311.pyc +0 -0
  14. exp_code/1_benchmark/DiffSynth-Studio/diffsynth/models/__pycache__/stepvideo_text_encoder.cpython-311.pyc +0 -0
  15. exp_code/1_benchmark/DiffSynth-Studio/diffsynth/models/__pycache__/stepvideo_vae.cpython-311.pyc +0 -0
  16. exp_code/1_benchmark/DiffSynth-Studio/diffsynth/models/__pycache__/svd_image_encoder.cpython-311.pyc +0 -0
  17. exp_code/1_benchmark/DiffSynth-Studio/diffsynth/models/__pycache__/svd_vae_decoder.cpython-311.pyc +0 -0
  18. exp_code/1_benchmark/DiffSynth-Studio/diffsynth/models/__pycache__/svd_vae_encoder.cpython-311.pyc +0 -0
  19. exp_code/1_benchmark/DiffSynth-Studio/diffsynth/models/__pycache__/tiler.cpython-311.pyc +0 -0
  20. exp_code/1_benchmark/DiffSynth-Studio/diffsynth/models/__pycache__/utils.cpython-311.pyc +0 -0
  21. exp_code/1_benchmark/DiffSynth-Studio/diffsynth/models/__pycache__/wan_video_camera_controller.cpython-311.pyc +0 -0
  22. exp_code/1_benchmark/DiffSynth-Studio/diffsynth/models/__pycache__/wan_video_dit.cpython-311.pyc +0 -0
  23. exp_code/1_benchmark/DiffSynth-Studio/diffsynth/models/__pycache__/wan_video_dit_s2v.cpython-311.pyc +0 -0
  24. exp_code/1_benchmark/DiffSynth-Studio/diffsynth/models/__pycache__/wan_video_image_encoder.cpython-311.pyc +0 -0
  25. exp_code/1_benchmark/DiffSynth-Studio/diffsynth/models/__pycache__/wan_video_motion_controller.cpython-311.pyc +0 -0
  26. exp_code/1_benchmark/DiffSynth-Studio/diffsynth/models/__pycache__/wan_video_text_encoder.cpython-311.pyc +0 -0
  27. exp_code/1_benchmark/DiffSynth-Studio/diffsynth/models/__pycache__/wan_video_vace.cpython-311.pyc +0 -0
  28. exp_code/1_benchmark/DiffSynth-Studio/diffsynth/models/__pycache__/wan_video_vae.cpython-311.pyc +0 -0
  29. exp_code/1_benchmark/DiffSynth-Studio/diffsynth/models/__pycache__/wav2vec.cpython-311.pyc +0 -0
  30. exp_code/1_benchmark/DiffSynth-Studio/diffsynth/pipelines/__init__.py +15 -0
  31. exp_code/1_benchmark/DiffSynth-Studio/diffsynth/pipelines/__pycache__/__init__.cpython-311.pyc +0 -0
  32. exp_code/1_benchmark/DiffSynth-Studio/diffsynth/pipelines/__pycache__/base.cpython-311.pyc +0 -0
  33. exp_code/1_benchmark/DiffSynth-Studio/diffsynth/pipelines/__pycache__/cog_video.cpython-311.pyc +0 -0
  34. exp_code/1_benchmark/DiffSynth-Studio/diffsynth/pipelines/__pycache__/dancer.cpython-311.pyc +0 -0
  35. exp_code/1_benchmark/DiffSynth-Studio/diffsynth/pipelines/__pycache__/flux_image.cpython-311.pyc +0 -0
  36. exp_code/1_benchmark/DiffSynth-Studio/diffsynth/pipelines/__pycache__/hunyuan_image.cpython-311.pyc +0 -0
  37. exp_code/1_benchmark/DiffSynth-Studio/diffsynth/pipelines/__pycache__/hunyuan_video.cpython-311.pyc +0 -0
  38. exp_code/1_benchmark/DiffSynth-Studio/diffsynth/pipelines/__pycache__/omnigen_image.cpython-311.pyc +0 -0
  39. exp_code/1_benchmark/DiffSynth-Studio/diffsynth/pipelines/__pycache__/pipeline_runner.cpython-311.pyc +0 -0
  40. exp_code/1_benchmark/DiffSynth-Studio/diffsynth/pipelines/__pycache__/sd3_image.cpython-311.pyc +0 -0
  41. exp_code/1_benchmark/DiffSynth-Studio/diffsynth/pipelines/__pycache__/sd_image.cpython-311.pyc +0 -0
  42. exp_code/1_benchmark/DiffSynth-Studio/diffsynth/pipelines/__pycache__/sd_video.cpython-311.pyc +0 -0
  43. exp_code/1_benchmark/DiffSynth-Studio/diffsynth/pipelines/__pycache__/sdxl_image.cpython-311.pyc +0 -0
  44. exp_code/1_benchmark/DiffSynth-Studio/diffsynth/pipelines/__pycache__/sdxl_video.cpython-311.pyc +0 -0
  45. exp_code/1_benchmark/DiffSynth-Studio/diffsynth/pipelines/__pycache__/step_video.cpython-311.pyc +0 -0
  46. exp_code/1_benchmark/DiffSynth-Studio/diffsynth/pipelines/__pycache__/svd_video.cpython-311.pyc +0 -0
  47. exp_code/1_benchmark/DiffSynth-Studio/diffsynth/pipelines/__pycache__/wan_video.cpython-311.pyc +0 -0
  48. exp_code/1_benchmark/DiffSynth-Studio/diffsynth/pipelines/__pycache__/wan_video_new.cpython-311.pyc +0 -0
  49. exp_code/1_benchmark/DiffSynth-Studio/diffsynth/pipelines/base.py +127 -0
  50. exp_code/1_benchmark/DiffSynth-Studio/diffsynth/pipelines/cog_video.py +135 -0
exp_code/1_benchmark/DiffSynth-Studio/diffsynth.egg-info/PKG-INFO ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ Metadata-Version: 2.1
2
+ Name: diffsynth
3
+ Version: 1.1.7
4
+ Summary: Enjoy the magic of Diffusion models!
5
+ Author: Artiprocher
6
+ Classifier: Programming Language :: Python :: 3
7
+ Classifier: License :: OSI Approved :: Apache Software License
8
+ Classifier: Operating System :: OS Independent
9
+ Requires-Python: >=3.6
10
+ License-File: LICENSE
exp_code/1_benchmark/DiffSynth-Studio/diffsynth.egg-info/SOURCES.txt ADDED
@@ -0,0 +1,243 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ LICENSE
2
+ README.md
3
+ setup.py
4
+ diffsynth/__init__.py
5
+ diffsynth.egg-info/PKG-INFO
6
+ diffsynth.egg-info/SOURCES.txt
7
+ diffsynth.egg-info/dependency_links.txt
8
+ diffsynth.egg-info/requires.txt
9
+ diffsynth.egg-info/top_level.txt
10
+ diffsynth/configs/__init__.py
11
+ diffsynth/configs/model_config.py
12
+ diffsynth/controlnets/__init__.py
13
+ diffsynth/controlnets/controlnet_unit.py
14
+ diffsynth/controlnets/processors.py
15
+ diffsynth/data/__init__.py
16
+ diffsynth/data/simple_text_image.py
17
+ diffsynth/data/video.py
18
+ diffsynth/distributed/__init__.py
19
+ diffsynth/distributed/xdit_context_parallel.py
20
+ diffsynth/extensions/__init__.py
21
+ diffsynth/extensions/ESRGAN/__init__.py
22
+ diffsynth/extensions/FastBlend/__init__.py
23
+ diffsynth/extensions/FastBlend/api.py
24
+ diffsynth/extensions/FastBlend/cupy_kernels.py
25
+ diffsynth/extensions/FastBlend/data.py
26
+ diffsynth/extensions/FastBlend/patch_match.py
27
+ diffsynth/extensions/FastBlend/runners/__init__.py
28
+ diffsynth/extensions/FastBlend/runners/accurate.py
29
+ diffsynth/extensions/FastBlend/runners/balanced.py
30
+ diffsynth/extensions/FastBlend/runners/fast.py
31
+ diffsynth/extensions/FastBlend/runners/interpolation.py
32
+ diffsynth/extensions/ImageQualityMetric/__init__.py
33
+ diffsynth/extensions/ImageQualityMetric/aesthetic.py
34
+ diffsynth/extensions/ImageQualityMetric/clip.py
35
+ diffsynth/extensions/ImageQualityMetric/config.py
36
+ diffsynth/extensions/ImageQualityMetric/hps.py
37
+ diffsynth/extensions/ImageQualityMetric/imagereward.py
38
+ diffsynth/extensions/ImageQualityMetric/mps.py
39
+ diffsynth/extensions/ImageQualityMetric/pickscore.py
40
+ diffsynth/extensions/ImageQualityMetric/BLIP/__init__.py
41
+ diffsynth/extensions/ImageQualityMetric/BLIP/blip.py
42
+ diffsynth/extensions/ImageQualityMetric/BLIP/blip_pretrain.py
43
+ diffsynth/extensions/ImageQualityMetric/BLIP/med.py
44
+ diffsynth/extensions/ImageQualityMetric/BLIP/vit.py
45
+ diffsynth/extensions/ImageQualityMetric/open_clip/__init__.py
46
+ diffsynth/extensions/ImageQualityMetric/open_clip/coca_model.py
47
+ diffsynth/extensions/ImageQualityMetric/open_clip/constants.py
48
+ diffsynth/extensions/ImageQualityMetric/open_clip/factory.py
49
+ diffsynth/extensions/ImageQualityMetric/open_clip/generation_utils.py
50
+ diffsynth/extensions/ImageQualityMetric/open_clip/hf_configs.py
51
+ diffsynth/extensions/ImageQualityMetric/open_clip/hf_model.py
52
+ diffsynth/extensions/ImageQualityMetric/open_clip/loss.py
53
+ diffsynth/extensions/ImageQualityMetric/open_clip/model.py
54
+ diffsynth/extensions/ImageQualityMetric/open_clip/modified_resnet.py
55
+ diffsynth/extensions/ImageQualityMetric/open_clip/openai.py
56
+ diffsynth/extensions/ImageQualityMetric/open_clip/pretrained.py
57
+ diffsynth/extensions/ImageQualityMetric/open_clip/push_to_hf_hub.py
58
+ diffsynth/extensions/ImageQualityMetric/open_clip/timm_model.py
59
+ diffsynth/extensions/ImageQualityMetric/open_clip/tokenizer.py
60
+ diffsynth/extensions/ImageQualityMetric/open_clip/transform.py
61
+ diffsynth/extensions/ImageQualityMetric/open_clip/transformer.py
62
+ diffsynth/extensions/ImageQualityMetric/open_clip/utils.py
63
+ diffsynth/extensions/ImageQualityMetric/open_clip/version.py
64
+ diffsynth/extensions/ImageQualityMetric/trainer/__init__.py
65
+ diffsynth/extensions/ImageQualityMetric/trainer/models/__init__.py
66
+ diffsynth/extensions/ImageQualityMetric/trainer/models/base_model.py
67
+ diffsynth/extensions/ImageQualityMetric/trainer/models/clip_model.py
68
+ diffsynth/extensions/ImageQualityMetric/trainer/models/cross_modeling.py
69
+ diffsynth/extensions/RIFE/__init__.py
70
+ diffsynth/lora/__init__.py
71
+ diffsynth/lora/flux_lora.py
72
+ diffsynth/models/__init__.py
73
+ diffsynth/models/attention.py
74
+ diffsynth/models/cog_dit.py
75
+ diffsynth/models/cog_vae.py
76
+ diffsynth/models/downloader.py
77
+ diffsynth/models/flux_controlnet.py
78
+ diffsynth/models/flux_dit.py
79
+ diffsynth/models/flux_infiniteyou.py
80
+ diffsynth/models/flux_ipadapter.py
81
+ diffsynth/models/flux_lora_encoder.py
82
+ diffsynth/models/flux_text_encoder.py
83
+ diffsynth/models/flux_vae.py
84
+ diffsynth/models/flux_value_control.py
85
+ diffsynth/models/hunyuan_dit.py
86
+ diffsynth/models/hunyuan_dit_text_encoder.py
87
+ diffsynth/models/hunyuan_video_dit.py
88
+ diffsynth/models/hunyuan_video_text_encoder.py
89
+ diffsynth/models/hunyuan_video_vae_decoder.py
90
+ diffsynth/models/hunyuan_video_vae_encoder.py
91
+ diffsynth/models/kolors_text_encoder.py
92
+ diffsynth/models/lora.py
93
+ diffsynth/models/model_manager.py
94
+ diffsynth/models/nexus_gen.py
95
+ diffsynth/models/nexus_gen_ar_model.py
96
+ diffsynth/models/nexus_gen_projector.py
97
+ diffsynth/models/omnigen.py
98
+ diffsynth/models/qwen_image_controlnet.py
99
+ diffsynth/models/qwen_image_dit.py
100
+ diffsynth/models/qwen_image_text_encoder.py
101
+ diffsynth/models/qwen_image_vae.py
102
+ diffsynth/models/qwenvl.py
103
+ diffsynth/models/sd3_dit.py
104
+ diffsynth/models/sd3_text_encoder.py
105
+ diffsynth/models/sd3_vae_decoder.py
106
+ diffsynth/models/sd3_vae_encoder.py
107
+ diffsynth/models/sd_controlnet.py
108
+ diffsynth/models/sd_ipadapter.py
109
+ diffsynth/models/sd_motion.py
110
+ diffsynth/models/sd_text_encoder.py
111
+ diffsynth/models/sd_unet.py
112
+ diffsynth/models/sd_vae_decoder.py
113
+ diffsynth/models/sd_vae_encoder.py
114
+ diffsynth/models/sdxl_controlnet.py
115
+ diffsynth/models/sdxl_ipadapter.py
116
+ diffsynth/models/sdxl_motion.py
117
+ diffsynth/models/sdxl_text_encoder.py
118
+ diffsynth/models/sdxl_unet.py
119
+ diffsynth/models/sdxl_vae_decoder.py
120
+ diffsynth/models/sdxl_vae_encoder.py
121
+ diffsynth/models/step1x_connector.py
122
+ diffsynth/models/stepvideo_dit.py
123
+ diffsynth/models/stepvideo_text_encoder.py
124
+ diffsynth/models/stepvideo_vae.py
125
+ diffsynth/models/svd_image_encoder.py
126
+ diffsynth/models/svd_unet.py
127
+ diffsynth/models/svd_vae_decoder.py
128
+ diffsynth/models/svd_vae_encoder.py
129
+ diffsynth/models/tiler.py
130
+ diffsynth/models/utils.py
131
+ diffsynth/models/wan_video_camera_controller.py
132
+ diffsynth/models/wan_video_dit.py
133
+ diffsynth/models/wan_video_dit_s2v.py
134
+ diffsynth/models/wan_video_image_encoder.py
135
+ diffsynth/models/wan_video_motion_controller.py
136
+ diffsynth/models/wan_video_text_encoder.py
137
+ diffsynth/models/wan_video_vace.py
138
+ diffsynth/models/wan_video_vae.py
139
+ diffsynth/models/wav2vec.py
140
+ diffsynth/pipelines/__init__.py
141
+ diffsynth/pipelines/base.py
142
+ diffsynth/pipelines/cog_video.py
143
+ diffsynth/pipelines/dancer.py
144
+ diffsynth/pipelines/flux_image.py
145
+ diffsynth/pipelines/flux_image_new.py
146
+ diffsynth/pipelines/hunyuan_image.py
147
+ diffsynth/pipelines/hunyuan_video.py
148
+ diffsynth/pipelines/omnigen_image.py
149
+ diffsynth/pipelines/pipeline_runner.py
150
+ diffsynth/pipelines/qwen_image.py
151
+ diffsynth/pipelines/sd3_image.py
152
+ diffsynth/pipelines/sd_image.py
153
+ diffsynth/pipelines/sd_video.py
154
+ diffsynth/pipelines/sdxl_image.py
155
+ diffsynth/pipelines/sdxl_video.py
156
+ diffsynth/pipelines/step_video.py
157
+ diffsynth/pipelines/svd_video.py
158
+ diffsynth/pipelines/wan_video.py
159
+ diffsynth/pipelines/wan_video_new.py
160
+ diffsynth/processors/FastBlend.py
161
+ diffsynth/processors/PILEditor.py
162
+ diffsynth/processors/RIFE.py
163
+ diffsynth/processors/__init__.py
164
+ diffsynth/processors/base.py
165
+ diffsynth/processors/sequencial_processor.py
166
+ diffsynth/prompters/__init__.py
167
+ diffsynth/prompters/base_prompter.py
168
+ diffsynth/prompters/cog_prompter.py
169
+ diffsynth/prompters/flux_prompter.py
170
+ diffsynth/prompters/hunyuan_dit_prompter.py
171
+ diffsynth/prompters/hunyuan_video_prompter.py
172
+ diffsynth/prompters/kolors_prompter.py
173
+ diffsynth/prompters/omnigen_prompter.py
174
+ diffsynth/prompters/omost.py
175
+ diffsynth/prompters/prompt_refiners.py
176
+ diffsynth/prompters/sd3_prompter.py
177
+ diffsynth/prompters/sd_prompter.py
178
+ diffsynth/prompters/sdxl_prompter.py
179
+ diffsynth/prompters/stepvideo_prompter.py
180
+ diffsynth/prompters/wan_prompter.py
181
+ diffsynth/schedulers/__init__.py
182
+ diffsynth/schedulers/continuous_ode.py
183
+ diffsynth/schedulers/ddim.py
184
+ diffsynth/schedulers/flow_match.py
185
+ diffsynth/tokenizer_configs/__init__.py
186
+ diffsynth/tokenizer_configs/cog/tokenizer/added_tokens.json
187
+ diffsynth/tokenizer_configs/cog/tokenizer/special_tokens_map.json
188
+ diffsynth/tokenizer_configs/cog/tokenizer/spiece.model
189
+ diffsynth/tokenizer_configs/cog/tokenizer/tokenizer_config.json
190
+ diffsynth/tokenizer_configs/flux/tokenizer_1/merges.txt
191
+ diffsynth/tokenizer_configs/flux/tokenizer_1/special_tokens_map.json
192
+ diffsynth/tokenizer_configs/flux/tokenizer_1/tokenizer_config.json
193
+ diffsynth/tokenizer_configs/flux/tokenizer_1/vocab.json
194
+ diffsynth/tokenizer_configs/flux/tokenizer_2/special_tokens_map.json
195
+ diffsynth/tokenizer_configs/flux/tokenizer_2/spiece.model
196
+ diffsynth/tokenizer_configs/flux/tokenizer_2/tokenizer.json
197
+ diffsynth/tokenizer_configs/flux/tokenizer_2/tokenizer_config.json
198
+ diffsynth/tokenizer_configs/hunyuan_dit/tokenizer/special_tokens_map.json
199
+ diffsynth/tokenizer_configs/hunyuan_dit/tokenizer/tokenizer_config.json
200
+ diffsynth/tokenizer_configs/hunyuan_dit/tokenizer/vocab.txt
201
+ diffsynth/tokenizer_configs/hunyuan_dit/tokenizer/vocab_org.txt
202
+ diffsynth/tokenizer_configs/hunyuan_dit/tokenizer_t5/config.json
203
+ diffsynth/tokenizer_configs/hunyuan_dit/tokenizer_t5/special_tokens_map.json
204
+ diffsynth/tokenizer_configs/hunyuan_dit/tokenizer_t5/spiece.model
205
+ diffsynth/tokenizer_configs/hunyuan_dit/tokenizer_t5/tokenizer_config.json
206
+ diffsynth/tokenizer_configs/hunyuan_video/tokenizer_1/merges.txt
207
+ diffsynth/tokenizer_configs/hunyuan_video/tokenizer_1/special_tokens_map.json
208
+ diffsynth/tokenizer_configs/hunyuan_video/tokenizer_1/tokenizer_config.json
209
+ diffsynth/tokenizer_configs/hunyuan_video/tokenizer_1/vocab.json
210
+ diffsynth/tokenizer_configs/hunyuan_video/tokenizer_2/preprocessor_config.json
211
+ diffsynth/tokenizer_configs/hunyuan_video/tokenizer_2/special_tokens_map.json
212
+ diffsynth/tokenizer_configs/hunyuan_video/tokenizer_2/tokenizer.json
213
+ diffsynth/tokenizer_configs/hunyuan_video/tokenizer_2/tokenizer_config.json
214
+ diffsynth/tokenizer_configs/kolors/tokenizer/tokenizer.model
215
+ diffsynth/tokenizer_configs/kolors/tokenizer/tokenizer_config.json
216
+ diffsynth/tokenizer_configs/kolors/tokenizer/vocab.txt
217
+ diffsynth/tokenizer_configs/stable_diffusion/tokenizer/merges.txt
218
+ diffsynth/tokenizer_configs/stable_diffusion/tokenizer/special_tokens_map.json
219
+ diffsynth/tokenizer_configs/stable_diffusion/tokenizer/tokenizer_config.json
220
+ diffsynth/tokenizer_configs/stable_diffusion/tokenizer/vocab.json
221
+ diffsynth/tokenizer_configs/stable_diffusion_3/tokenizer_1/merges.txt
222
+ diffsynth/tokenizer_configs/stable_diffusion_3/tokenizer_1/special_tokens_map.json
223
+ diffsynth/tokenizer_configs/stable_diffusion_3/tokenizer_1/tokenizer_config.json
224
+ diffsynth/tokenizer_configs/stable_diffusion_3/tokenizer_1/vocab.json
225
+ diffsynth/tokenizer_configs/stable_diffusion_3/tokenizer_2/merges.txt
226
+ diffsynth/tokenizer_configs/stable_diffusion_3/tokenizer_2/special_tokens_map.json
227
+ diffsynth/tokenizer_configs/stable_diffusion_3/tokenizer_2/tokenizer_config.json
228
+ diffsynth/tokenizer_configs/stable_diffusion_3/tokenizer_2/vocab.json
229
+ diffsynth/tokenizer_configs/stable_diffusion_3/tokenizer_3/special_tokens_map.json
230
+ diffsynth/tokenizer_configs/stable_diffusion_3/tokenizer_3/spiece.model
231
+ diffsynth/tokenizer_configs/stable_diffusion_3/tokenizer_3/tokenizer.json
232
+ diffsynth/tokenizer_configs/stable_diffusion_3/tokenizer_3/tokenizer_config.json
233
+ diffsynth/tokenizer_configs/stable_diffusion_xl/tokenizer_2/merges.txt
234
+ diffsynth/tokenizer_configs/stable_diffusion_xl/tokenizer_2/special_tokens_map.json
235
+ diffsynth/tokenizer_configs/stable_diffusion_xl/tokenizer_2/tokenizer_config.json
236
+ diffsynth/tokenizer_configs/stable_diffusion_xl/tokenizer_2/vocab.json
237
+ diffsynth/trainers/__init__.py
238
+ diffsynth/trainers/text_to_image.py
239
+ diffsynth/trainers/utils.py
240
+ diffsynth/utils/__init__.py
241
+ diffsynth/vram_management/__init__.py
242
+ diffsynth/vram_management/gradient_checkpointing.py
243
+ diffsynth/vram_management/layers.py
exp_code/1_benchmark/DiffSynth-Studio/diffsynth.egg-info/dependency_links.txt ADDED
@@ -0,0 +1 @@
 
 
1
+
exp_code/1_benchmark/DiffSynth-Studio/diffsynth.egg-info/requires.txt ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ torch>=2.0.0
2
+ torchvision
3
+ cupy-cuda12x
4
+ transformers
5
+ controlnet-aux==0.0.7
6
+ imageio
7
+ imageio[ffmpeg]
8
+ safetensors
9
+ einops
10
+ sentencepiece
11
+ protobuf
12
+ modelscope
13
+ ftfy
14
+ pynvml
15
+ pandas
16
+ accelerate
exp_code/1_benchmark/DiffSynth-Studio/diffsynth.egg-info/top_level.txt ADDED
@@ -0,0 +1 @@
 
 
1
+ diffsynth
exp_code/1_benchmark/DiffSynth-Studio/diffsynth/models/__pycache__/sdxl_controlnet.cpython-311.pyc ADDED
Binary file (20.1 kB). View file
 
exp_code/1_benchmark/DiffSynth-Studio/diffsynth/models/__pycache__/sdxl_ipadapter.cpython-311.pyc ADDED
Binary file (13.8 kB). View file
 
exp_code/1_benchmark/DiffSynth-Studio/diffsynth/models/__pycache__/sdxl_motion.cpython-311.pyc ADDED
Binary file (6.79 kB). View file
 
exp_code/1_benchmark/DiffSynth-Studio/diffsynth/models/__pycache__/sdxl_text_encoder.cpython-311.pyc ADDED
Binary file (85.7 kB). View file
 
exp_code/1_benchmark/DiffSynth-Studio/diffsynth/models/__pycache__/sdxl_vae_decoder.cpython-311.pyc ADDED
Binary file (2.34 kB). View file
 
exp_code/1_benchmark/DiffSynth-Studio/diffsynth/models/__pycache__/sdxl_vae_encoder.cpython-311.pyc ADDED
Binary file (2.34 kB). View file
 
exp_code/1_benchmark/DiffSynth-Studio/diffsynth/models/__pycache__/step1x_connector.cpython-311.pyc ADDED
Binary file (31.1 kB). View file
 
exp_code/1_benchmark/DiffSynth-Studio/diffsynth/models/__pycache__/stepvideo_dit.cpython-311.pyc ADDED
Binary file (49.3 kB). View file
 
exp_code/1_benchmark/DiffSynth-Studio/diffsynth/models/__pycache__/stepvideo_text_encoder.cpython-311.pyc ADDED
Binary file (30.2 kB). View file
 
exp_code/1_benchmark/DiffSynth-Studio/diffsynth/models/__pycache__/stepvideo_vae.cpython-311.pyc ADDED
Binary file (66.6 kB). View file
 
exp_code/1_benchmark/DiffSynth-Studio/diffsynth/models/__pycache__/svd_image_encoder.cpython-311.pyc ADDED
Binary file (69.5 kB). View file
 
exp_code/1_benchmark/DiffSynth-Studio/diffsynth/models/__pycache__/svd_vae_decoder.cpython-311.pyc ADDED
Binary file (47.9 kB). View file
 
exp_code/1_benchmark/DiffSynth-Studio/diffsynth/models/__pycache__/svd_vae_encoder.cpython-311.pyc ADDED
Binary file (14.6 kB). View file
 
exp_code/1_benchmark/DiffSynth-Studio/diffsynth/models/__pycache__/tiler.cpython-311.pyc ADDED
Binary file (14.1 kB). View file
 
exp_code/1_benchmark/DiffSynth-Studio/diffsynth/models/__pycache__/utils.cpython-311.pyc ADDED
Binary file (12.8 kB). View file
 
exp_code/1_benchmark/DiffSynth-Studio/diffsynth/models/__pycache__/wan_video_camera_controller.cpython-311.pyc ADDED
Binary file (12.5 kB). View file
 
exp_code/1_benchmark/DiffSynth-Studio/diffsynth/models/__pycache__/wan_video_dit.cpython-311.pyc ADDED
Binary file (39.1 kB). View file
 
exp_code/1_benchmark/DiffSynth-Studio/diffsynth/models/__pycache__/wan_video_dit_s2v.cpython-311.pyc ADDED
Binary file (44.5 kB). View file
 
exp_code/1_benchmark/DiffSynth-Studio/diffsynth/models/__pycache__/wan_video_image_encoder.cpython-311.pyc ADDED
Binary file (43.1 kB). View file
 
exp_code/1_benchmark/DiffSynth-Studio/diffsynth/models/__pycache__/wan_video_motion_controller.cpython-311.pyc ADDED
Binary file (3.35 kB). View file
 
exp_code/1_benchmark/DiffSynth-Studio/diffsynth/models/__pycache__/wan_video_text_encoder.cpython-311.pyc ADDED
Binary file (18.2 kB). View file
 
exp_code/1_benchmark/DiffSynth-Studio/diffsynth/models/__pycache__/wan_video_vace.cpython-311.pyc ADDED
Binary file (8.46 kB). View file
 
exp_code/1_benchmark/DiffSynth-Studio/diffsynth/models/__pycache__/wan_video_vae.cpython-311.pyc ADDED
Binary file (71.7 kB). View file
 
exp_code/1_benchmark/DiffSynth-Studio/diffsynth/models/__pycache__/wav2vec.cpython-311.pyc ADDED
Binary file (12.4 kB). View file
 
exp_code/1_benchmark/DiffSynth-Studio/diffsynth/pipelines/__init__.py ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from .sd_image import SDImagePipeline
2
+ from .sd_video import SDVideoPipeline
3
+ from .sdxl_image import SDXLImagePipeline
4
+ from .sdxl_video import SDXLVideoPipeline
5
+ from .sd3_image import SD3ImagePipeline
6
+ from .hunyuan_image import HunyuanDiTImagePipeline
7
+ from .svd_video import SVDVideoPipeline
8
+ from .flux_image import FluxImagePipeline
9
+ from .cog_video import CogVideoPipeline
10
+ from .omnigen_image import OmnigenImagePipeline
11
+ from .pipeline_runner import SDVideoPipelineRunner
12
+ from .hunyuan_video import HunyuanVideoPipeline
13
+ from .step_video import StepVideoPipeline
14
+ from .wan_video import WanVideoPipeline
15
+ KolorsImagePipeline = SDXLImagePipeline
exp_code/1_benchmark/DiffSynth-Studio/diffsynth/pipelines/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (1.21 kB). View file
 
exp_code/1_benchmark/DiffSynth-Studio/diffsynth/pipelines/__pycache__/base.cpython-311.pyc ADDED
Binary file (9.79 kB). View file
 
exp_code/1_benchmark/DiffSynth-Studio/diffsynth/pipelines/__pycache__/cog_video.cpython-311.pyc ADDED
Binary file (8.1 kB). View file
 
exp_code/1_benchmark/DiffSynth-Studio/diffsynth/pipelines/__pycache__/dancer.cpython-311.pyc ADDED
Binary file (11.1 kB). View file
 
exp_code/1_benchmark/DiffSynth-Studio/diffsynth/pipelines/__pycache__/flux_image.cpython-311.pyc ADDED
Binary file (45.6 kB). View file
 
exp_code/1_benchmark/DiffSynth-Studio/diffsynth/pipelines/__pycache__/hunyuan_image.cpython-311.pyc ADDED
Binary file (17 kB). View file
 
exp_code/1_benchmark/DiffSynth-Studio/diffsynth/pipelines/__pycache__/hunyuan_video.cpython-311.pyc ADDED
Binary file (25.9 kB). View file
 
exp_code/1_benchmark/DiffSynth-Studio/diffsynth/pipelines/__pycache__/omnigen_image.cpython-311.pyc ADDED
Binary file (19.4 kB). View file
 
exp_code/1_benchmark/DiffSynth-Studio/diffsynth/pipelines/__pycache__/pipeline_runner.cpython-311.pyc ADDED
Binary file (9.08 kB). View file
 
exp_code/1_benchmark/DiffSynth-Studio/diffsynth/pipelines/__pycache__/sd3_image.cpython-311.pyc ADDED
Binary file (8.85 kB). View file
 
exp_code/1_benchmark/DiffSynth-Studio/diffsynth/pipelines/__pycache__/sd_image.cpython-311.pyc ADDED
Binary file (11.1 kB). View file
 
exp_code/1_benchmark/DiffSynth-Studio/diffsynth/pipelines/__pycache__/sd_video.cpython-311.pyc ADDED
Binary file (14.4 kB). View file
 
exp_code/1_benchmark/DiffSynth-Studio/diffsynth/pipelines/__pycache__/sdxl_image.cpython-311.pyc ADDED
Binary file (13 kB). View file
 
exp_code/1_benchmark/DiffSynth-Studio/diffsynth/pipelines/__pycache__/sdxl_video.cpython-311.pyc ADDED
Binary file (12.8 kB). View file
 
exp_code/1_benchmark/DiffSynth-Studio/diffsynth/pipelines/__pycache__/step_video.cpython-311.pyc ADDED
Binary file (11.1 kB). View file
 
exp_code/1_benchmark/DiffSynth-Studio/diffsynth/pipelines/__pycache__/svd_video.cpython-311.pyc ADDED
Binary file (17.2 kB). View file
 
exp_code/1_benchmark/DiffSynth-Studio/diffsynth/pipelines/__pycache__/wan_video.cpython-311.pyc ADDED
Binary file (35.9 kB). View file
 
exp_code/1_benchmark/DiffSynth-Studio/diffsynth/pipelines/__pycache__/wan_video_new.cpython-311.pyc ADDED
Binary file (86.4 kB). View file
 
exp_code/1_benchmark/DiffSynth-Studio/diffsynth/pipelines/base.py ADDED
@@ -0,0 +1,127 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ import numpy as np
3
+ from PIL import Image
4
+ from torchvision.transforms import GaussianBlur
5
+
6
+
7
+
8
class BasePipeline(torch.nn.Module):
    """Shared functionality for all diffusion pipelines.

    Provides image <-> tensor conversion, size rounding, local-prompt latent
    merging, CPU-offload model management, and seeded noise generation.
    Subclasses are expected to set ``self.prompter`` (used by
    ``extend_prompt``) and register their sub-model attribute names in
    ``self.model_names`` (used by ``load_models_to_device``).
    """

    def __init__(self, device="cuda", torch_dtype=torch.float16, height_division_factor=64, width_division_factor=64):
        """
        Args:
            device: device identifier that models are moved to when loaded.
            torch_dtype: dtype intended for model weights/activations.
            height_division_factor: generated heights are rounded up to a multiple of this.
            width_division_factor: generated widths are rounded up to a multiple of this.
        """
        super().__init__()
        self.device = device
        self.torch_dtype = torch_dtype
        self.height_division_factor = height_division_factor
        self.width_division_factor = width_division_factor
        # Offloading is opt-in; see enable_cpu_offload().
        self.cpu_offload = False
        # Names of sub-model attributes managed by load_models_to_device().
        self.model_names = []

    def check_resize_height_width(self, height, width):
        """Round ``height`` and ``width`` up to the nearest allowed multiple.

        Returns:
            (height, width) tuple, each a multiple of the corresponding
            division factor. Prints a notice when rounding occurs.
        """
        if height % self.height_division_factor != 0:
            height = (height + self.height_division_factor - 1) // self.height_division_factor * self.height_division_factor
            print(f"The height cannot be evenly divided by {self.height_division_factor}. We round it up to {height}.")
        if width % self.width_division_factor != 0:
            width = (width + self.width_division_factor - 1) // self.width_division_factor * self.width_division_factor
            print(f"The width cannot be evenly divided by {self.width_division_factor}. We round it up to {width}.")
        return height, width

    def preprocess_image(self, image):
        """Convert an H x W x C uint8 image (PIL image or array) to a
        1 x C x H x W float tensor scaled to [-1, 1]."""
        image = torch.Tensor(np.array(image, dtype=np.float32) * (2 / 255) - 1).permute(2, 0, 1).unsqueeze(0)
        return image

    def preprocess_images(self, images):
        """Apply ``preprocess_image`` to every image in ``images``."""
        return [self.preprocess_image(image) for image in images]

    def vae_output_to_image(self, vae_output):
        """Convert the first sample of a VAE output tensor (values in [-1, 1],
        N x C x H x W) to a PIL image."""
        image = vae_output[0].cpu().float().permute(1, 2, 0).numpy()
        image = Image.fromarray(((image / 2 + 0.5).clip(0, 1) * 255).astype("uint8"))
        return image

    def vae_output_to_video(self, vae_output):
        """Convert a VAE video output tensor to a list of PIL frames.

        NOTE(review): after ``permute(1, 2, 0)`` iteration is over the first
        resulting axis and each frame is 2-D — presumably the input layout is
        such that this yields one image per frame; confirm against callers.
        """
        video = vae_output.cpu().permute(1, 2, 0).numpy()
        video = [Image.fromarray(((image / 2 + 0.5).clip(0, 1) * 255).astype("uint8")) for image in video]
        return video

    def merge_latents(self, value, latents, masks, scales, blur_kernel_size=33, blur_sigma=10.0):
        """Blend local ``latents`` into ``value`` under blurred binary masks.

        ``value`` is modified in place: each latent is added where its mask is
        active (weighted by its scale), then the result is renormalized by the
        accumulated weights. Masks are PIL images; they are resized to the
        latent resolution, binarized, then Gaussian-blurred for soft edges.
        Returns ``value``.
        """
        if len(latents) > 0:
            blur = GaussianBlur(kernel_size=blur_kernel_size, sigma=blur_sigma)
            height, width = value.shape[-2:]
            weight = torch.ones_like(value)
            for latent, mask, scale in zip(latents, masks, scales):
                mask = self.preprocess_image(mask.resize((width, height))).mean(dim=1, keepdim=True) > 0
                mask = mask.repeat(1, latent.shape[1], 1, 1).to(dtype=latent.dtype, device=latent.device)
                mask = blur(mask)
                value += latent * mask * scale
                weight += mask * scale
            value /= weight
        return value

    def control_noise_via_local_prompts(self, prompt_emb_global, prompt_emb_locals, masks, mask_scales, inference_callback, special_kwargs=None, special_local_kwargs_list=None):
        """Run ``inference_callback`` for the global prompt and each local
        prompt, then merge the local predictions into the global one under
        ``masks`` / ``mask_scales`` via ``merge_latents``."""
        if special_kwargs is None:
            noise_pred_global = inference_callback(prompt_emb_global)
        else:
            noise_pred_global = inference_callback(prompt_emb_global, special_kwargs)
        if special_local_kwargs_list is None:
            noise_pred_locals = [inference_callback(prompt_emb_local) for prompt_emb_local in prompt_emb_locals]
        else:
            noise_pred_locals = [inference_callback(prompt_emb_local, special_kwargs) for prompt_emb_local, special_kwargs in zip(prompt_emb_locals, special_local_kwargs_list)]
        noise_pred = self.merge_latents(noise_pred_global, noise_pred_locals, masks, mask_scales)
        return noise_pred

    def extend_prompt(self, prompt, local_prompts, masks, mask_scales):
        """Let ``self.prompter`` extend ``prompt`` and append any extra local
        prompts/masks it produces (extra masks get a fixed scale of 100.0).

        Requires the subclass to have set ``self.prompter``.
        """
        local_prompts = local_prompts or []
        masks = masks or []
        mask_scales = mask_scales or []
        extended_prompt_dict = self.prompter.extend_prompt(prompt)
        prompt = extended_prompt_dict.get("prompt", prompt)
        local_prompts += extended_prompt_dict.get("prompts", [])
        masks += extended_prompt_dict.get("masks", [])
        mask_scales += [100.0] * len(extended_prompt_dict.get("masks", []))
        return prompt, local_prompts, masks, mask_scales

    def enable_cpu_offload(self):
        """Enable on-demand model loading/offloading in load_models_to_device()."""
        self.cpu_offload = True

    def load_models_to_device(self, loadmodel_names=None):
        """Move the models named in ``loadmodel_names`` to ``self.device`` and
        offload every other registered model to CPU.

        No-op unless CPU offloading was enabled via ``enable_cpu_offload``.
        Models that expose fine-grained VRAM management (attribute
        ``vram_management_enabled``) are moved module-by-module via their
        ``offload``/``onload`` hooks instead of wholesale ``.cpu()``/``.to()``.
        """
        # Avoid a mutable default argument; None means "load nothing".
        loadmodel_names = loadmodel_names or []
        # Only shuffle models around if cpu_offload is enabled.
        if not self.cpu_offload:
            return
        # Offload the unneeded models to CPU.
        for model_name in self.model_names:
            if model_name not in loadmodel_names:
                model = getattr(self, model_name)
                if model is not None:
                    if hasattr(model, "vram_management_enabled") and model.vram_management_enabled:
                        for module in model.modules():
                            if hasattr(module, "offload"):
                                module.offload()
                    else:
                        model.cpu()
        # Load the needed models to the target device.
        for model_name in loadmodel_names:
            model = getattr(self, model_name)
            if model is not None:
                if hasattr(model, "vram_management_enabled") and model.vram_management_enabled:
                    for module in model.modules():
                        if hasattr(module, "onload"):
                            module.onload()
                else:
                    model.to(self.device)
        # Free cached CUDA memory released by the offloads.
        torch.cuda.empty_cache()

    def generate_noise(self, shape, seed=None, device="cpu", dtype=torch.float16):
        """Return Gaussian noise of ``shape``; reproducible when ``seed`` is given."""
        generator = None if seed is None else torch.Generator(device).manual_seed(seed)
        noise = torch.randn(shape, generator=generator, device=device, dtype=dtype)
        return noise
exp_code/1_benchmark/DiffSynth-Studio/diffsynth/pipelines/cog_video.py ADDED
@@ -0,0 +1,135 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from ..models import ModelManager, FluxTextEncoder2, CogDiT, CogVAEEncoder, CogVAEDecoder
2
+ from ..prompters import CogPrompter
3
+ from ..schedulers import EnhancedDDIMScheduler
4
+ from .base import BasePipeline
5
+ import torch
6
+ from tqdm import tqdm
7
+ from PIL import Image
8
+ import numpy as np
9
+ from einops import rearrange
10
+
11
+
12
+
13
class CogVideoPipeline(BasePipeline):
    """Video generation pipeline for CogVideoX-style models.

    Combines a T5 text encoder (shared with FLUX), the Cog DiT denoiser and
    the Cog VAE encoder/decoder under a zero-terminal-SNR v-prediction DDIM
    scheduler. Supports text-to-video and, via `denoising_strength` < 1,
    video-to-video generation.
    """

    def __init__(self, device="cuda", torch_dtype=torch.float16):
        # Height/width must be divisible by 16.
        super().__init__(device=device, torch_dtype=torch_dtype, height_division_factor=16, width_division_factor=16)
        self.scheduler = EnhancedDDIMScheduler(rescale_zero_terminal_snr=True, prediction_type="v_prediction")
        self.prompter = CogPrompter()
        # Models; populated later by fetch_models().
        self.text_encoder: FluxTextEncoder2 = None
        self.dit: CogDiT = None
        self.vae_encoder: CogVAEEncoder = None
        self.vae_decoder: CogVAEDecoder = None


    def fetch_models(self, model_manager: ModelManager, prompt_refiner_classes=[]):
        """Fetch the loaded models from `model_manager` and wire up the prompter."""
        self.text_encoder = model_manager.fetch_model("flux_text_encoder_2")
        self.dit = model_manager.fetch_model("cog_dit")
        self.vae_encoder = model_manager.fetch_model("cog_vae_encoder")
        self.vae_decoder = model_manager.fetch_model("cog_vae_decoder")
        self.prompter.fetch_models(self.text_encoder)
        self.prompter.load_prompt_refiners(model_manager, prompt_refiner_classes)


    @staticmethod
    def from_model_manager(model_manager: ModelManager, prompt_refiner_classes=[]):
        """Build a pipeline on the manager's device/dtype and fetch its models."""
        pipe = CogVideoPipeline(
            device=model_manager.device,
            torch_dtype=model_manager.torch_dtype
        )
        pipe.fetch_models(model_manager, prompt_refiner_classes)
        return pipe


    def tensor2video(self, frames):
        """Convert a (C, T, H, W) tensor in [-1, 1] to a list of PIL images."""
        frames = rearrange(frames, "C T H W -> T H W C")
        frames = ((frames.float() + 1) * 127.5).clip(0, 255).cpu().numpy().astype(np.uint8)
        frames = [Image.fromarray(frame) for frame in frames]
        return frames


    def encode_prompt(self, prompt, positive=True):
        """Encode `prompt` into text embeddings for the DiT."""
        prompt_emb = self.prompter.encode_prompt(prompt, device=self.device, positive=positive)
        return {"prompt_emb": prompt_emb}


    def prepare_extra_input(self, latents):
        """Build rotary positional embeddings matching the latent grid size."""
        return {"image_rotary_emb": self.dit.prepare_rotary_positional_embeddings(latents.shape[3], latents.shape[4], latents.shape[2], device=self.device)}


    @torch.no_grad()
    def __call__(
        self,
        prompt,
        negative_prompt="",
        input_video=None,
        cfg_scale=7.0,
        denoising_strength=1.0,
        num_frames=49,
        height=480,
        width=720,
        num_inference_steps=20,
        tiled=False,
        tile_size=(60, 90),
        tile_stride=(30, 45),
        seed=None,
        progress_bar_cmd=tqdm,
        progress_bar_st=None,
    ):
        """Generate a video.

        Args:
            prompt: Text prompt describing the video.
            negative_prompt: Prompt for the unconditional branch of CFG.
            input_video: Frames used as the starting point when
                `denoising_strength` < 1 (video-to-video).
            cfg_scale: Classifier-free guidance scale; 1.0 disables guidance.
            denoising_strength: 1.0 = pure text-to-video; lower values keep
                more of `input_video`.
            num_frames: Number of output frames; the latent time axis is
                num_frames // 4 + 1 (4x temporal compression in the VAE).
            height: Output height (rounded to a multiple of 16).
            width: Output width (rounded to a multiple of 16).
            num_inference_steps: Number of DDIM steps.
            tiled: Enable tiled VAE/DiT processing to save VRAM.
            tile_size: Tile size (in latent units) for tiled processing.
            tile_stride: Tile stride (in latent units) for tiled processing.
            seed: Random seed for reproducible noise.
            progress_bar_cmd: tqdm-like wrapper for the denoising loop.
            progress_bar_st: Optional Streamlit progress bar.

        Returns:
            List of PIL images (the video frames).
        """
        height, width = self.check_resize_height_width(height, width)

        # Tiler parameters
        tiler_kwargs = {"tiled": tiled, "tile_size": tile_size, "tile_stride": tile_stride}

        # Prepare scheduler
        self.scheduler.set_timesteps(num_inference_steps, denoising_strength=denoising_strength)

        # Prepare latent tensors; noise is drawn on CPU so seeding is device-independent.
        noise = self.generate_noise((1, 16, num_frames // 4 + 1, height//8, width//8), seed=seed, device="cpu", dtype=self.torch_dtype)

        if denoising_strength == 1.0:
            latents = noise.clone()
        else:
            # Video-to-video: encode the input frames and noise them to the first timestep.
            input_video = self.preprocess_images(input_video)
            input_video = torch.stack(input_video, dim=2)
            latents = self.vae_encoder.encode_video(input_video, **tiler_kwargs, progress_bar=progress_bar_cmd).to(dtype=self.torch_dtype)
            latents = self.scheduler.add_noise(latents, noise, self.scheduler.timesteps[0])
        # Tiled processing keeps latents on CPU; otherwise move them to the device.
        if not tiled: latents = latents.to(self.device)

        # Encode prompt
        prompt_emb_posi = self.encode_prompt(prompt, positive=True)
        if cfg_scale != 1.0:
            prompt_emb_nega = self.encode_prompt(negative_prompt, positive=False)

        # Extra input
        extra_input = self.prepare_extra_input(latents)

        # Denoise
        for progress_id, timestep in enumerate(progress_bar_cmd(self.scheduler.timesteps)):
            timestep = timestep.unsqueeze(0).to(self.device)

            # Classifier-free guidance
            noise_pred_posi = self.dit(
                latents, timestep=timestep, **prompt_emb_posi, **tiler_kwargs, **extra_input
            )
            if cfg_scale != 1.0:
                noise_pred_nega = self.dit(
                    latents, timestep=timestep, **prompt_emb_nega, **tiler_kwargs, **extra_input
                )
                noise_pred = noise_pred_nega + cfg_scale * (noise_pred_posi - noise_pred_nega)
            else:
                noise_pred = noise_pred_posi

            # DDIM step
            latents = self.scheduler.step(noise_pred, self.scheduler.timesteps[progress_id], latents)

            # Update progress bar; use progress_id + 1 so it reaches 1.0 on the last step
            # (the original progress_id / len(...) stopped at (n-1)/n).
            if progress_bar_st is not None:
                progress_bar_st.progress((progress_id + 1) / len(self.scheduler.timesteps))

        # Decode the latents into frames on CPU (VAE decoding is memory-heavy).
        video = self.vae_decoder.decode_video(latents.to("cpu"), **tiler_kwargs, progress_bar=progress_bar_cmd)
        video = self.tensor2video(video[0])

        return video