rahul7star committed on
Commit 88f15c0 · verified · 1 parent: adc2f6a

Upload 24 files

.gitignore ADDED
@@ -0,0 +1,180 @@
1
+ hf_download/
2
+ outputs/
3
+ repo/
4
+ loras/
5
+ queue.json
6
+ settings.json
7
+ # Byte-compiled / optimized / DLL files
8
+ __pycache__/
9
+ *.py[cod]
10
+ *$py.class
11
+
12
+ # C extensions
13
+ *.so
14
+
15
+ # Distribution / packaging
16
+ .Python
17
+ build/
18
+ develop-eggs/
19
+ dist/
20
+ downloads/
21
+ eggs/
22
+ .eggs/
23
+ lib/
24
+ lib64/
25
+ parts/
26
+ sdist/
27
+ var/
28
+ wheels/
29
+ share/python-wheels/
30
+ *.egg-info/
31
+ .installed.cfg
32
+ *.egg
33
+ MANIFEST
34
+
35
+ # PyInstaller
36
+ # Usually these files are written by a python script from a template
37
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
38
+ *.manifest
39
+ *.spec
40
+
41
+ # Installer logs
42
+ pip-log.txt
43
+ pip-delete-this-directory.txt
44
+
45
+ # Unit test / coverage reports
46
+ htmlcov/
47
+ .tox/
48
+ .nox/
49
+ .coverage
50
+ .coverage.*
51
+ .cache
52
+ nosetests.xml
53
+ coverage.xml
54
+ *.cover
55
+ *.py,cover
56
+ .hypothesis/
57
+ .pytest_cache/
58
+ cover/
59
+
60
+ # Translations
61
+ *.mo
62
+ *.pot
63
+
64
+ # Django stuff:
65
+ *.log
66
+ local_settings.py
67
+ db.sqlite3
68
+ db.sqlite3-journal
69
+
70
+ # Flask stuff:
71
+ instance/
72
+ .webassets-cache
73
+
74
+ # Scrapy stuff:
75
+ .scrapy
76
+
77
+ # Sphinx documentation
78
+ docs/_build/
79
+
80
+ # PyBuilder
81
+ .pybuilder/
82
+ target/
83
+
84
+ # Jupyter Notebook
85
+ .ipynb_checkpoints
86
+
87
+ # IPython
88
+ profile_default/
89
+ ipython_config.py
90
+
91
+ # pyenv
92
+ # For a library or package, you might want to ignore these files since the code is
93
+ # intended to run in multiple environments; otherwise, check them in:
94
+ # .python-version
95
+
96
+ # pipenv
97
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
98
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
99
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
100
+ # install all needed dependencies.
101
+ #Pipfile.lock
102
+
103
+ # UV
104
+ # Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control.
105
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
106
+ # commonly ignored for libraries.
107
+ #uv.lock
108
+
109
+ # poetry
110
+ # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
111
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
112
+ # commonly ignored for libraries.
113
+ # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
114
+ #poetry.lock
115
+
116
+ # pdm
117
+ # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
118
+ #pdm.lock
119
+ # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
120
+ # in version control.
121
+ # https://pdm.fming.dev/latest/usage/project/#working-with-version-control
122
+ .pdm.toml
123
+ .pdm-python
124
+ .pdm-build/
125
+
126
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
127
+ __pypackages__/
128
+
129
+ # Celery stuff
130
+ celerybeat-schedule
131
+ celerybeat.pid
132
+
133
+ # SageMath parsed files
134
+ *.sage.py
135
+
136
+ # Environments
137
+ .env
138
+ .venv
139
+ env/
140
+ venv/
141
+ ENV/
142
+ env.bak/
143
+ venv.bak/
144
+
145
+ # Spyder project settings
146
+ .spyderproject
147
+ .spyproject
148
+
149
+ # Rope project settings
150
+ .ropeproject
151
+
152
+ # mkdocs documentation
153
+ /site
154
+
155
+ # mypy
156
+ .mypy_cache/
157
+ .dmypy.json
158
+ dmypy.json
159
+
160
+ # Pyre type checker
161
+ .pyre/
162
+
163
+ # pytype static type analyzer
164
+ .pytype/
165
+
166
+ # Cython debug symbols
167
+ cython_debug/
168
+
169
+ # PyCharm
170
+ # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
171
+ # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
172
+ # and can be added to the global gitignore or merged into this file. For a more nuclear
173
+ # option (not recommended) you can uncomment the following to ignore the entire idea folder.
174
+ .idea/
175
+
176
+ # Ruff stuff:
177
+ .ruff_cache/
178
+
179
+ # PyPI configuration file
180
+ .pypirc
LICENSE ADDED
@@ -0,0 +1,201 @@
1
+ Apache License
2
+ Version 2.0, January 2004
3
+ http://www.apache.org/licenses/
4
+
5
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6
+
7
+ 1. Definitions.
8
+
9
+ "License" shall mean the terms and conditions for use, reproduction,
10
+ and distribution as defined by Sections 1 through 9 of this document.
11
+
12
+ "Licensor" shall mean the copyright owner or entity authorized by
13
+ the copyright owner that is granting the License.
14
+
15
+ "Legal Entity" shall mean the union of the acting entity and all
16
+ other entities that control, are controlled by, or are under common
17
+ control with that entity. For the purposes of this definition,
18
+ "control" means (i) the power, direct or indirect, to cause the
19
+ direction or management of such entity, whether by contract or
20
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
21
+ outstanding shares, or (iii) beneficial ownership of such entity.
22
+
23
+ "You" (or "Your") shall mean an individual or Legal Entity
24
+ exercising permissions granted by this License.
25
+
26
+ "Source" form shall mean the preferred form for making modifications,
27
+ including but not limited to software source code, documentation
28
+ source, and configuration files.
29
+
30
+ "Object" form shall mean any form resulting from mechanical
31
+ transformation or translation of a Source form, including but
32
+ not limited to compiled object code, generated documentation,
33
+ and conversions to other media types.
34
+
35
+ "Work" shall mean the work of authorship, whether in Source or
36
+ Object form, made available under the License, as indicated by a
37
+ copyright notice that is included in or attached to the work
38
+ (an example is provided in the Appendix below).
39
+
40
+ "Derivative Works" shall mean any work, whether in Source or Object
41
+ form, that is based on (or derived from) the Work and for which the
42
+ editorial revisions, annotations, elaborations, or other modifications
43
+ represent, as a whole, an original work of authorship. For the purposes
44
+ of this License, Derivative Works shall not include works that remain
45
+ separable from, or merely link (or bind by name) to the interfaces of,
46
+ the Work and Derivative Works thereof.
47
+
48
+ "Contribution" shall mean any work of authorship, including
49
+ the original version of the Work and any modifications or additions
50
+ to that Work or Derivative Works thereof, that is intentionally
51
+ submitted to Licensor for inclusion in the Work by the copyright owner
52
+ or by an individual or Legal Entity authorized to submit on behalf of
53
+ the copyright owner. For the purposes of this definition, "submitted"
54
+ means any form of electronic, verbal, or written communication sent
55
+ to the Licensor or its representatives, including but not limited to
56
+ communication on electronic mailing lists, source code control systems,
57
+ and issue tracking systems that are managed by, or on behalf of, the
58
+ Licensor for the purpose of discussing and improving the Work, but
59
+ excluding communication that is conspicuously marked or otherwise
60
+ designated in writing by the copyright owner as "Not a Contribution."
61
+
62
+ "Contributor" shall mean Licensor and any individual or Legal Entity
63
+ on behalf of whom a Contribution has been received by Licensor and
64
+ subsequently incorporated within the Work.
65
+
66
+ 2. Grant of Copyright License. Subject to the terms and conditions of
67
+ this License, each Contributor hereby grants to You a perpetual,
68
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69
+ copyright license to reproduce, prepare Derivative Works of,
70
+ publicly display, publicly perform, sublicense, and distribute the
71
+ Work and such Derivative Works in Source or Object form.
72
+
73
+ 3. Grant of Patent License. Subject to the terms and conditions of
74
+ this License, each Contributor hereby grants to You a perpetual,
75
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76
+ (except as stated in this section) patent license to make, have made,
77
+ use, offer to sell, sell, import, and otherwise transfer the Work,
78
+ where such license applies only to those patent claims licensable
79
+ by such Contributor that are necessarily infringed by their
80
+ Contribution(s) alone or by combination of their Contribution(s)
81
+ with the Work to which such Contribution(s) was submitted. If You
82
+ institute patent litigation against any entity (including a
83
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
84
+ or a Contribution incorporated within the Work constitutes direct
85
+ or contributory patent infringement, then any patent licenses
86
+ granted to You under this License for that Work shall terminate
87
+ as of the date such litigation is filed.
88
+
89
+ 4. Redistribution. You may reproduce and distribute copies of the
90
+ Work or Derivative Works thereof in any medium, with or without
91
+ modifications, and in Source or Object form, provided that You
92
+ meet the following conditions:
93
+
94
+ (a) You must give any other recipients of the Work or
95
+ Derivative Works a copy of this License; and
96
+
97
+ (b) You must cause any modified files to carry prominent notices
98
+ stating that You changed the files; and
99
+
100
+ (c) You must retain, in the Source form of any Derivative Works
101
+ that You distribute, all copyright, patent, trademark, and
102
+ attribution notices from the Source form of the Work,
103
+ excluding those notices that do not pertain to any part of
104
+ the Derivative Works; and
105
+
106
+ (d) If the Work includes a "NOTICE" text file as part of its
107
+ distribution, then any Derivative Works that You distribute must
108
+ include a readable copy of the attribution notices contained
109
+ within such NOTICE file, excluding those notices that do not
110
+ pertain to any part of the Derivative Works, in at least one
111
+ of the following places: within a NOTICE text file distributed
112
+ as part of the Derivative Works; within the Source form or
113
+ documentation, if provided along with the Derivative Works; or,
114
+ within a display generated by the Derivative Works, if and
115
+ wherever such third-party notices normally appear. The contents
116
+ of the NOTICE file are for informational purposes only and
117
+ do not modify the License. You may add Your own attribution
118
+ notices within Derivative Works that You distribute, alongside
119
+ or as an addendum to the NOTICE text from the Work, provided
120
+ that such additional attribution notices cannot be construed
121
+ as modifying the License.
122
+
123
+ You may add Your own copyright statement to Your modifications and
124
+ may provide additional or different license terms and conditions
125
+ for use, reproduction, or distribution of Your modifications, or
126
+ for any such Derivative Works as a whole, provided Your use,
127
+ reproduction, and distribution of the Work otherwise complies with
128
+ the conditions stated in this License.
129
+
130
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
131
+ any Contribution intentionally submitted for inclusion in the Work
132
+ by You to the Licensor shall be under the terms and conditions of
133
+ this License, without any additional terms or conditions.
134
+ Notwithstanding the above, nothing herein shall supersede or modify
135
+ the terms of any separate license agreement you may have executed
136
+ with Licensor regarding such Contributions.
137
+
138
+ 6. Trademarks. This License does not grant permission to use the trade
139
+ names, trademarks, service marks, or product names of the Licensor,
140
+ except as required for reasonable and customary use in describing the
141
+ origin of the Work and reproducing the content of the NOTICE file.
142
+
143
+ 7. Disclaimer of Warranty. Unless required by applicable law or
144
+ agreed to in writing, Licensor provides the Work (and each
145
+ Contributor provides its Contributions) on an "AS IS" BASIS,
146
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147
+ implied, including, without limitation, any warranties or conditions
148
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149
+ PARTICULAR PURPOSE. You are solely responsible for determining the
150
+ appropriateness of using or redistributing the Work and assume any
151
+ risks associated with Your exercise of permissions under this License.
152
+
153
+ 8. Limitation of Liability. In no event and under no legal theory,
154
+ whether in tort (including negligence), contract, or otherwise,
155
+ unless required by applicable law (such as deliberate and grossly
156
+ negligent acts) or agreed to in writing, shall any Contributor be
157
+ liable to You for damages, including any direct, indirect, special,
158
+ incidental, or consequential damages of any character arising as a
159
+ result of this License or out of the use or inability to use the
160
+ Work (including but not limited to damages for loss of goodwill,
161
+ work stoppage, computer failure or malfunction, or any and all
162
+ other commercial damages or losses), even if such Contributor
163
+ has been advised of the possibility of such damages.
164
+
165
+ 9. Accepting Warranty or Additional Liability. While redistributing
166
+ the Work or Derivative Works thereof, You may choose to offer,
167
+ and charge a fee for, acceptance of support, warranty, indemnity,
168
+ or other liability obligations and/or rights consistent with this
169
+ License. However, in accepting such obligations, You may act only
170
+ on Your own behalf and on Your sole responsibility, not on behalf
171
+ of any other Contributor, and only if You agree to indemnify,
172
+ defend, and hold each Contributor harmless for any liability
173
+ incurred by, or claims asserted against, such Contributor by reason
174
+ of your accepting any such warranty or additional liability.
175
+
176
+ END OF TERMS AND CONDITIONS
177
+
178
+ APPENDIX: How to apply the Apache License to your work.
179
+
180
+ To apply the Apache License to your work, attach the following
181
+ boilerplate notice, with the fields enclosed by brackets "[]"
182
+ replaced with your own identifying information. (Don't include
183
+ the brackets!) The text should be enclosed in the appropriate
184
+ comment syntax for the file format. We also recommend that a
185
+ file or class name and description of purpose be included on the
186
+ same "printed page" as the copyright notice for easier
187
+ identification within third-party archives.
188
+
189
+ Copyright [yyyy] [name of copyright owner]
190
+
191
+ Licensed under the Apache License, Version 2.0 (the "License");
192
+ you may not use this file except in compliance with the License.
193
+ You may obtain a copy of the License at
194
+
195
+ http://www.apache.org/licenses/LICENSE-2.0
196
+
197
+ Unless required by applicable law or agreed to in writing, software
198
+ distributed under the License is distributed on an "AS IS" BASIS,
199
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200
+ See the License for the specific language governing permissions and
201
+ limitations under the License.
README.md CHANGED
@@ -1,12 +1,87 @@
1
- ---
2
- title: FramePack Studio
3
- emoji: 💻
4
- colorFrom: yellow
5
- colorTo: green
6
- sdk: gradio
7
- sdk_version: 5.29.0
8
- app_file: app.py
9
- pinned: false
10
- ---
11
-
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
1
+ # FramePack Studio
2
+
3
+ FramePack Studio is an enhanced version of the FramePack demo script, designed to create intricate video scenes with improved prompt adherence. This is very much a work in progress; expect some bugs and broken features.
4
+ ![screencapture-127-0-0-1-7860-2025-05-04-20_13_58](https://github.com/user-attachments/assets/8fcb90af-8c3f-47ca-8f23-61d9b59438ae)
5
+
6
+
7
+ ## Current Features
8
+
9
+ - **F1 and Original FramePack Models**: Run both in a single queue
10
+ - **Timestamped Prompts**: Define different prompts for specific time segments in your video
11
+ - **Prompt Blending**: Define the blending time between timestamped prompts
12
+ - **Basic LoRA Support**: Works with most (all?) Hunyuan LoRAs
13
+ - **Queue System**: Process multiple generation jobs without blocking the interface
14
+ - **Metadata Saving/Import**: Prompt and seed are encoded into the output PNG; all other generation metadata is saved in a JSON file
15
+ - **I2V and T2V**: Works with or without an input image to allow for more flexibility when working with standard LoRAs
16
+ - **Latent Image Options**: When using T2V you can generate based on a black, white, green screen or pure noise image
17
+
18
+
19
+ ## Fresh Installation
20
+
21
+ ### Prerequisites
22
+
23
+ - Python 3.10+
24
+ - CUDA-compatible GPU with at least 8GB VRAM (16GB+ recommended)
25
+
26
+ ### Setup
27
+
28
+ Install via the Pinokio community script "FP-Studio" or:
29
+
30
+ 1. Clone the repository:
31
+ ```bash
32
+ git clone https://github.com/colinurbs/FramePack-Studio.git
33
+ cd FramePack-Studio
34
+ ```
35
+
36
+ 2. Install dependencies:
37
+ ```bash
38
+ pip install -r requirements.txt
39
+ ```
40
+
41
+ ## Usage
42
+
43
+ Run the studio interface:
44
+
45
+ ```bash
46
+ python studio.py
47
+ ```
48
+
49
+ Additional command line options:
50
+ - `--share`: Create a public Gradio link to share your interface
51
+ - `--server`: Specify the server address (default: 0.0.0.0)
52
+ - `--port`: Specify a custom port
53
+ - `--inbrowser`: Automatically open the interface in your browser
54
+
55
+ ## LoRAs
56
+
57
+ Add LoRAs to the /loras/ folder at the root of the installation. Select the LoRAs you wish to load and set the weights for each generation.
58
+
59
+ NOTE: Slow LoRA loading is a known issue.
60
+
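
As a rough illustration of what the UI does when you select a LoRA and set its weight, the sketch below uses the helpers added in `diffusers_helper/lora_utils.py` later in this commit. The file name `my_style.safetensors`, the 0.8 weight, and the preloaded `transformer` object are assumptions for the example only.

```python
from pathlib import Path

from diffusers_helper.lora_utils import load_lora, set_adapters

# Placeholder: assumed to be the already-loaded FramePack transformer
# (HunyuanVideoTransformer3DModelPacked); loading it is out of scope here.
transformer = ...

# Load a hypothetical LoRA file from the loras/ folder; the adapter name
# is derived from the file name ("my_style").
transformer = load_lora(transformer, Path("loras"), weight_name="my_style.safetensors")

# Activate it with a per-generation weight, as set in the UI.
set_adapters(transformer, ["my_style"], weights=[0.8])
```
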
61
+ ## Working with Timestamped Prompts
62
+
63
+ You can create videos with changing prompts over time using the following syntax:
64
+
65
+ ```
66
+ [0s: A serene forest with sunlight filtering through the trees ]
67
+ [5s: A deer appears in the clearing ]
68
+ [10s: The deer drinks from a small stream ]
69
+ ```
70
+
71
+ Each timestamp defines when that prompt should start influencing the generation. The system will (hopefully) smoothly transition between prompts for a cohesive video.
72
+
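
The studio's actual prompt parser is not part of this excerpt, so the following is only a hypothetical sketch of how the `[Ns: prompt]` sections described above could be split into (start time, prompt) pairs.

```python
import re

# Hypothetical helper mirroring the "[Ns: prompt]" syntax shown above.
SECTION_RE = re.compile(r"\[(\d+(?:\.\d+)?)s\s*:\s*(.*?)\]", re.DOTALL)

def parse_timestamped_prompt(text: str):
    """Return (start_seconds, prompt) pairs in order of appearance."""
    return [(float(t), p.strip()) for t, p in SECTION_RE.findall(text)]

sections = parse_timestamped_prompt(
    "[0s: A serene forest with sunlight filtering through the trees]"
    "[5s: A deer appears in the clearing]"
    "[10s: The deer drinks from a small stream]"
)
print(sections)  # [(0.0, 'A serene forest ...'), (5.0, '...'), (10.0, '...')]
```
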
73
+ ## Credits
74
+ Many thanks to [Lvmin Zhang](https://github.com/lllyasviel) for the absolutely amazing work on the original [FramePack](https://github.com/lllyasviel/FramePack) code!
75
+
76
+ Thanks to [Rickard Edén](https://github.com/neph1) for the LoRA code and their general contributions to this growing FramePack scene!
77
+
78
+ Thanks to everyone who has joined the Discord, reported a bug, submitted a PR, or helped with testing!
79
+
80
+
81
+
82
+ @article{zhang2025framepack,
83
+ title={Packing Input Frame Contexts in Next-Frame Prediction Models for Video Generation},
84
+ author={Lvmin Zhang and Maneesh Agrawala},
85
+ journal={Arxiv},
86
+ year={2025}
87
+ }
diffusers_helper/bucket_tools.py ADDED
@@ -0,0 +1,93 @@
1
+ bucket_options = {
2
+ 640: [
3
+ (416, 960),
4
+ (448, 864),
5
+ (480, 832),
6
+ (512, 768),
7
+ (544, 704),
8
+ (576, 672),
9
+ (608, 640),
10
+ (640, 608),
11
+ (672, 576),
12
+ (704, 544),
13
+ (768, 512),
14
+ (832, 480),
15
+ (864, 448),
16
+ (960, 416),
17
+ ],
18
+ # Add options for other resolutions with similar aspect ratios
19
+ 128: [
20
+ (96, 160),
21
+ (112, 144),
22
+ (128, 128),
23
+ (144, 112),
24
+ (160, 96),
25
+ ],
26
+ 256: [
27
+ (192, 320),
28
+ (224, 288),
29
+ (256, 256),
30
+ (288, 224),
31
+ (320, 192),
32
+ ],
33
+ 384: [
34
+ (256, 512),
35
+ (320, 448),
36
+ (384, 384),
37
+ (448, 320),
38
+ (512, 256),
39
+ ],
40
+ 512: [
41
+ (352, 704),
42
+ (384, 640),
43
+ (448, 576),
44
+ (512, 512),
45
+ (576, 448),
46
+ (640, 384),
47
+ (704, 352),
48
+ ],
49
+ 768: [
50
+ (512, 1024),
51
+ (576, 896),
52
+ (640, 832),
53
+ (704, 768),
54
+ (768, 704),
55
+ (832, 640),
56
+ (896, 576),
57
+ (1024, 512),
58
+ ],
59
+ }
60
+
61
+
62
+ def find_nearest_bucket(h, w, resolution=640):
63
+ # Use the provided resolution or find the closest available bucket size
64
+ print(f"find_nearest_bucket called with h={h}, w={w}, resolution={resolution}")
65
+
66
+ if resolution not in bucket_options:
67
+ # Find the closest available resolution
68
+ available_resolutions = list(bucket_options.keys())
69
+ closest_resolution = min(available_resolutions, key=lambda x: abs(x - resolution))
70
+ print(f"Resolution {resolution} not found in bucket options, using closest available: {closest_resolution}")
71
+ resolution = closest_resolution
72
+ else:
73
+ print(f"Resolution {resolution} found in bucket options")
74
+
75
+ # Calculate the aspect ratio of the input image
76
+ input_aspect_ratio = w / h if h > 0 else 1.0
77
+ print(f"Input aspect ratio: {input_aspect_ratio:.4f}")
78
+
79
+ min_diff = float('inf')
80
+ best_bucket = None
81
+
82
+ # Find the bucket size with the closest aspect ratio to the input image
83
+ for (bucket_h, bucket_w) in bucket_options[resolution]:
84
+ bucket_aspect_ratio = bucket_w / bucket_h if bucket_h > 0 else 1.0
85
+ # Calculate the difference in aspect ratios
86
+ diff = abs(bucket_aspect_ratio - input_aspect_ratio)
87
+ if diff < min_diff:
88
+ min_diff = diff
89
+ best_bucket = (bucket_h, bucket_w)
90
+ print(f" Checking bucket ({bucket_h}, {bucket_w}), aspect ratio={bucket_aspect_ratio:.4f}, diff={diff:.4f}, current best={best_bucket}")
91
+
92
+ print(f"Using resolution {resolution}, selected bucket: {best_bucket}")
93
+ return best_bucket
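
A minimal usage sketch for `find_nearest_bucket` as defined above: for a 720x1280 (h x w) input at the default 640 bucket resolution, the closest aspect ratio in the table is the (480, 832) bucket.

```python
from diffusers_helper.bucket_tools import find_nearest_bucket

# 720p landscape input (h=720, w=1280), aspect ratio ~1.78.
height, width = find_nearest_bucket(720, 1280, resolution=640)
print(height, width)  # (480, 832): 832/480 ~ 1.73 is the closest ratio in the 640 table
```
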
diffusers_helper/clip_vision.py ADDED
@@ -0,0 +1,12 @@
1
+ import numpy as np
2
+
3
+
4
+ def hf_clip_vision_encode(image, feature_extractor, image_encoder):
5
+ assert isinstance(image, np.ndarray)
6
+ assert image.ndim == 3 and image.shape[2] == 3
7
+ assert image.dtype == np.uint8
8
+
9
+ preprocessed = feature_extractor.preprocess(images=image, return_tensors="pt").to(device=image_encoder.device, dtype=image_encoder.dtype)
10
+ image_encoder_output = image_encoder(**preprocessed)
11
+
12
+ return image_encoder_output
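
`hf_clip_vision_encode` is agnostic to which image processor and vision encoder are passed in; the sketch below exercises it with a generic CLIP checkpoint from `transformers`. The checkpoint name is an assumption for illustration; the encoder FramePack actually loads may differ.

```python
import numpy as np
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection

from diffusers_helper.clip_vision import hf_clip_vision_encode

# Illustrative checkpoint only; substitute the image encoder shipped with your weights.
feature_extractor = CLIPImageProcessor.from_pretrained("openai/clip-vit-large-patch14")
image_encoder = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14")

image = np.zeros((480, 832, 3), dtype=np.uint8)  # HWC uint8, as the asserts require
output = hf_clip_vision_encode(image, feature_extractor, image_encoder)
print(output.last_hidden_state.shape)
```
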
diffusers_helper/dit_common.py ADDED
@@ -0,0 +1,53 @@
1
+ import torch
2
+ import accelerate.accelerator
3
+
4
+ from diffusers.models.normalization import RMSNorm, LayerNorm, FP32LayerNorm, AdaLayerNormContinuous
5
+
6
+
7
+ accelerate.accelerator.convert_outputs_to_fp32 = lambda x: x
8
+
9
+
10
+ def LayerNorm_forward(self, x):
11
+ return torch.nn.functional.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps).to(x)
12
+
13
+
14
+ LayerNorm.forward = LayerNorm_forward
15
+ torch.nn.LayerNorm.forward = LayerNorm_forward
16
+
17
+
18
+ def FP32LayerNorm_forward(self, x):
19
+ origin_dtype = x.dtype
20
+ return torch.nn.functional.layer_norm(
21
+ x.float(),
22
+ self.normalized_shape,
23
+ self.weight.float() if self.weight is not None else None,
24
+ self.bias.float() if self.bias is not None else None,
25
+ self.eps,
26
+ ).to(origin_dtype)
27
+
28
+
29
+ FP32LayerNorm.forward = FP32LayerNorm_forward
30
+
31
+
32
+ def RMSNorm_forward(self, hidden_states):
33
+ input_dtype = hidden_states.dtype
34
+ variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
35
+ hidden_states = hidden_states * torch.rsqrt(variance + self.eps)
36
+
37
+ if self.weight is None:
38
+ return hidden_states.to(input_dtype)
39
+
40
+ return hidden_states.to(input_dtype) * self.weight.to(input_dtype)
41
+
42
+
43
+ RMSNorm.forward = RMSNorm_forward
44
+
45
+
46
+ def AdaLayerNormContinuous_forward(self, x, conditioning_embedding):
47
+ emb = self.linear(self.silu(conditioning_embedding))
48
+ scale, shift = emb.chunk(2, dim=1)
49
+ x = self.norm(x) * (1 + scale)[:, None, :] + shift[:, None, :]
50
+ return x
51
+
52
+
53
+ AdaLayerNormContinuous.forward = AdaLayerNormContinuous_forward
diffusers_helper/gradio/progress_bar.py ADDED
@@ -0,0 +1,86 @@
1
+ progress_html = '''
2
+ <div class="loader-container">
3
+ <div class="loader"></div>
4
+ <div class="progress-container">
5
+ <progress value="*number*" max="100"></progress>
6
+ </div>
7
+ <span>*text*</span>
8
+ </div>
9
+ '''
10
+
11
+ css = '''
12
+ .loader-container {
13
+ display: flex; /* Use flex to align items horizontally */
14
+ align-items: center; /* Center items vertically within the container */
15
+ white-space: nowrap; /* Prevent line breaks within the container */
16
+ }
17
+
18
+ .loader {
19
+ border: 8px solid #f3f3f3; /* Light grey */
20
+ border-top: 8px solid #3498db; /* Blue */
21
+ border-radius: 50%;
22
+ width: 30px;
23
+ height: 30px;
24
+ animation: spin 2s linear infinite;
25
+ }
26
+
27
+ @keyframes spin {
28
+ 0% { transform: rotate(0deg); }
29
+ 100% { transform: rotate(360deg); }
30
+ }
31
+
32
+ /* Style the progress bar */
33
+ progress {
34
+ appearance: none; /* Remove default styling */
35
+ height: 20px; /* Set the height of the progress bar */
36
+ border-radius: 5px; /* Round the corners of the progress bar */
37
+ background-color: #f3f3f3; /* Light grey background */
38
+ width: 100%;
39
+ vertical-align: middle !important;
40
+ }
41
+
42
+ /* Style the progress bar container */
43
+ .progress-container {
44
+ margin-left: 20px;
45
+ margin-right: 20px;
46
+ flex-grow: 1; /* Allow the progress container to take up remaining space */
47
+ }
48
+
49
+ /* Set the color of the progress bar fill */
50
+ progress::-webkit-progress-value {
51
+ background-color: #3498db; /* Blue color for the fill */
52
+ }
53
+
54
+ progress::-moz-progress-bar {
55
+ background-color: #3498db; /* Blue color for the fill in Firefox */
56
+ }
57
+
58
+ /* Style the text on the progress bar */
59
+ progress::after {
60
+ content: attr(value '%'); /* Display the progress value followed by '%' */
61
+ position: absolute;
62
+ top: 50%;
63
+ left: 50%;
64
+ transform: translate(-50%, -50%);
65
+ color: white; /* Set text color */
66
+ font-size: 14px; /* Set font size */
67
+ }
68
+
69
+ /* Style other texts */
70
+ .loader-container > span {
71
+ margin-left: 5px; /* Add spacing between the progress bar and the text */
72
+ }
73
+
74
+ .no-generating-animation > .generating {
75
+ display: none !important;
76
+ }
77
+
78
+ '''
79
+
80
+
81
+ def make_progress_bar_html(number, text):
82
+ return progress_html.replace('*number*', str(number)).replace('*text*', text)
83
+
84
+
85
+ def make_progress_bar_css():
86
+ return css
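
A short usage sketch: the CSS is meant to be attached to the Gradio app once (for example via `gr.Blocks(css=...)`), and the HTML snippet is regenerated whenever progress changes (for example inside a `gr.HTML` component). The percentage and label below are arbitrary.

```python
from diffusers_helper.gradio.progress_bar import make_progress_bar_css, make_progress_bar_html

css = make_progress_bar_css()                        # pass to the Blocks app once
html = make_progress_bar_html(42, 'Sampling, 42%')   # re-render as progress updates
print(html)
```
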
diffusers_helper/hf_login.py ADDED
@@ -0,0 +1,21 @@
1
+ import os
2
+
3
+
4
+ def login(token):
5
+ from huggingface_hub import login
6
+ import time
7
+
8
+ while True:
9
+ try:
10
+ login(token)
11
+ print('HF login ok.')
12
+ break
13
+ except Exception as e:
14
+ print(f'HF login failed: {e}. Retrying')
15
+ time.sleep(0.5)
16
+
17
+
18
+ hf_token = os.environ.get('HF_TOKEN', None)
19
+
20
+ if hf_token is not None:
21
+ login(hf_token)
diffusers_helper/hunyuan.py ADDED
@@ -0,0 +1,111 @@
1
+ import torch
2
+
3
+ from diffusers.pipelines.hunyuan_video.pipeline_hunyuan_video import DEFAULT_PROMPT_TEMPLATE
4
+ from diffusers_helper.utils import crop_or_pad_yield_mask
5
+
6
+
7
+ @torch.no_grad()
8
+ def encode_prompt_conds(prompt, text_encoder, text_encoder_2, tokenizer, tokenizer_2, max_length=256):
9
+ assert isinstance(prompt, str)
10
+
11
+ prompt = [prompt]
12
+
13
+ # LLAMA
14
+
15
+ prompt_llama = [DEFAULT_PROMPT_TEMPLATE["template"].format(p) for p in prompt]
16
+ crop_start = DEFAULT_PROMPT_TEMPLATE["crop_start"]
17
+
18
+ llama_inputs = tokenizer(
19
+ prompt_llama,
20
+ padding="max_length",
21
+ max_length=max_length + crop_start,
22
+ truncation=True,
23
+ return_tensors="pt",
24
+ return_length=False,
25
+ return_overflowing_tokens=False,
26
+ return_attention_mask=True,
27
+ )
28
+
29
+ llama_input_ids = llama_inputs.input_ids.to(text_encoder.device)
30
+ llama_attention_mask = llama_inputs.attention_mask.to(text_encoder.device)
31
+ llama_attention_length = int(llama_attention_mask.sum())
32
+
33
+ llama_outputs = text_encoder(
34
+ input_ids=llama_input_ids,
35
+ attention_mask=llama_attention_mask,
36
+ output_hidden_states=True,
37
+ )
38
+
39
+ llama_vec = llama_outputs.hidden_states[-3][:, crop_start:llama_attention_length]
40
+ # llama_vec_remaining = llama_outputs.hidden_states[-3][:, llama_attention_length:]
41
+ llama_attention_mask = llama_attention_mask[:, crop_start:llama_attention_length]
42
+
43
+ assert torch.all(llama_attention_mask.bool())
44
+
45
+ # CLIP
46
+
47
+ clip_l_input_ids = tokenizer_2(
48
+ prompt,
49
+ padding="max_length",
50
+ max_length=77,
51
+ truncation=True,
52
+ return_overflowing_tokens=False,
53
+ return_length=False,
54
+ return_tensors="pt",
55
+ ).input_ids
56
+ clip_l_pooler = text_encoder_2(clip_l_input_ids.to(text_encoder_2.device), output_hidden_states=False).pooler_output
57
+
58
+ return llama_vec, clip_l_pooler
59
+
60
+
61
+ @torch.no_grad()
62
+ def vae_decode_fake(latents):
63
+ latent_rgb_factors = [
64
+ [-0.0395, -0.0331, 0.0445],
65
+ [0.0696, 0.0795, 0.0518],
66
+ [0.0135, -0.0945, -0.0282],
67
+ [0.0108, -0.0250, -0.0765],
68
+ [-0.0209, 0.0032, 0.0224],
69
+ [-0.0804, -0.0254, -0.0639],
70
+ [-0.0991, 0.0271, -0.0669],
71
+ [-0.0646, -0.0422, -0.0400],
72
+ [-0.0696, -0.0595, -0.0894],
73
+ [-0.0799, -0.0208, -0.0375],
74
+ [0.1166, 0.1627, 0.0962],
75
+ [0.1165, 0.0432, 0.0407],
76
+ [-0.2315, -0.1920, -0.1355],
77
+ [-0.0270, 0.0401, -0.0821],
78
+ [-0.0616, -0.0997, -0.0727],
79
+ [0.0249, -0.0469, -0.1703]
80
+ ] # From comfyui
81
+
82
+ latent_rgb_factors_bias = [0.0259, -0.0192, -0.0761]
83
+
84
+ weight = torch.tensor(latent_rgb_factors, device=latents.device, dtype=latents.dtype).transpose(0, 1)[:, :, None, None, None]
85
+ bias = torch.tensor(latent_rgb_factors_bias, device=latents.device, dtype=latents.dtype)
86
+
87
+ images = torch.nn.functional.conv3d(latents, weight, bias=bias, stride=1, padding=0, dilation=1, groups=1)
88
+ images = images.clamp(0.0, 1.0)
89
+
90
+ return images
91
+
92
+
93
+ @torch.no_grad()
94
+ def vae_decode(latents, vae, image_mode=False):
95
+ latents = latents / vae.config.scaling_factor
96
+
97
+ if not image_mode:
98
+ image = vae.decode(latents.to(device=vae.device, dtype=vae.dtype)).sample
99
+ else:
100
+ latents = latents.to(device=vae.device, dtype=vae.dtype).unbind(2)
101
+ image = [vae.decode(l.unsqueeze(2)).sample for l in latents]
102
+ image = torch.cat(image, dim=2)
103
+
104
+ return image
105
+
106
+
107
+ @torch.no_grad()
108
+ def vae_encode(image, vae):
109
+ latents = vae.encode(image.to(device=vae.device, dtype=vae.dtype)).latent_dist.sample()
110
+ latents = latents * vae.config.scaling_factor
111
+ return latents
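
`vae_decode_fake` is a cheap preview path: instead of running the VAE it projects the 16 latent channels straight to RGB with the factor matrix above. A minimal sketch with random latents (the latent shape here is just an example):

```python
import torch

from diffusers_helper.hunyuan import vae_decode_fake

# (batch, 16 latent channels, frames, height, width); 16 matches the factor rows above.
latents = torch.randn(1, 16, 9, 64, 64)
preview = vae_decode_fake(latents)
print(preview.shape)  # (1, 3, 9, 64, 64), values clamped to [0, 1]
```
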
diffusers_helper/k_diffusion/uni_pc_fm.py ADDED
@@ -0,0 +1,141 @@
1
+ # Better Flow Matching UniPC by Lvmin Zhang
2
+ # (c) 2025
3
+ # CC BY-SA 4.0
4
+ # Attribution-ShareAlike 4.0 International Licence
5
+
6
+
7
+ import torch
8
+
9
+ from tqdm.auto import trange
10
+
11
+
12
+ def expand_dims(v, dims):
13
+ return v[(...,) + (None,) * (dims - 1)]
14
+
15
+
16
+ class FlowMatchUniPC:
17
+ def __init__(self, model, extra_args, variant='bh1'):
18
+ self.model = model
19
+ self.variant = variant
20
+ self.extra_args = extra_args
21
+
22
+ def model_fn(self, x, t):
23
+ return self.model(x, t, **self.extra_args)
24
+
25
+ def update_fn(self, x, model_prev_list, t_prev_list, t, order):
26
+ assert order <= len(model_prev_list)
27
+ dims = x.dim()
28
+
29
+ t_prev_0 = t_prev_list[-1]
30
+ lambda_prev_0 = - torch.log(t_prev_0)
31
+ lambda_t = - torch.log(t)
32
+ model_prev_0 = model_prev_list[-1]
33
+
34
+ h = lambda_t - lambda_prev_0
35
+
36
+ rks = []
37
+ D1s = []
38
+ for i in range(1, order):
39
+ t_prev_i = t_prev_list[-(i + 1)]
40
+ model_prev_i = model_prev_list[-(i + 1)]
41
+ lambda_prev_i = - torch.log(t_prev_i)
42
+ rk = ((lambda_prev_i - lambda_prev_0) / h)[0]
43
+ rks.append(rk)
44
+ D1s.append((model_prev_i - model_prev_0) / rk)
45
+
46
+ rks.append(1.)
47
+ rks = torch.tensor(rks, device=x.device)
48
+
49
+ R = []
50
+ b = []
51
+
52
+ hh = -h[0]
53
+ h_phi_1 = torch.expm1(hh)
54
+ h_phi_k = h_phi_1 / hh - 1
55
+
56
+ factorial_i = 1
57
+
58
+ if self.variant == 'bh1':
59
+ B_h = hh
60
+ elif self.variant == 'bh2':
61
+ B_h = torch.expm1(hh)
62
+ else:
63
+ raise NotImplementedError('Bad variant!')
64
+
65
+ for i in range(1, order + 1):
66
+ R.append(torch.pow(rks, i - 1))
67
+ b.append(h_phi_k * factorial_i / B_h)
68
+ factorial_i *= (i + 1)
69
+ h_phi_k = h_phi_k / hh - 1 / factorial_i
70
+
71
+ R = torch.stack(R)
72
+ b = torch.tensor(b, device=x.device)
73
+
74
+ use_predictor = len(D1s) > 0
75
+
76
+ if use_predictor:
77
+ D1s = torch.stack(D1s, dim=1)
78
+ if order == 2:
79
+ rhos_p = torch.tensor([0.5], device=b.device)
80
+ else:
81
+ rhos_p = torch.linalg.solve(R[:-1, :-1], b[:-1])
82
+ else:
83
+ D1s = None
84
+ rhos_p = None
85
+
86
+ if order == 1:
87
+ rhos_c = torch.tensor([0.5], device=b.device)
88
+ else:
89
+ rhos_c = torch.linalg.solve(R, b)
90
+
91
+ x_t_ = expand_dims(t / t_prev_0, dims) * x - expand_dims(h_phi_1, dims) * model_prev_0
92
+
93
+ if use_predictor:
94
+ pred_res = torch.tensordot(D1s, rhos_p, dims=([1], [0]))
95
+ else:
96
+ pred_res = 0
97
+
98
+ x_t = x_t_ - expand_dims(B_h, dims) * pred_res
99
+ model_t = self.model_fn(x_t, t)
100
+
101
+ if D1s is not None:
102
+ corr_res = torch.tensordot(D1s, rhos_c[:-1], dims=([1], [0]))
103
+ else:
104
+ corr_res = 0
105
+
106
+ D1_t = (model_t - model_prev_0)
107
+ x_t = x_t_ - expand_dims(B_h, dims) * (corr_res + rhos_c[-1] * D1_t)
108
+
109
+ return x_t, model_t
110
+
111
+ def sample(self, x, sigmas, callback=None, disable_pbar=False):
112
+ order = min(3, len(sigmas) - 2)
113
+ model_prev_list, t_prev_list = [], []
114
+ for i in trange(len(sigmas) - 1, disable=disable_pbar):
115
+ vec_t = sigmas[i].expand(x.shape[0])
116
+
117
+ if i == 0:
118
+ model_prev_list = [self.model_fn(x, vec_t)]
119
+ t_prev_list = [vec_t]
120
+ elif i < order:
121
+ init_order = i
122
+ x, model_x = self.update_fn(x, model_prev_list, t_prev_list, vec_t, init_order)
123
+ model_prev_list.append(model_x)
124
+ t_prev_list.append(vec_t)
125
+ else:
126
+ x, model_x = self.update_fn(x, model_prev_list, t_prev_list, vec_t, order)
127
+ model_prev_list.append(model_x)
128
+ t_prev_list.append(vec_t)
129
+
130
+ model_prev_list = model_prev_list[-order:]
131
+ t_prev_list = t_prev_list[-order:]
132
+
133
+ if callback is not None:
134
+ callback({'x': x, 'i': i, 'denoised': model_prev_list[-1]})
135
+
136
+ return model_prev_list[-1]
137
+
138
+
139
+ def sample_unipc(model, noise, sigmas, extra_args=None, callback=None, disable=False, variant='bh1'):
140
+ assert variant in ['bh1', 'bh2']
141
+ return FlowMatchUniPC(model, extra_args=extra_args, variant=variant).sample(noise, sigmas=sigmas, callback=callback, disable_pbar=disable)
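
A toy invocation of `sample_unipc`, only to show the expected call shape: the model is called as `model(x, t, **extra_args)`, so pass an empty dict rather than leaving `extra_args` as `None`, and `sigmas` is a decreasing schedule. The zero-predicting model and tensor sizes are placeholders; the real denoiser is the `fm_wrapper(...)` closure from `wrapper.py`.

```python
import torch

from diffusers_helper.k_diffusion.uni_pc_fm import sample_unipc

def toy_model(x, t, **kwargs):
    # Placeholder denoiser that always predicts a zero clean sample.
    return torch.zeros_like(x)

noise = torch.randn(1, 4, 32, 32)
sigmas = torch.linspace(1.0, 0.0, 26)  # 25 steps, sigma decreasing from 1 toward 0
out = sample_unipc(toy_model, noise, sigmas, extra_args={}, variant='bh1')
print(out.shape)  # same shape as the input noise
```
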
diffusers_helper/k_diffusion/wrapper.py ADDED
@@ -0,0 +1,51 @@
1
+ import torch
2
+
3
+
4
+ def append_dims(x, target_dims):
5
+ return x[(...,) + (None,) * (target_dims - x.ndim)]
6
+
7
+
8
+ def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=1.0):
9
+ if guidance_rescale == 0:
10
+ return noise_cfg
11
+
12
+ std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True)
13
+ std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True)
14
+ noise_pred_rescaled = noise_cfg * (std_text / std_cfg)
15
+ noise_cfg = guidance_rescale * noise_pred_rescaled + (1.0 - guidance_rescale) * noise_cfg
16
+ return noise_cfg
17
+
18
+
19
+ def fm_wrapper(transformer, t_scale=1000.0):
20
+ def k_model(x, sigma, **extra_args):
21
+ dtype = extra_args['dtype']
22
+ cfg_scale = extra_args['cfg_scale']
23
+ cfg_rescale = extra_args['cfg_rescale']
24
+ concat_latent = extra_args['concat_latent']
25
+
26
+ original_dtype = x.dtype
27
+ sigma = sigma.float()
28
+
29
+ x = x.to(dtype)
30
+ timestep = (sigma * t_scale).to(dtype)
31
+
32
+ if concat_latent is None:
33
+ hidden_states = x
34
+ else:
35
+ hidden_states = torch.cat([x, concat_latent.to(x)], dim=1)
36
+
37
+ pred_positive = transformer(hidden_states=hidden_states, timestep=timestep, return_dict=False, **extra_args['positive'])[0].float()
38
+
39
+ if cfg_scale == 1.0:
40
+ pred_negative = torch.zeros_like(pred_positive)
41
+ else:
42
+ pred_negative = transformer(hidden_states=hidden_states, timestep=timestep, return_dict=False, **extra_args['negative'])[0].float()
43
+
44
+ pred_cfg = pred_negative + cfg_scale * (pred_positive - pred_negative)
45
+ pred = rescale_noise_cfg(pred_cfg, pred_positive, guidance_rescale=cfg_rescale)
46
+
47
+ x0 = x.float() - pred.float() * append_dims(sigma, x.ndim)
48
+
49
+ return x0.to(dtype=original_dtype)
50
+
51
+ return k_model
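
The wrapper reads a fixed set of keys from `extra_args`; the sketch below documents that contract with a stand-in transformer (real usage passes the packed HunyuanVideo transformer and its text-conditioning kwargs in `positive`/`negative`). All tensor sizes are arbitrary.

```python
import torch

from diffusers_helper.k_diffusion.wrapper import fm_wrapper

def dummy_transformer(hidden_states, timestep, return_dict=False, **cond_kwargs):
    # Stand-in with the call signature fm_wrapper expects.
    return (torch.zeros_like(hidden_states),)

k_model = fm_wrapper(dummy_transformer)
x = torch.randn(1, 16, 9, 8, 8)
sigma = torch.tensor([0.5])
x0 = k_model(
    x, sigma,
    dtype=torch.float32, cfg_scale=1.0, cfg_rescale=0.0, concat_latent=None,
    positive={}, negative={},
)
print(x0.shape)  # x0 prediction, same shape as x
```
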
diffusers_helper/lora_utils.py ADDED
@@ -0,0 +1,121 @@
1
+ from pathlib import Path
2
+ from typing import Dict, List, Optional, Union
3
+ from diffusers.loaders.lora_pipeline import _fetch_state_dict
4
+ from diffusers.loaders.lora_conversion_utils import _convert_hunyuan_video_lora_to_diffusers
5
+ from diffusers.utils.peft_utils import set_weights_and_activate_adapters
6
+ from diffusers.loaders.peft import _SET_ADAPTER_SCALE_FN_MAPPING
7
+ import torch
8
+
9
+ def load_lora(transformer, lora_path: Path, weight_name: Optional[str] = "pytorch_lora_weights.safetensors"):
10
+ """
11
+ Load LoRA weights into the transformer model.
12
+
13
+ Args:
14
+ transformer: The transformer model to which LoRA weights will be applied.
15
+ lora_path (Path): Path to the LoRA weights file.
16
+ weight_name (Optional[str]): Name of the weight to load.
17
+
18
+ """
19
+
20
+ state_dict = _fetch_state_dict(
21
+ lora_path,
22
+ weight_name,
23
+ True,
24
+ True,
25
+ None,
26
+ None,
27
+ None,
28
+ None,
29
+ None,
30
+ None,
31
+ None,
32
+ None)
33
+
34
+ state_dict = _convert_hunyuan_video_lora_to_diffusers(state_dict)
35
+
36
+ adapter_name = weight_name.split(".")[0]
37
+
38
+ # Check if adapter already exists and delete it if it does
39
+ if hasattr(transformer, 'peft_config') and adapter_name in transformer.peft_config:
40
+ print(f"Adapter '{adapter_name}' already exists. Removing it before loading again.")
41
+ # Use delete_adapters (plural) instead of delete_adapter
42
+ transformer.delete_adapters([adapter_name])
43
+
44
+ # Load the adapter with the original name
45
+ transformer.load_lora_adapter(state_dict, network_alphas=None, adapter_name=adapter_name)
46
+ print(f"LoRA weights '{adapter_name}' loaded successfully.")
47
+
48
+ return transformer
49
+
50
+ def unload_all_loras(transformer):
51
+ """
52
+ Completely unload all LoRA adapters from the transformer model.
53
+ """
54
+ if hasattr(transformer, 'peft_config') and transformer.peft_config:
55
+ # Get all adapter names
56
+ adapter_names = list(transformer.peft_config.keys())
57
+
58
+ if adapter_names:
59
+ print(f"Removing all LoRA adapters: {', '.join(adapter_names)}")
60
+ # Delete all adapters
61
+ transformer.delete_adapters(adapter_names)
62
+
63
+ # Force cleanup of any remaining adapter references
64
+ if hasattr(transformer, 'active_adapter'):
65
+ transformer.active_adapter = None
66
+
67
+ # Clear any cached states
68
+ for module in transformer.modules():
69
+ if hasattr(module, 'lora_A'):
70
+ if isinstance(module.lora_A, dict):
71
+ module.lora_A.clear()
72
+ if hasattr(module, 'lora_B'):
73
+ if isinstance(module.lora_B, dict):
74
+ module.lora_B.clear()
75
+ if hasattr(module, 'scaling'):
76
+ if isinstance(module.scaling, dict):
77
+ module.scaling.clear()
78
+
79
+ print("All LoRA adapters have been completely removed.")
80
+ else:
81
+ print("No LoRA adapters found to remove.")
82
+ else:
83
+ print("Model doesn't have any LoRA adapters or peft_config.")
84
+
85
+ # Force garbage collection
86
+ import gc
87
+ gc.collect()
88
+ if torch.cuda.is_available():
89
+ torch.cuda.empty_cache()
90
+
91
+ return transformer
92
+
93
+
94
+ # TODO(neph1): remove when HunyuanVideoTransformer3DModelPacked is in _SET_ADAPTER_SCALE_FN_MAPPING
95
+ def set_adapters(
96
+ transformer,
97
+ adapter_names: Union[List[str], str],
98
+ weights: Optional[Union[float, Dict, List[float], List[Dict], List[None]]] = None,
99
+ ):
100
+
101
+ adapter_names = [adapter_names] if isinstance(adapter_names, str) else adapter_names
102
+
103
+ # Expand weights into a list, one entry per adapter
104
+ # examples for e.g. 2 adapters: [{...}, 7] -> [7,7] ; None -> [None, None]
105
+ if not isinstance(weights, list):
106
+ weights = [weights] * len(adapter_names)
107
+
108
+ if len(adapter_names) != len(weights):
109
+ raise ValueError(
110
+ f"Length of adapter names {len(adapter_names)} is not equal to the length of their weights {len(weights)}."
111
+ )
112
+
113
+ # Set None values to default of 1.0
114
+ # e.g. [{...}, 7] -> [{...}, 7] ; [None, None] -> [1.0, 1.0]
115
+ weights = [w if w is not None else 1.0 for w in weights]
116
+
117
+ # e.g. [{...}, 7] -> [{expanded dict...}, 7]
118
+ scale_expansion_fn = _SET_ADAPTER_SCALE_FN_MAPPING["HunyuanVideoTransformer3DModel"]
119
+ weights = scale_expansion_fn(transformer, weights)
120
+
121
+ set_weights_and_activate_adapters(transformer, adapter_names, weights)
diffusers_helper/memory.py ADDED
@@ -0,0 +1,134 @@
1
+ # By lllyasviel
2
+
3
+
4
+ import torch
5
+
6
+
7
+ cpu = torch.device('cpu')
8
+ gpu = torch.device(f'cuda:{torch.cuda.current_device()}')
9
+ gpu_complete_modules = []
10
+
11
+
12
+ class DynamicSwapInstaller:
13
+ @staticmethod
14
+ def _install_module(module: torch.nn.Module, **kwargs):
15
+ original_class = module.__class__
16
+ module.__dict__['forge_backup_original_class'] = original_class
17
+
18
+ def hacked_get_attr(self, name: str):
19
+ if '_parameters' in self.__dict__:
20
+ _parameters = self.__dict__['_parameters']
21
+ if name in _parameters:
22
+ p = _parameters[name]
23
+ if p is None:
24
+ return None
25
+ if p.__class__ == torch.nn.Parameter:
26
+ return torch.nn.Parameter(p.to(**kwargs), requires_grad=p.requires_grad)
27
+ else:
28
+ return p.to(**kwargs)
29
+ if '_buffers' in self.__dict__:
30
+ _buffers = self.__dict__['_buffers']
31
+ if name in _buffers:
32
+ return _buffers[name].to(**kwargs)
33
+ return super(original_class, self).__getattr__(name)
34
+
35
+ module.__class__ = type('DynamicSwap_' + original_class.__name__, (original_class,), {
36
+ '__getattr__': hacked_get_attr,
37
+ })
38
+
39
+ return
40
+
41
+ @staticmethod
42
+ def _uninstall_module(module: torch.nn.Module):
43
+ if 'forge_backup_original_class' in module.__dict__:
44
+ module.__class__ = module.__dict__.pop('forge_backup_original_class')
45
+ return
46
+
47
+ @staticmethod
48
+ def install_model(model: torch.nn.Module, **kwargs):
49
+ for m in model.modules():
50
+ DynamicSwapInstaller._install_module(m, **kwargs)
51
+ return
52
+
53
+ @staticmethod
54
+ def uninstall_model(model: torch.nn.Module):
55
+ for m in model.modules():
56
+ DynamicSwapInstaller._uninstall_module(m)
57
+ return
58
+
59
+
60
+ def fake_diffusers_current_device(model: torch.nn.Module, target_device: torch.device):
61
+ if hasattr(model, 'scale_shift_table'):
62
+ model.scale_shift_table.data = model.scale_shift_table.data.to(target_device)
63
+ return
64
+
65
+ for k, p in model.named_modules():
66
+ if hasattr(p, 'weight'):
67
+ p.to(target_device)
68
+ return
69
+
70
+
71
+ def get_cuda_free_memory_gb(device=None):
72
+ if device is None:
73
+ device = gpu
74
+
75
+ memory_stats = torch.cuda.memory_stats(device)
76
+ bytes_active = memory_stats['active_bytes.all.current']
77
+ bytes_reserved = memory_stats['reserved_bytes.all.current']
78
+ bytes_free_cuda, _ = torch.cuda.mem_get_info(device)
79
+ bytes_inactive_reserved = bytes_reserved - bytes_active
80
+ bytes_total_available = bytes_free_cuda + bytes_inactive_reserved
81
+ return bytes_total_available / (1024 ** 3)
82
+
83
+
84
+ def move_model_to_device_with_memory_preservation(model, target_device, preserved_memory_gb=0):
85
+ print(f'Moving {model.__class__.__name__} to {target_device} with preserved memory: {preserved_memory_gb} GB')
86
+
87
+ for m in model.modules():
88
+ if get_cuda_free_memory_gb(target_device) <= preserved_memory_gb:
89
+ torch.cuda.empty_cache()
90
+ return
91
+
92
+ if hasattr(m, 'weight'):
93
+ m.to(device=target_device)
94
+
95
+ model.to(device=target_device)
96
+ torch.cuda.empty_cache()
97
+ return
98
+
99
+
100
+ def offload_model_from_device_for_memory_preservation(model, target_device, preserved_memory_gb=0):
101
+ print(f'Offloading {model.__class__.__name__} from {target_device} to preserve memory: {preserved_memory_gb} GB')
102
+
103
+ for m in model.modules():
104
+ if get_cuda_free_memory_gb(target_device) >= preserved_memory_gb:
105
+ torch.cuda.empty_cache()
106
+ return
107
+
108
+ if hasattr(m, 'weight'):
109
+ m.to(device=cpu)
110
+
111
+ model.to(device=cpu)
112
+ torch.cuda.empty_cache()
113
+ return
114
+
115
+
116
+ def unload_complete_models(*args):
117
+ for m in gpu_complete_modules + list(args):
118
+ m.to(device=cpu)
119
+ print(f'Unloaded {m.__class__.__name__} as complete.')
120
+
121
+ gpu_complete_modules.clear()
122
+ torch.cuda.empty_cache()
123
+ return
124
+
125
+
126
+ def load_model_as_complete(model, target_device, unload=True):
127
+ if unload:
128
+ unload_complete_models()
129
+
130
+ model.to(device=target_device)
131
+ print(f'Loaded {model.__class__.__name__} to {target_device} as complete.')
132
+
133
+ gpu_complete_modules.append(model)
134
+ return
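
A small sketch of the dynamic-swap idea above, assuming a CUDA device is available (the module resolves the default GPU at import time): after `install_model`, parameters stay resident on the CPU and are only copied to the target device when accessed, which is how the studio keeps large models off the GPU between uses.

```python
import torch

from diffusers_helper.memory import DynamicSwapInstaller, get_cuda_free_memory_gb, gpu

torch.cuda.init()                             # make sure memory statistics exist
print(f"Free VRAM: {get_cuda_free_memory_gb(gpu):.2f} GB")

model = torch.nn.Linear(8, 8)                 # weights live on the CPU
DynamicSwapInstaller.install_model(model, device=gpu)

x = torch.randn(1, 8, device=gpu)
y = model(x)                                  # weight/bias are streamed to the GPU for this call
print(y.device)

DynamicSwapInstaller.uninstall_model(model)   # restore the original module classes
```
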
diffusers_helper/models/hunyuan_video_packed.py ADDED
@@ -0,0 +1,1032 @@
1
+ from typing import Any, Dict, List, Optional, Tuple, Union
2
+
3
+ import torch
4
+ import einops
5
+ import torch.nn as nn
6
+ import numpy as np
7
+
8
+ from diffusers.loaders import FromOriginalModelMixin
9
+ from diffusers.configuration_utils import ConfigMixin, register_to_config
10
+ from diffusers.loaders import PeftAdapterMixin
11
+ from diffusers.utils import logging
12
+ from diffusers.models.attention import FeedForward
13
+ from diffusers.models.attention_processor import Attention
14
+ from diffusers.models.embeddings import TimestepEmbedding, Timesteps, PixArtAlphaTextProjection
15
+ from diffusers.models.modeling_outputs import Transformer2DModelOutput
16
+ from diffusers.models.modeling_utils import ModelMixin
17
+ from diffusers_helper.dit_common import LayerNorm
18
+ from diffusers_helper.utils import zero_module
19
+
20
+
21
+ enabled_backends = []
22
+
23
+ if torch.backends.cuda.flash_sdp_enabled():
24
+ enabled_backends.append("flash")
25
+ if torch.backends.cuda.math_sdp_enabled():
26
+ enabled_backends.append("math")
27
+ if torch.backends.cuda.mem_efficient_sdp_enabled():
28
+ enabled_backends.append("mem_efficient")
29
+ if torch.backends.cuda.cudnn_sdp_enabled():
30
+ enabled_backends.append("cudnn")
31
+
32
+ print("Currently enabled native sdp backends:", enabled_backends)
33
+
34
+ try:
35
+ # raise NotImplementedError
36
+ from xformers.ops import memory_efficient_attention as xformers_attn_func
37
+ print('Xformers is installed!')
38
+ except:
39
+ print('Xformers is not installed!')
40
+ xformers_attn_func = None
41
+
42
+ try:
43
+ # raise NotImplementedError
44
+ from flash_attn import flash_attn_varlen_func, flash_attn_func
45
+ print('Flash Attn is installed!')
46
+ except:
47
+ print('Flash Attn is not installed!')
48
+ flash_attn_varlen_func = None
49
+ flash_attn_func = None
50
+
51
+ try:
52
+ # raise NotImplementedError
53
+ from sageattention import sageattn_varlen, sageattn
54
+ print('Sage Attn is installed!')
55
+ except:
56
+ print('Sage Attn is not installed!')
57
+ sageattn_varlen = None
58
+ sageattn = None
59
+
60
+
61
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
62
+
63
+
64
+ def pad_for_3d_conv(x, kernel_size):
65
+ b, c, t, h, w = x.shape
66
+ pt, ph, pw = kernel_size
67
+ pad_t = (pt - (t % pt)) % pt
68
+ pad_h = (ph - (h % ph)) % ph
69
+ pad_w = (pw - (w % pw)) % pw
70
+ return torch.nn.functional.pad(x, (0, pad_w, 0, pad_h, 0, pad_t), mode='replicate')
71
+
72
+
73
+ def center_down_sample_3d(x, kernel_size):
74
+ # pt, ph, pw = kernel_size
75
+ # cp = (pt * ph * pw) // 2
76
+ # xp = einops.rearrange(x, 'b c (t pt) (h ph) (w pw) -> (pt ph pw) b c t h w', pt=pt, ph=ph, pw=pw)
77
+ # xc = xp[cp]
78
+ # return xc
79
+ return torch.nn.functional.avg_pool3d(x, kernel_size, stride=kernel_size)
80
+
81
+
82
+ def get_cu_seqlens(text_mask, img_len):
83
+ batch_size = text_mask.shape[0]
84
+ text_len = text_mask.sum(dim=1)
85
+ max_len = text_mask.shape[1] + img_len
86
+
87
+ cu_seqlens = torch.zeros([2 * batch_size + 1], dtype=torch.int32, device="cuda")
88
+
89
+ for i in range(batch_size):
90
+ s = text_len[i] + img_len
91
+ s1 = i * max_len + s
92
+ s2 = (i + 1) * max_len
93
+ cu_seqlens[2 * i + 1] = s1
94
+ cu_seqlens[2 * i + 2] = s2
95
+
96
+ return cu_seqlens
97
+
98
+
99
+ def apply_rotary_emb_transposed(x, freqs_cis):
100
+ cos, sin = freqs_cis.unsqueeze(-2).chunk(2, dim=-1)
101
+ x_real, x_imag = x.unflatten(-1, (-1, 2)).unbind(-1)
102
+ x_rotated = torch.stack([-x_imag, x_real], dim=-1).flatten(3)
103
+ out = x.float() * cos + x_rotated.float() * sin
104
+ out = out.to(x)
105
+ return out
106
+
107
+
108
+ def attn_varlen_func(q, k, v, cu_seqlens_q, cu_seqlens_kv, max_seqlen_q, max_seqlen_kv):
109
+ if cu_seqlens_q is None and cu_seqlens_kv is None and max_seqlen_q is None and max_seqlen_kv is None:
110
+ if sageattn is not None:
111
+ x = sageattn(q, k, v, tensor_layout='NHD')
112
+ return x
113
+
114
+ if flash_attn_func is not None:
115
+ x = flash_attn_func(q, k, v)
116
+ return x
117
+
118
+ if xformers_attn_func is not None:
119
+ x = xformers_attn_func(q, k, v)
120
+ return x
121
+
122
+ x = torch.nn.functional.scaled_dot_product_attention(q.transpose(1, 2), k.transpose(1, 2), v.transpose(1, 2)).transpose(1, 2)
123
+ return x
124
+
125
+ batch_size = q.shape[0]
126
+ q = q.view(q.shape[0] * q.shape[1], *q.shape[2:])
127
+ k = k.view(k.shape[0] * k.shape[1], *k.shape[2:])
128
+ v = v.view(v.shape[0] * v.shape[1], *v.shape[2:])
129
+ if sageattn_varlen is not None:
130
+ x = sageattn_varlen(q, k, v, cu_seqlens_q, cu_seqlens_kv, max_seqlen_q, max_seqlen_kv)
131
+ elif flash_attn_varlen_func is not None:
132
+ x = flash_attn_varlen_func(q, k, v, cu_seqlens_q, cu_seqlens_kv, max_seqlen_q, max_seqlen_kv)
133
+ else:
134
+ raise NotImplementedError('No Attn Installed!')
135
+ x = x.view(batch_size, max_seqlen_q, *x.shape[2:])
136
+ return x
137
+
138
+
139
+ class HunyuanAttnProcessorFlashAttnDouble:
140
+ def __call__(self, attn, hidden_states, encoder_hidden_states, attention_mask, image_rotary_emb):
141
+ cu_seqlens_q, cu_seqlens_kv, max_seqlen_q, max_seqlen_kv = attention_mask
142
+
143
+ query = attn.to_q(hidden_states)
144
+ key = attn.to_k(hidden_states)
145
+ value = attn.to_v(hidden_states)
146
+
147
+ query = query.unflatten(2, (attn.heads, -1))
148
+ key = key.unflatten(2, (attn.heads, -1))
149
+ value = value.unflatten(2, (attn.heads, -1))
150
+
151
+ query = attn.norm_q(query)
152
+ key = attn.norm_k(key)
153
+
154
+ query = apply_rotary_emb_transposed(query, image_rotary_emb)
155
+ key = apply_rotary_emb_transposed(key, image_rotary_emb)
156
+
157
+ encoder_query = attn.add_q_proj(encoder_hidden_states)
158
+ encoder_key = attn.add_k_proj(encoder_hidden_states)
159
+ encoder_value = attn.add_v_proj(encoder_hidden_states)
160
+
161
+ encoder_query = encoder_query.unflatten(2, (attn.heads, -1))
162
+ encoder_key = encoder_key.unflatten(2, (attn.heads, -1))
163
+ encoder_value = encoder_value.unflatten(2, (attn.heads, -1))
164
+
165
+ encoder_query = attn.norm_added_q(encoder_query)
166
+ encoder_key = attn.norm_added_k(encoder_key)
167
+
168
+ query = torch.cat([query, encoder_query], dim=1)
169
+ key = torch.cat([key, encoder_key], dim=1)
170
+ value = torch.cat([value, encoder_value], dim=1)
171
+
172
+ hidden_states = attn_varlen_func(query, key, value, cu_seqlens_q, cu_seqlens_kv, max_seqlen_q, max_seqlen_kv)
173
+ hidden_states = hidden_states.flatten(-2)
174
+
175
+ txt_length = encoder_hidden_states.shape[1]
176
+ hidden_states, encoder_hidden_states = hidden_states[:, :-txt_length], hidden_states[:, -txt_length:]
177
+
178
+ hidden_states = attn.to_out[0](hidden_states)
179
+ hidden_states = attn.to_out[1](hidden_states)
180
+ encoder_hidden_states = attn.to_add_out(encoder_hidden_states)
181
+
182
+ return hidden_states, encoder_hidden_states
183
+
184
+
185
+ class HunyuanAttnProcessorFlashAttnSingle:
186
+ def __call__(self, attn, hidden_states, encoder_hidden_states, attention_mask, image_rotary_emb):
187
+ cu_seqlens_q, cu_seqlens_kv, max_seqlen_q, max_seqlen_kv = attention_mask
188
+
189
+ hidden_states = torch.cat([hidden_states, encoder_hidden_states], dim=1)
190
+
191
+ query = attn.to_q(hidden_states)
192
+ key = attn.to_k(hidden_states)
193
+ value = attn.to_v(hidden_states)
194
+
195
+ query = query.unflatten(2, (attn.heads, -1))
196
+ key = key.unflatten(2, (attn.heads, -1))
197
+ value = value.unflatten(2, (attn.heads, -1))
198
+
199
+ query = attn.norm_q(query)
200
+ key = attn.norm_k(key)
201
+
202
+ txt_length = encoder_hidden_states.shape[1]
203
+
204
+ query = torch.cat([apply_rotary_emb_transposed(query[:, :-txt_length], image_rotary_emb), query[:, -txt_length:]], dim=1)
205
+ key = torch.cat([apply_rotary_emb_transposed(key[:, :-txt_length], image_rotary_emb), key[:, -txt_length:]], dim=1)
206
+
207
+ hidden_states = attn_varlen_func(query, key, value, cu_seqlens_q, cu_seqlens_kv, max_seqlen_q, max_seqlen_kv)
208
+ hidden_states = hidden_states.flatten(-2)
209
+
210
+ hidden_states, encoder_hidden_states = hidden_states[:, :-txt_length], hidden_states[:, -txt_length:]
211
+
212
+ return hidden_states, encoder_hidden_states
213
+
214
+
215
+ class CombinedTimestepGuidanceTextProjEmbeddings(nn.Module):
216
+ def __init__(self, embedding_dim, pooled_projection_dim):
217
+ super().__init__()
218
+
219
+ self.time_proj = Timesteps(num_channels=256, flip_sin_to_cos=True, downscale_freq_shift=0)
220
+ self.timestep_embedder = TimestepEmbedding(in_channels=256, time_embed_dim=embedding_dim)
221
+ self.guidance_embedder = TimestepEmbedding(in_channels=256, time_embed_dim=embedding_dim)
222
+ self.text_embedder = PixArtAlphaTextProjection(pooled_projection_dim, embedding_dim, act_fn="silu")
223
+
224
+ def forward(self, timestep, guidance, pooled_projection):
225
+ timesteps_proj = self.time_proj(timestep)
226
+ timesteps_emb = self.timestep_embedder(timesteps_proj.to(dtype=pooled_projection.dtype))
227
+
228
+ guidance_proj = self.time_proj(guidance)
229
+ guidance_emb = self.guidance_embedder(guidance_proj.to(dtype=pooled_projection.dtype))
230
+
231
+ time_guidance_emb = timesteps_emb + guidance_emb
232
+
233
+ pooled_projections = self.text_embedder(pooled_projection)
234
+ conditioning = time_guidance_emb + pooled_projections
235
+
236
+ return conditioning
237
+
238
+
239
+ class CombinedTimestepTextProjEmbeddings(nn.Module):
240
+ def __init__(self, embedding_dim, pooled_projection_dim):
241
+ super().__init__()
242
+
243
+ self.time_proj = Timesteps(num_channels=256, flip_sin_to_cos=True, downscale_freq_shift=0)
244
+ self.timestep_embedder = TimestepEmbedding(in_channels=256, time_embed_dim=embedding_dim)
245
+ self.text_embedder = PixArtAlphaTextProjection(pooled_projection_dim, embedding_dim, act_fn="silu")
246
+
247
+ def forward(self, timestep, pooled_projection):
248
+ timesteps_proj = self.time_proj(timestep)
249
+ timesteps_emb = self.timestep_embedder(timesteps_proj.to(dtype=pooled_projection.dtype))
250
+
251
+ pooled_projections = self.text_embedder(pooled_projection)
252
+
253
+ conditioning = timesteps_emb + pooled_projections
254
+
255
+ return conditioning
256
+
257
+
258
+ class HunyuanVideoAdaNorm(nn.Module):
259
+ def __init__(self, in_features: int, out_features: Optional[int] = None) -> None:
260
+ super().__init__()
261
+
262
+ out_features = out_features or 2 * in_features
263
+ self.linear = nn.Linear(in_features, out_features)
264
+ self.nonlinearity = nn.SiLU()
265
+
266
+ def forward(
267
+ self, temb: torch.Tensor
268
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
269
+ temb = self.linear(self.nonlinearity(temb))
270
+ gate_msa, gate_mlp = temb.chunk(2, dim=-1)
271
+ gate_msa, gate_mlp = gate_msa.unsqueeze(1), gate_mlp.unsqueeze(1)
272
+ return gate_msa, gate_mlp
273
+
274
+
275
+ class HunyuanVideoIndividualTokenRefinerBlock(nn.Module):
276
+ def __init__(
277
+ self,
278
+ num_attention_heads: int,
279
+ attention_head_dim: int,
280
+ mlp_width_ratio: float = 4.0,
281
+ mlp_drop_rate: float = 0.0,
282
+ attention_bias: bool = True,
283
+ ) -> None:
284
+ super().__init__()
285
+
286
+ hidden_size = num_attention_heads * attention_head_dim
287
+
288
+ self.norm1 = LayerNorm(hidden_size, elementwise_affine=True, eps=1e-6)
289
+ self.attn = Attention(
290
+ query_dim=hidden_size,
291
+ cross_attention_dim=None,
292
+ heads=num_attention_heads,
293
+ dim_head=attention_head_dim,
294
+ bias=attention_bias,
295
+ )
296
+
297
+ self.norm2 = LayerNorm(hidden_size, elementwise_affine=True, eps=1e-6)
298
+ self.ff = FeedForward(hidden_size, mult=mlp_width_ratio, activation_fn="linear-silu", dropout=mlp_drop_rate)
299
+
300
+ self.norm_out = HunyuanVideoAdaNorm(hidden_size, 2 * hidden_size)
301
+
302
+ def forward(
303
+ self,
304
+ hidden_states: torch.Tensor,
305
+ temb: torch.Tensor,
306
+ attention_mask: Optional[torch.Tensor] = None,
307
+ ) -> torch.Tensor:
308
+ norm_hidden_states = self.norm1(hidden_states)
309
+
310
+ attn_output = self.attn(
311
+ hidden_states=norm_hidden_states,
312
+ encoder_hidden_states=None,
313
+ attention_mask=attention_mask,
314
+ )
315
+
316
+ gate_msa, gate_mlp = self.norm_out(temb)
317
+ hidden_states = hidden_states + attn_output * gate_msa
318
+
319
+ ff_output = self.ff(self.norm2(hidden_states))
320
+ hidden_states = hidden_states + ff_output * gate_mlp
321
+
322
+ return hidden_states
323
+
324
+
325
+ class HunyuanVideoIndividualTokenRefiner(nn.Module):
326
+ def __init__(
327
+ self,
328
+ num_attention_heads: int,
329
+ attention_head_dim: int,
330
+ num_layers: int,
331
+ mlp_width_ratio: float = 4.0,
332
+ mlp_drop_rate: float = 0.0,
333
+ attention_bias: bool = True,
334
+ ) -> None:
335
+ super().__init__()
336
+
337
+ self.refiner_blocks = nn.ModuleList(
338
+ [
339
+ HunyuanVideoIndividualTokenRefinerBlock(
340
+ num_attention_heads=num_attention_heads,
341
+ attention_head_dim=attention_head_dim,
342
+ mlp_width_ratio=mlp_width_ratio,
343
+ mlp_drop_rate=mlp_drop_rate,
344
+ attention_bias=attention_bias,
345
+ )
346
+ for _ in range(num_layers)
347
+ ]
348
+ )
349
+
350
+ def forward(
351
+ self,
352
+ hidden_states: torch.Tensor,
353
+ temb: torch.Tensor,
354
+ attention_mask: Optional[torch.Tensor] = None,
355
+ ) -> torch.Tensor:
356
+ self_attn_mask = None
357
+ if attention_mask is not None:
358
+ batch_size = attention_mask.shape[0]
359
+ seq_len = attention_mask.shape[1]
360
+ attention_mask = attention_mask.to(hidden_states.device).bool()
361
+ self_attn_mask_1 = attention_mask.view(batch_size, 1, 1, seq_len).repeat(1, 1, seq_len, 1)
362
+ self_attn_mask_2 = self_attn_mask_1.transpose(2, 3)
363
+ self_attn_mask = (self_attn_mask_1 & self_attn_mask_2).bool()
364
+ self_attn_mask[:, :, :, 0] = True
365
+
366
+ for block in self.refiner_blocks:
367
+ hidden_states = block(hidden_states, temb, self_attn_mask)
368
+
369
+ return hidden_states
370
+
371
+
372
+ class HunyuanVideoTokenRefiner(nn.Module):
373
+ def __init__(
374
+ self,
375
+ in_channels: int,
376
+ num_attention_heads: int,
377
+ attention_head_dim: int,
378
+ num_layers: int,
379
+ mlp_ratio: float = 4.0,
380
+ mlp_drop_rate: float = 0.0,
381
+ attention_bias: bool = True,
382
+ ) -> None:
383
+ super().__init__()
384
+
385
+ hidden_size = num_attention_heads * attention_head_dim
386
+
387
+ self.time_text_embed = CombinedTimestepTextProjEmbeddings(
388
+ embedding_dim=hidden_size, pooled_projection_dim=in_channels
389
+ )
390
+ self.proj_in = nn.Linear(in_channels, hidden_size, bias=True)
391
+ self.token_refiner = HunyuanVideoIndividualTokenRefiner(
392
+ num_attention_heads=num_attention_heads,
393
+ attention_head_dim=attention_head_dim,
394
+ num_layers=num_layers,
395
+ mlp_width_ratio=mlp_ratio,
396
+ mlp_drop_rate=mlp_drop_rate,
397
+ attention_bias=attention_bias,
398
+ )
399
+
400
+ def forward(
401
+ self,
402
+ hidden_states: torch.Tensor,
403
+ timestep: torch.LongTensor,
404
+ attention_mask: Optional[torch.LongTensor] = None,
405
+ ) -> torch.Tensor:
406
+ if attention_mask is None:
407
+ pooled_projections = hidden_states.mean(dim=1)
408
+ else:
409
+ original_dtype = hidden_states.dtype
410
+ mask_float = attention_mask.float().unsqueeze(-1)
411
+ pooled_projections = (hidden_states * mask_float).sum(dim=1) / mask_float.sum(dim=1)
412
+ pooled_projections = pooled_projections.to(original_dtype)
413
+
414
+ temb = self.time_text_embed(timestep, pooled_projections)
415
+ hidden_states = self.proj_in(hidden_states)
416
+ hidden_states = self.token_refiner(hidden_states, temb, attention_mask)
417
+
418
+ return hidden_states
419
+
420
+
421
+ class HunyuanVideoRotaryPosEmbed(nn.Module):
422
+ def __init__(self, rope_dim, theta):
423
+ super().__init__()
424
+ self.DT, self.DY, self.DX = rope_dim
425
+ self.theta = theta
426
+
427
+ @torch.no_grad()
428
+ def get_frequency(self, dim, pos):
429
+ T, H, W = pos.shape
430
+ freqs = 1.0 / (self.theta ** (torch.arange(0, dim, 2, dtype=torch.float32, device=pos.device)[: (dim // 2)] / dim))
431
+ freqs = torch.outer(freqs, pos.reshape(-1)).unflatten(-1, (T, H, W)).repeat_interleave(2, dim=0)
432
+ return freqs.cos(), freqs.sin()
433
+
434
+ @torch.no_grad()
435
+ def forward_inner(self, frame_indices, height, width, device):
436
+ GT, GY, GX = torch.meshgrid(
437
+ frame_indices.to(device=device, dtype=torch.float32),
438
+ torch.arange(0, height, device=device, dtype=torch.float32),
439
+ torch.arange(0, width, device=device, dtype=torch.float32),
440
+ indexing="ij"
441
+ )
442
+
443
+ FCT, FST = self.get_frequency(self.DT, GT)
444
+ FCY, FSY = self.get_frequency(self.DY, GY)
445
+ FCX, FSX = self.get_frequency(self.DX, GX)
446
+
447
+ result = torch.cat([FCT, FCY, FCX, FST, FSY, FSX], dim=0)
448
+
449
+ return result.to(device)
450
+
451
+ @torch.no_grad()
452
+ def forward(self, frame_indices, height, width, device):
453
+ frame_indices = frame_indices.unbind(0)
454
+ results = [self.forward_inner(f, height, width, device) for f in frame_indices]
455
+ results = torch.stack(results, dim=0)
456
+ return results
457
+
458
+
459
+ class AdaLayerNormZero(nn.Module):
460
+ def __init__(self, embedding_dim: int, norm_type="layer_norm", bias=True):
461
+ super().__init__()
462
+ self.silu = nn.SiLU()
463
+ self.linear = nn.Linear(embedding_dim, 6 * embedding_dim, bias=bias)
464
+ if norm_type == "layer_norm":
465
+ self.norm = LayerNorm(embedding_dim, elementwise_affine=False, eps=1e-6)
466
+ else:
467
+ raise ValueError(f"unknown norm_type {norm_type}")
468
+
469
+ def forward(
470
+ self,
471
+ x: torch.Tensor,
472
+ emb: Optional[torch.Tensor] = None,
473
+ ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
474
+ emb = emb.unsqueeze(-2)
475
+ emb = self.linear(self.silu(emb))
476
+ shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = emb.chunk(6, dim=-1)
477
+ x = self.norm(x) * (1 + scale_msa) + shift_msa
478
+ return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
479
+
480
+
481
+ class AdaLayerNormZeroSingle(nn.Module):
482
+ def __init__(self, embedding_dim: int, norm_type="layer_norm", bias=True):
483
+ super().__init__()
484
+
485
+ self.silu = nn.SiLU()
486
+ self.linear = nn.Linear(embedding_dim, 3 * embedding_dim, bias=bias)
487
+ if norm_type == "layer_norm":
488
+ self.norm = LayerNorm(embedding_dim, elementwise_affine=False, eps=1e-6)
489
+ else:
490
+ raise ValueError(f"unknown norm_type {norm_type}")
491
+
492
+ def forward(
493
+ self,
494
+ x: torch.Tensor,
495
+ emb: Optional[torch.Tensor] = None,
496
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
497
+ emb = emb.unsqueeze(-2)
498
+ emb = self.linear(self.silu(emb))
499
+ shift_msa, scale_msa, gate_msa = emb.chunk(3, dim=-1)
500
+ x = self.norm(x) * (1 + scale_msa) + shift_msa
501
+ return x, gate_msa
502
+
503
+
504
+ class AdaLayerNormContinuous(nn.Module):
505
+ def __init__(
506
+ self,
507
+ embedding_dim: int,
508
+ conditioning_embedding_dim: int,
509
+ elementwise_affine=True,
510
+ eps=1e-5,
511
+ bias=True,
512
+ norm_type="layer_norm",
513
+ ):
514
+ super().__init__()
515
+ self.silu = nn.SiLU()
516
+ self.linear = nn.Linear(conditioning_embedding_dim, embedding_dim * 2, bias=bias)
517
+ if norm_type == "layer_norm":
518
+ self.norm = LayerNorm(embedding_dim, eps, elementwise_affine, bias)
519
+ else:
520
+ raise ValueError(f"unknown norm_type {norm_type}")
521
+
522
+ def forward(self, x: torch.Tensor, emb: torch.Tensor) -> torch.Tensor:
523
+ emb = emb.unsqueeze(-2)
524
+ emb = self.linear(self.silu(emb))
525
+ scale, shift = emb.chunk(2, dim=-1)
526
+ x = self.norm(x) * (1 + scale) + shift
527
+ return x
528
+
529
+
530
+ class HunyuanVideoSingleTransformerBlock(nn.Module):
531
+ def __init__(
532
+ self,
533
+ num_attention_heads: int,
534
+ attention_head_dim: int,
535
+ mlp_ratio: float = 4.0,
536
+ qk_norm: str = "rms_norm",
537
+ ) -> None:
538
+ super().__init__()
539
+
540
+ hidden_size = num_attention_heads * attention_head_dim
541
+ mlp_dim = int(hidden_size * mlp_ratio)
542
+
543
+ self.attn = Attention(
544
+ query_dim=hidden_size,
545
+ cross_attention_dim=None,
546
+ dim_head=attention_head_dim,
547
+ heads=num_attention_heads,
548
+ out_dim=hidden_size,
549
+ bias=True,
550
+ processor=HunyuanAttnProcessorFlashAttnSingle(),
551
+ qk_norm=qk_norm,
552
+ eps=1e-6,
553
+ pre_only=True,
554
+ )
555
+
556
+ self.norm = AdaLayerNormZeroSingle(hidden_size, norm_type="layer_norm")
557
+ self.proj_mlp = nn.Linear(hidden_size, mlp_dim)
558
+ self.act_mlp = nn.GELU(approximate="tanh")
559
+ self.proj_out = nn.Linear(hidden_size + mlp_dim, hidden_size)
560
+
561
+ def forward(
562
+ self,
563
+ hidden_states: torch.Tensor,
564
+ encoder_hidden_states: torch.Tensor,
565
+ temb: torch.Tensor,
566
+ attention_mask: Optional[torch.Tensor] = None,
567
+ image_rotary_emb: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
568
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
569
+ text_seq_length = encoder_hidden_states.shape[1]
570
+ hidden_states = torch.cat([hidden_states, encoder_hidden_states], dim=1)
571
+
572
+ residual = hidden_states
573
+
574
+ # 1. Input normalization
575
+ norm_hidden_states, gate = self.norm(hidden_states, emb=temb)
576
+ mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states))
577
+
578
+ norm_hidden_states, norm_encoder_hidden_states = (
579
+ norm_hidden_states[:, :-text_seq_length, :],
580
+ norm_hidden_states[:, -text_seq_length:, :],
581
+ )
582
+
583
+ # 2. Attention
584
+ attn_output, context_attn_output = self.attn(
585
+ hidden_states=norm_hidden_states,
586
+ encoder_hidden_states=norm_encoder_hidden_states,
587
+ attention_mask=attention_mask,
588
+ image_rotary_emb=image_rotary_emb,
589
+ )
590
+ attn_output = torch.cat([attn_output, context_attn_output], dim=1)
591
+
592
+ # 3. Modulation and residual connection
593
+ hidden_states = torch.cat([attn_output, mlp_hidden_states], dim=2)
594
+ hidden_states = gate * self.proj_out(hidden_states)
595
+ hidden_states = hidden_states + residual
596
+
597
+ hidden_states, encoder_hidden_states = (
598
+ hidden_states[:, :-text_seq_length, :],
599
+ hidden_states[:, -text_seq_length:, :],
600
+ )
601
+ return hidden_states, encoder_hidden_states
602
+
603
+
604
+ class HunyuanVideoTransformerBlock(nn.Module):
605
+ def __init__(
606
+ self,
607
+ num_attention_heads: int,
608
+ attention_head_dim: int,
609
+ mlp_ratio: float,
610
+ qk_norm: str = "rms_norm",
611
+ ) -> None:
612
+ super().__init__()
613
+
614
+ hidden_size = num_attention_heads * attention_head_dim
615
+
616
+ self.norm1 = AdaLayerNormZero(hidden_size, norm_type="layer_norm")
617
+ self.norm1_context = AdaLayerNormZero(hidden_size, norm_type="layer_norm")
618
+
619
+ self.attn = Attention(
620
+ query_dim=hidden_size,
621
+ cross_attention_dim=None,
622
+ added_kv_proj_dim=hidden_size,
623
+ dim_head=attention_head_dim,
624
+ heads=num_attention_heads,
625
+ out_dim=hidden_size,
626
+ context_pre_only=False,
627
+ bias=True,
628
+ processor=HunyuanAttnProcessorFlashAttnDouble(),
629
+ qk_norm=qk_norm,
630
+ eps=1e-6,
631
+ )
632
+
633
+ self.norm2 = LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6)
634
+ self.ff = FeedForward(hidden_size, mult=mlp_ratio, activation_fn="gelu-approximate")
635
+
636
+ self.norm2_context = LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6)
637
+ self.ff_context = FeedForward(hidden_size, mult=mlp_ratio, activation_fn="gelu-approximate")
638
+
639
+ def forward(
640
+ self,
641
+ hidden_states: torch.Tensor,
642
+ encoder_hidden_states: torch.Tensor,
643
+ temb: torch.Tensor,
644
+ attention_mask: Optional[torch.Tensor] = None,
645
+ freqs_cis: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
646
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
647
+ # 1. Input normalization
648
+ norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb)
649
+ norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context(encoder_hidden_states, emb=temb)
650
+
651
+ # 2. Joint attention
652
+ attn_output, context_attn_output = self.attn(
653
+ hidden_states=norm_hidden_states,
654
+ encoder_hidden_states=norm_encoder_hidden_states,
655
+ attention_mask=attention_mask,
656
+ image_rotary_emb=freqs_cis,
657
+ )
658
+
659
+ # 3. Modulation and residual connection
660
+ hidden_states = hidden_states + attn_output * gate_msa
661
+ encoder_hidden_states = encoder_hidden_states + context_attn_output * c_gate_msa
662
+
663
+ norm_hidden_states = self.norm2(hidden_states)
664
+ norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states)
665
+
666
+ norm_hidden_states = norm_hidden_states * (1 + scale_mlp) + shift_mlp
667
+ norm_encoder_hidden_states = norm_encoder_hidden_states * (1 + c_scale_mlp) + c_shift_mlp
668
+
669
+ # 4. Feed-forward
670
+ ff_output = self.ff(norm_hidden_states)
671
+ context_ff_output = self.ff_context(norm_encoder_hidden_states)
672
+
673
+ hidden_states = hidden_states + gate_mlp * ff_output
674
+ encoder_hidden_states = encoder_hidden_states + c_gate_mlp * context_ff_output
675
+
676
+ return hidden_states, encoder_hidden_states
677
+
678
+
679
+ class ClipVisionProjection(nn.Module):
680
+ def __init__(self, in_channels, out_channels):
681
+ super().__init__()
682
+ self.up = nn.Linear(in_channels, out_channels * 3)
683
+ self.down = nn.Linear(out_channels * 3, out_channels)
684
+
685
+ def forward(self, x):
686
+ projected_x = self.down(nn.functional.silu(self.up(x)))
687
+ return projected_x
688
+
689
+
690
+ class HunyuanVideoPatchEmbed(nn.Module):
691
+ def __init__(self, patch_size, in_chans, embed_dim):
692
+ super().__init__()
693
+ self.proj = nn.Conv3d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
694
+
695
+
696
+ class HunyuanVideoPatchEmbedForCleanLatents(nn.Module):
697
+ def __init__(self, inner_dim):
698
+ super().__init__()
699
+ self.proj = nn.Conv3d(16, inner_dim, kernel_size=(1, 2, 2), stride=(1, 2, 2))
700
+ self.proj_2x = nn.Conv3d(16, inner_dim, kernel_size=(2, 4, 4), stride=(2, 4, 4))
701
+ self.proj_4x = nn.Conv3d(16, inner_dim, kernel_size=(4, 8, 8), stride=(4, 8, 8))
702
+
703
+ @torch.no_grad()
704
+ def initialize_weight_from_another_conv3d(self, another_layer):
705
+ weight = another_layer.weight.detach().clone()
706
+ bias = another_layer.bias.detach().clone()
707
+
708
+ sd = {
709
+ 'proj.weight': weight.clone(),
710
+ 'proj.bias': bias.clone(),
711
+ 'proj_2x.weight': einops.repeat(weight, 'b c t h w -> b c (t tk) (h hk) (w wk)', tk=2, hk=2, wk=2) / 8.0,
712
+ 'proj_2x.bias': bias.clone(),
713
+ 'proj_4x.weight': einops.repeat(weight, 'b c t h w -> b c (t tk) (h hk) (w wk)', tk=4, hk=4, wk=4) / 64.0,
714
+ 'proj_4x.bias': bias.clone(),
715
+ }
716
+
717
+ sd = {k: v.clone() for k, v in sd.items()}
718
+
719
+ self.load_state_dict(sd)
720
+ return
721
+
722
+
723
+ class HunyuanVideoTransformer3DModelPacked(ModelMixin, ConfigMixin, PeftAdapterMixin, FromOriginalModelMixin):
724
+ @register_to_config
725
+ def __init__(
726
+ self,
727
+ in_channels: int = 16,
728
+ out_channels: int = 16,
729
+ num_attention_heads: int = 24,
730
+ attention_head_dim: int = 128,
731
+ num_layers: int = 20,
732
+ num_single_layers: int = 40,
733
+ num_refiner_layers: int = 2,
734
+ mlp_ratio: float = 4.0,
735
+ patch_size: int = 2,
736
+ patch_size_t: int = 1,
737
+ qk_norm: str = "rms_norm",
738
+ guidance_embeds: bool = True,
739
+ text_embed_dim: int = 4096,
740
+ pooled_projection_dim: int = 768,
741
+ rope_theta: float = 256.0,
742
+ rope_axes_dim: Tuple[int, int, int] = (16, 56, 56),
743
+ has_image_proj=False,
744
+ image_proj_dim=1152,
745
+ has_clean_x_embedder=False,
746
+ ) -> None:
747
+ super().__init__()
748
+
749
+ inner_dim = num_attention_heads * attention_head_dim
750
+ out_channels = out_channels or in_channels
751
+
752
+ # 1. Latent and condition embedders
753
+ self.x_embedder = HunyuanVideoPatchEmbed((patch_size_t, patch_size, patch_size), in_channels, inner_dim)
754
+ self.context_embedder = HunyuanVideoTokenRefiner(
755
+ text_embed_dim, num_attention_heads, attention_head_dim, num_layers=num_refiner_layers
756
+ )
757
+ self.time_text_embed = CombinedTimestepGuidanceTextProjEmbeddings(inner_dim, pooled_projection_dim)
758
+
759
+ self.clean_x_embedder = None
760
+ self.image_projection = None
761
+
762
+ # 2. RoPE
763
+ self.rope = HunyuanVideoRotaryPosEmbed(rope_axes_dim, rope_theta)
764
+
765
+ # 3. Dual stream transformer blocks
766
+ self.transformer_blocks = nn.ModuleList(
767
+ [
768
+ HunyuanVideoTransformerBlock(
769
+ num_attention_heads, attention_head_dim, mlp_ratio=mlp_ratio, qk_norm=qk_norm
770
+ )
771
+ for _ in range(num_layers)
772
+ ]
773
+ )
774
+
775
+ # 4. Single stream transformer blocks
776
+ self.single_transformer_blocks = nn.ModuleList(
777
+ [
778
+ HunyuanVideoSingleTransformerBlock(
779
+ num_attention_heads, attention_head_dim, mlp_ratio=mlp_ratio, qk_norm=qk_norm
780
+ )
781
+ for _ in range(num_single_layers)
782
+ ]
783
+ )
784
+
785
+ # 5. Output projection
786
+ self.norm_out = AdaLayerNormContinuous(inner_dim, inner_dim, elementwise_affine=False, eps=1e-6)
787
+ self.proj_out = nn.Linear(inner_dim, patch_size_t * patch_size * patch_size * out_channels)
788
+
789
+ self.inner_dim = inner_dim
790
+ self.use_gradient_checkpointing = False
791
+ self.enable_teacache = False
792
+
793
+ if has_image_proj:
794
+ self.install_image_projection(image_proj_dim)
795
+
796
+ if has_clean_x_embedder:
797
+ self.install_clean_x_embedder()
798
+
799
+ self.high_quality_fp32_output_for_inference = False
800
+
801
+ def install_image_projection(self, in_channels):
802
+ self.image_projection = ClipVisionProjection(in_channels=in_channels, out_channels=self.inner_dim)
803
+ self.config['has_image_proj'] = True
804
+ self.config['image_proj_dim'] = in_channels
805
+
806
+ def install_clean_x_embedder(self):
807
+ self.clean_x_embedder = HunyuanVideoPatchEmbedForCleanLatents(self.inner_dim)
808
+ self.config['has_clean_x_embedder'] = True
809
+
810
+ def enable_gradient_checkpointing(self):
811
+ self.use_gradient_checkpointing = True
812
+ print('self.use_gradient_checkpointing = True')
813
+
814
+ def disable_gradient_checkpointing(self):
815
+ self.use_gradient_checkpointing = False
816
+ print('self.use_gradient_checkpointing = False')
817
+
818
+ def initialize_teacache(self, enable_teacache=True, num_steps=25, rel_l1_thresh=0.15):
819
+ self.enable_teacache = enable_teacache
820
+ self.cnt = 0
821
+ self.num_steps = num_steps
822
+ self.rel_l1_thresh = rel_l1_thresh # 0.1 for 1.6x speedup, 0.15 for 2.1x speedup
823
+ self.accumulated_rel_l1_distance = 0
824
+ self.previous_modulated_input = None
825
+ self.previous_residual = None
826
+ self.teacache_rescale_func = np.poly1d([7.33226126e+02, -4.01131952e+02, 6.75869174e+01, -3.14987800e+00, 9.61237896e-02])
827
+
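A hypothetical usage sketch (the variable name transformer is assumed): enabling the cache before a 25-step sampling run lets the forward pass reuse the previous residual and skip all transformer blocks whenever the accumulated, polynomial-rescaled relative-L1 change of the first block's modulated input stays below rel_l1_thresh.

    transformer.initialize_teacache(enable_teacache=True, num_steps=25, rel_l1_thresh=0.15)
    # ... run sampling ...
    transformer.initialize_teacache(enable_teacache=False)  # turn the caching off again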
828
+ def gradient_checkpointing_method(self, block, *args):
829
+ if self.use_gradient_checkpointing:
830
+ result = torch.utils.checkpoint.checkpoint(block, *args, use_reentrant=False)
831
+ else:
832
+ result = block(*args)
833
+ return result
834
+
835
+ def process_input_hidden_states(
836
+ self,
837
+ latents, latent_indices=None,
838
+ clean_latents=None, clean_latent_indices=None,
839
+ clean_latents_2x=None, clean_latent_2x_indices=None,
840
+ clean_latents_4x=None, clean_latent_4x_indices=None
841
+ ):
842
+ hidden_states = self.gradient_checkpointing_method(self.x_embedder.proj, latents)
843
+ B, C, T, H, W = hidden_states.shape
844
+
845
+ if latent_indices is None:
846
+ latent_indices = torch.arange(0, T).unsqueeze(0).expand(B, -1)
847
+
848
+ hidden_states = hidden_states.flatten(2).transpose(1, 2)
849
+
850
+ rope_freqs = self.rope(frame_indices=latent_indices, height=H, width=W, device=hidden_states.device)
851
+ rope_freqs = rope_freqs.flatten(2).transpose(1, 2)
852
+
853
+ if clean_latents is not None and clean_latent_indices is not None:
854
+ clean_latents = clean_latents.to(hidden_states)
855
+ clean_latents = self.gradient_checkpointing_method(self.clean_x_embedder.proj, clean_latents)
856
+ clean_latents = clean_latents.flatten(2).transpose(1, 2)
857
+
858
+ clean_latent_rope_freqs = self.rope(frame_indices=clean_latent_indices, height=H, width=W, device=clean_latents.device)
859
+ clean_latent_rope_freqs = clean_latent_rope_freqs.flatten(2).transpose(1, 2)
860
+
861
+ hidden_states = torch.cat([clean_latents, hidden_states], dim=1)
862
+ rope_freqs = torch.cat([clean_latent_rope_freqs, rope_freqs], dim=1)
863
+
864
+ if clean_latents_2x is not None and clean_latent_2x_indices is not None:
865
+ clean_latents_2x = clean_latents_2x.to(hidden_states)
866
+ clean_latents_2x = pad_for_3d_conv(clean_latents_2x, (2, 4, 4))
867
+ clean_latents_2x = self.gradient_checkpointing_method(self.clean_x_embedder.proj_2x, clean_latents_2x)
868
+ clean_latents_2x = clean_latents_2x.flatten(2).transpose(1, 2)
869
+
870
+ clean_latent_2x_rope_freqs = self.rope(frame_indices=clean_latent_2x_indices, height=H, width=W, device=clean_latents_2x.device)
871
+ clean_latent_2x_rope_freqs = pad_for_3d_conv(clean_latent_2x_rope_freqs, (2, 2, 2))
872
+ clean_latent_2x_rope_freqs = center_down_sample_3d(clean_latent_2x_rope_freqs, (2, 2, 2))
873
+ clean_latent_2x_rope_freqs = clean_latent_2x_rope_freqs.flatten(2).transpose(1, 2)
874
+
875
+ hidden_states = torch.cat([clean_latents_2x, hidden_states], dim=1)
876
+ rope_freqs = torch.cat([clean_latent_2x_rope_freqs, rope_freqs], dim=1)
877
+
878
+ if clean_latents_4x is not None and clean_latent_4x_indices is not None:
879
+ clean_latents_4x = clean_latents_4x.to(hidden_states)
880
+ clean_latents_4x = pad_for_3d_conv(clean_latents_4x, (4, 8, 8))
881
+ clean_latents_4x = self.gradient_checkpointing_method(self.clean_x_embedder.proj_4x, clean_latents_4x)
882
+ clean_latents_4x = clean_latents_4x.flatten(2).transpose(1, 2)
883
+
884
+ clean_latent_4x_rope_freqs = self.rope(frame_indices=clean_latent_4x_indices, height=H, width=W, device=clean_latents_4x.device)
885
+ clean_latent_4x_rope_freqs = pad_for_3d_conv(clean_latent_4x_rope_freqs, (4, 4, 4))
886
+ clean_latent_4x_rope_freqs = center_down_sample_3d(clean_latent_4x_rope_freqs, (4, 4, 4))
887
+ clean_latent_4x_rope_freqs = clean_latent_4x_rope_freqs.flatten(2).transpose(1, 2)
888
+
889
+ hidden_states = torch.cat([clean_latents_4x, hidden_states], dim=1)
890
+ rope_freqs = torch.cat([clean_latent_4x_rope_freqs, rope_freqs], dim=1)
891
+
892
+ return hidden_states, rope_freqs
893
+
894
+ def forward(
895
+ self,
896
+ hidden_states, timestep, encoder_hidden_states, encoder_attention_mask, pooled_projections, guidance,
897
+ latent_indices=None,
898
+ clean_latents=None, clean_latent_indices=None,
899
+ clean_latents_2x=None, clean_latent_2x_indices=None,
900
+ clean_latents_4x=None, clean_latent_4x_indices=None,
901
+ image_embeddings=None,
902
+ attention_kwargs=None, return_dict=True
903
+ ):
904
+
905
+ if attention_kwargs is None:
906
+ attention_kwargs = {}
907
+
908
+ batch_size, num_channels, num_frames, height, width = hidden_states.shape
909
+ p, p_t = self.config['patch_size'], self.config['patch_size_t']
910
+ post_patch_num_frames = num_frames // p_t
911
+ post_patch_height = height // p
912
+ post_patch_width = width // p
913
+ original_context_length = post_patch_num_frames * post_patch_height * post_patch_width
914
+
915
+ hidden_states, rope_freqs = self.process_input_hidden_states(hidden_states, latent_indices, clean_latents, clean_latent_indices, clean_latents_2x, clean_latent_2x_indices, clean_latents_4x, clean_latent_4x_indices)
916
+
917
+ temb = self.gradient_checkpointing_method(self.time_text_embed, timestep, guidance, pooled_projections)
918
+ encoder_hidden_states = self.gradient_checkpointing_method(self.context_embedder, encoder_hidden_states, timestep, encoder_attention_mask)
919
+
920
+ if self.image_projection is not None:
921
+ assert image_embeddings is not None, 'You must use image embeddings!'
922
+ extra_encoder_hidden_states = self.gradient_checkpointing_method(self.image_projection, image_embeddings)
923
+ extra_attention_mask = torch.ones((batch_size, extra_encoder_hidden_states.shape[1]), dtype=encoder_attention_mask.dtype, device=encoder_attention_mask.device)
924
+
925
+ # must cat before (not after) encoder_hidden_states, due to attn masking
926
+ encoder_hidden_states = torch.cat([extra_encoder_hidden_states, encoder_hidden_states], dim=1)
927
+ encoder_attention_mask = torch.cat([extra_attention_mask, encoder_attention_mask], dim=1)
928
+
929
+ with torch.no_grad():
930
+ if batch_size == 1:
931
+ # When the batch size is 1, no attention masks or var-len attention functions are needed,
932
+ # because simply cropping the padded text tokens is mathematically equivalent to masking; a var-len implementation that disagrees with this cropped result is incorrect.
933
+ text_len = encoder_attention_mask.sum().item()
934
+ encoder_hidden_states = encoder_hidden_states[:, :text_len]
935
+ attention_mask = None, None, None, None
936
+ else:
937
+ img_seq_len = hidden_states.shape[1]
938
+ txt_seq_len = encoder_hidden_states.shape[1]
939
+
940
+ cu_seqlens_q = get_cu_seqlens(encoder_attention_mask, img_seq_len)
941
+ cu_seqlens_kv = cu_seqlens_q
942
+ max_seqlen_q = img_seq_len + txt_seq_len
943
+ max_seqlen_kv = max_seqlen_q
944
+
945
+ attention_mask = cu_seqlens_q, cu_seqlens_kv, max_seqlen_q, max_seqlen_kv
946
+
947
+ if self.enable_teacache:
948
+ modulated_inp = self.transformer_blocks[0].norm1(hidden_states, emb=temb)[0]
949
+
950
+ if self.cnt == 0 or self.cnt == self.num_steps-1:
951
+ should_calc = True
952
+ self.accumulated_rel_l1_distance = 0
953
+ else:
954
+ curr_rel_l1 = ((modulated_inp - self.previous_modulated_input).abs().mean() / self.previous_modulated_input.abs().mean()).cpu().item()
955
+ self.accumulated_rel_l1_distance += self.teacache_rescale_func(curr_rel_l1)
956
+ should_calc = self.accumulated_rel_l1_distance >= self.rel_l1_thresh
957
+
958
+ if should_calc:
959
+ self.accumulated_rel_l1_distance = 0
960
+
961
+ self.previous_modulated_input = modulated_inp
962
+ self.cnt += 1
963
+
964
+ if self.cnt == self.num_steps:
965
+ self.cnt = 0
966
+
967
+ if not should_calc:
968
+ hidden_states = hidden_states + self.previous_residual
969
+ else:
970
+ ori_hidden_states = hidden_states.clone()
971
+
972
+ for block_id, block in enumerate(self.transformer_blocks):
973
+ hidden_states, encoder_hidden_states = self.gradient_checkpointing_method(
974
+ block,
975
+ hidden_states,
976
+ encoder_hidden_states,
977
+ temb,
978
+ attention_mask,
979
+ rope_freqs
980
+ )
981
+
982
+ for block_id, block in enumerate(self.single_transformer_blocks):
983
+ hidden_states, encoder_hidden_states = self.gradient_checkpointing_method(
984
+ block,
985
+ hidden_states,
986
+ encoder_hidden_states,
987
+ temb,
988
+ attention_mask,
989
+ rope_freqs
990
+ )
991
+
992
+ self.previous_residual = hidden_states - ori_hidden_states
993
+ else:
994
+ for block_id, block in enumerate(self.transformer_blocks):
995
+ hidden_states, encoder_hidden_states = self.gradient_checkpointing_method(
996
+ block,
997
+ hidden_states,
998
+ encoder_hidden_states,
999
+ temb,
1000
+ attention_mask,
1001
+ rope_freqs
1002
+ )
1003
+
1004
+ for block_id, block in enumerate(self.single_transformer_blocks):
1005
+ hidden_states, encoder_hidden_states = self.gradient_checkpointing_method(
1006
+ block,
1007
+ hidden_states,
1008
+ encoder_hidden_states,
1009
+ temb,
1010
+ attention_mask,
1011
+ rope_freqs
1012
+ )
1013
+
1014
+ hidden_states = self.gradient_checkpointing_method(self.norm_out, hidden_states, temb)
1015
+
1016
+ hidden_states = hidden_states[:, -original_context_length:, :]
1017
+
1018
+ if self.high_quality_fp32_output_for_inference:
1019
+ hidden_states = hidden_states.to(dtype=torch.float32)
1020
+ if self.proj_out.weight.dtype != torch.float32:
1021
+ self.proj_out.to(dtype=torch.float32)
1022
+
1023
+ hidden_states = self.gradient_checkpointing_method(self.proj_out, hidden_states)
1024
+
1025
+ hidden_states = einops.rearrange(hidden_states, 'b (t h w) (c pt ph pw) -> b c (t pt) (h ph) (w pw)',
1026
+ t=post_patch_num_frames, h=post_patch_height, w=post_patch_width,
1027
+ pt=p_t, ph=p, pw=p)
1028
+
1029
+ if return_dict:
1030
+ return Transformer2DModelOutput(sample=hidden_states)
1031
+
1032
+ return hidden_states,
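The final einops.rearrange is the inverse of the (patch_size_t, patch_size, patch_size) patchify: tokens of shape (b, t*h*w, c*pt*ph*pw) fold back into a (b, c, t*pt, h*ph, w*pw) latent video. A small shape-only sketch:

    import torch
    import einops

    tokens = torch.randn(1, 2 * 4 * 4, 16 * 1 * 2 * 2)   # (b, t*h*w, c*pt*ph*pw)
    video = einops.rearrange(
        tokens, 'b (t h w) (c pt ph pw) -> b c (t pt) (h ph) (w pw)',
        t=2, h=4, w=4, pt=1, ph=2, pw=2)
    print(video.shape)  # torch.Size([1, 16, 2, 8, 8])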
diffusers_helper/pipelines/k_diffusion_hunyuan.py ADDED
@@ -0,0 +1,120 @@
1
+ import torch
2
+ import math
3
+
4
+ from diffusers_helper.k_diffusion.uni_pc_fm import sample_unipc
5
+ from diffusers_helper.k_diffusion.wrapper import fm_wrapper
6
+ from diffusers_helper.utils import repeat_to_batch_size
7
+
8
+
9
+ def flux_time_shift(t, mu=1.15, sigma=1.0):
10
+ return math.exp(mu) / (math.exp(mu) + (1 / t - 1) ** sigma)
11
+
12
+
13
+ def calculate_flux_mu(context_length, x1=256, y1=0.5, x2=4096, y2=1.15, exp_max=7.0):
14
+ k = (y2 - y1) / (x2 - x1)
15
+ b = y1 - k * x1
16
+ mu = k * context_length + b
17
+ mu = min(mu, math.log(exp_max))
18
+ return mu
19
+
20
+
21
+ def get_flux_sigmas_from_mu(n, mu):
22
+ sigmas = torch.linspace(1, 0, steps=n + 1)
23
+ sigmas = flux_time_shift(sigmas, mu=mu)
24
+ return sigmas
25
+
26
+
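A small sketch of how the shifted schedule behaves: mu grows linearly with the token context length (capped at log(exp_max)), and a larger mu keeps the sigmas closer to 1 for a larger share of the steps. The numbers below are illustrative only:

    seq_len = 1536                           # e.g. T * H * W // 4 for a small latent volume
    mu = calculate_flux_mu(seq_len)          # roughly 0.72 here; the cap is log(7) ~ 1.95
    sigmas = get_flux_sigmas_from_mu(10, mu) # 11 values from 1.0 down to 0.0
    print(round(mu, 3), [round(s, 3) for s in sigmas.tolist()])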
27
+ @torch.inference_mode()
28
+ def sample_hunyuan(
29
+ transformer,
30
+ sampler='unipc',
31
+ initial_latent=None,
32
+ concat_latent=None,
33
+ strength=1.0,
34
+ width=512,
35
+ height=512,
36
+ frames=16,
37
+ real_guidance_scale=1.0,
38
+ distilled_guidance_scale=6.0,
39
+ guidance_rescale=0.0,
40
+ shift=None,
41
+ num_inference_steps=25,
42
+ batch_size=None,
43
+ generator=None,
44
+ prompt_embeds=None,
45
+ prompt_embeds_mask=None,
46
+ prompt_poolers=None,
47
+ negative_prompt_embeds=None,
48
+ negative_prompt_embeds_mask=None,
49
+ negative_prompt_poolers=None,
50
+ dtype=torch.bfloat16,
51
+ device=None,
52
+ negative_kwargs=None,
53
+ callback=None,
54
+ **kwargs,
55
+ ):
56
+ device = device or transformer.device
57
+
58
+ if batch_size is None:
59
+ batch_size = int(prompt_embeds.shape[0])
60
+
61
+ latents = torch.randn((batch_size, 16, (frames + 3) // 4, height // 8, width // 8), generator=generator, device=generator.device).to(device=device, dtype=torch.float32)
62
+
63
+ B, C, T, H, W = latents.shape
64
+ seq_length = T * H * W // 4
65
+
66
+ if shift is None:
67
+ mu = calculate_flux_mu(seq_length, exp_max=7.0)
68
+ else:
69
+ mu = math.log(shift)
70
+
71
+ sigmas = get_flux_sigmas_from_mu(num_inference_steps, mu).to(device)
72
+
73
+ k_model = fm_wrapper(transformer)
74
+
75
+ if initial_latent is not None:
76
+ sigmas = sigmas * strength
77
+ first_sigma = sigmas[0].to(device=device, dtype=torch.float32)
78
+ initial_latent = initial_latent.to(device=device, dtype=torch.float32)
79
+ latents = initial_latent.float() * (1.0 - first_sigma) + latents.float() * first_sigma
80
+
81
+ if concat_latent is not None:
82
+ concat_latent = concat_latent.to(latents)
83
+
84
+ distilled_guidance = torch.tensor([distilled_guidance_scale * 1000.0] * batch_size).to(device=device, dtype=dtype)
85
+
86
+ prompt_embeds = repeat_to_batch_size(prompt_embeds, batch_size)
87
+ prompt_embeds_mask = repeat_to_batch_size(prompt_embeds_mask, batch_size)
88
+ prompt_poolers = repeat_to_batch_size(prompt_poolers, batch_size)
89
+ negative_prompt_embeds = repeat_to_batch_size(negative_prompt_embeds, batch_size)
90
+ negative_prompt_embeds_mask = repeat_to_batch_size(negative_prompt_embeds_mask, batch_size)
91
+ negative_prompt_poolers = repeat_to_batch_size(negative_prompt_poolers, batch_size)
92
+ concat_latent = repeat_to_batch_size(concat_latent, batch_size)
93
+
94
+ sampler_kwargs = dict(
95
+ dtype=dtype,
96
+ cfg_scale=real_guidance_scale,
97
+ cfg_rescale=guidance_rescale,
98
+ concat_latent=concat_latent,
99
+ positive=dict(
100
+ pooled_projections=prompt_poolers,
101
+ encoder_hidden_states=prompt_embeds,
102
+ encoder_attention_mask=prompt_embeds_mask,
103
+ guidance=distilled_guidance,
104
+ **kwargs,
105
+ ),
106
+ negative=dict(
107
+ pooled_projections=negative_prompt_poolers,
108
+ encoder_hidden_states=negative_prompt_embeds,
109
+ encoder_attention_mask=negative_prompt_embeds_mask,
110
+ guidance=distilled_guidance,
111
+ **(kwargs if negative_kwargs is None else {**kwargs, **negative_kwargs}),
112
+ )
113
+ )
114
+
115
+ if sampler == 'unipc':
116
+ results = sample_unipc(k_model, latents, sigmas, extra_args=sampler_kwargs, disable=False, callback=callback)
117
+ else:
118
+ raise NotImplementedError(f'Sampler {sampler} is not supported.')
119
+
120
+ return results
diffusers_helper/thread_utils.py ADDED
@@ -0,0 +1,76 @@
1
+ import time
2
+
3
+ from threading import Thread, Lock
4
+
5
+
6
+ class Listener:
7
+ task_queue = []
8
+ lock = Lock()
9
+ thread = None
10
+
11
+ @classmethod
12
+ def _process_tasks(cls):
13
+ while True:
14
+ task = None
15
+ with cls.lock:
16
+ if cls.task_queue:
17
+ task = cls.task_queue.pop(0)
18
+
19
+ if task is None:
20
+ time.sleep(0.001)
21
+ continue
22
+
23
+ func, args, kwargs = task
24
+ try:
25
+ func(*args, **kwargs)
26
+ except Exception as e:
27
+ print(f"Error in listener thread: {e}")
28
+
29
+ @classmethod
30
+ def add_task(cls, func, *args, **kwargs):
31
+ with cls.lock:
32
+ cls.task_queue.append((func, args, kwargs))
33
+
34
+ if cls.thread is None:
35
+ cls.thread = Thread(target=cls._process_tasks, daemon=True)
36
+ cls.thread.start()
37
+
38
+
39
+ def async_run(func, *args, **kwargs):
40
+ Listener.add_task(func, *args, **kwargs)
41
+
42
+
43
+ class FIFOQueue:
44
+ def __init__(self):
45
+ self.queue = []
46
+ self.lock = Lock()
47
+
48
+ def push(self, item):
49
+ with self.lock:
50
+ self.queue.append(item)
51
+
52
+ def pop(self):
53
+ with self.lock:
54
+ if self.queue:
55
+ return self.queue.pop(0)
56
+ return None
57
+
58
+ def top(self):
59
+ with self.lock:
60
+ if self.queue:
61
+ return self.queue[0]
62
+ return None
63
+
64
+ def next(self):
65
+ while True:
66
+ with self.lock:
67
+ if self.queue:
68
+ return self.queue.pop(0)
69
+
70
+ time.sleep(0.001)
71
+
72
+
73
+ class AsyncStream:
74
+ def __init__(self):
75
+ self.input_queue = FIFOQueue()
76
+ self.output_queue = FIFOQueue()
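A hypothetical usage sketch of these helpers: push work onto the background listener thread with async_run and stream results back through an AsyncStream (the worker body below is made up for illustration).

    stream = AsyncStream()

    def worker():
        for i in range(3):
            stream.output_queue.push(('progress', i))
        stream.output_queue.push(('end', None))

    async_run(worker)

    while True:
        flag, data = stream.output_queue.next()
        if flag == 'end':
            break
        print(flag, data)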
diffusers_helper/utils.py ADDED
@@ -0,0 +1,613 @@
1
+ import os
2
+ import cv2
3
+ import json
4
+ import random
5
+ import glob
6
+ import torch
7
+ import einops
8
+ import numpy as np
9
+ import datetime
10
+ import torchvision
11
+
12
+ import safetensors.torch as sf
13
+ from PIL import Image
14
+
15
+
16
+ def min_resize(x, m):
17
+ if x.shape[0] < x.shape[1]:
18
+ s0 = m
19
+ s1 = int(float(m) / float(x.shape[0]) * float(x.shape[1]))
20
+ else:
21
+ s0 = int(float(m) / float(x.shape[1]) * float(x.shape[0]))
22
+ s1 = m
23
+ new_max = max(s1, s0)
24
+ raw_max = max(x.shape[0], x.shape[1])
25
+ if new_max < raw_max:
26
+ interpolation = cv2.INTER_AREA
27
+ else:
28
+ interpolation = cv2.INTER_LANCZOS4
29
+ y = cv2.resize(x, (s1, s0), interpolation=interpolation)
30
+ return y
31
+
32
+
33
+ def d_resize(x, y):
34
+ H, W, C = y.shape
35
+ new_min = min(H, W)
36
+ raw_min = min(x.shape[0], x.shape[1])
37
+ if new_min < raw_min:
38
+ interpolation = cv2.INTER_AREA
39
+ else:
40
+ interpolation = cv2.INTER_LANCZOS4
41
+ y = cv2.resize(x, (W, H), interpolation=interpolation)
42
+ return y
43
+
44
+
45
+ def resize_and_center_crop(image, target_width, target_height):
46
+ if target_height == image.shape[0] and target_width == image.shape[1]:
47
+ return image
48
+
49
+ pil_image = Image.fromarray(image)
50
+ original_width, original_height = pil_image.size
51
+ scale_factor = max(target_width / original_width, target_height / original_height)
52
+ resized_width = int(round(original_width * scale_factor))
53
+ resized_height = int(round(original_height * scale_factor))
54
+ resized_image = pil_image.resize((resized_width, resized_height), Image.LANCZOS)
55
+ left = (resized_width - target_width) / 2
56
+ top = (resized_height - target_height) / 2
57
+ right = (resized_width + target_width) / 2
58
+ bottom = (resized_height + target_height) / 2
59
+ cropped_image = resized_image.crop((left, top, right, bottom))
60
+ return np.array(cropped_image)
61
+
62
+
63
+ def resize_and_center_crop_pytorch(image, target_width, target_height):
64
+ B, C, H, W = image.shape
65
+
66
+ if H == target_height and W == target_width:
67
+ return image
68
+
69
+ scale_factor = max(target_width / W, target_height / H)
70
+ resized_width = int(round(W * scale_factor))
71
+ resized_height = int(round(H * scale_factor))
72
+
73
+ resized = torch.nn.functional.interpolate(image, size=(resized_height, resized_width), mode='bilinear', align_corners=False)
74
+
75
+ top = (resized_height - target_height) // 2
76
+ left = (resized_width - target_width) // 2
77
+ cropped = resized[:, :, top:top + target_height, left:left + target_width]
78
+
79
+ return cropped
80
+
81
+
82
+ def resize_without_crop(image, target_width, target_height):
83
+ if target_height == image.shape[0] and target_width == image.shape[1]:
84
+ return image
85
+
86
+ pil_image = Image.fromarray(image)
87
+ resized_image = pil_image.resize((target_width, target_height), Image.LANCZOS)
88
+ return np.array(resized_image)
89
+
90
+
91
+ def just_crop(image, w, h):
92
+ if h == image.shape[0] and w == image.shape[1]:
93
+ return image
94
+
95
+ original_height, original_width = image.shape[:2]
96
+ k = min(original_height / h, original_width / w)
97
+ new_width = int(round(w * k))
98
+ new_height = int(round(h * k))
99
+ x_start = (original_width - new_width) // 2
100
+ y_start = (original_height - new_height) // 2
101
+ cropped_image = image[y_start:y_start + new_height, x_start:x_start + new_width]
102
+ return cropped_image
103
+
104
+
105
+ def write_to_json(data, file_path):
106
+ temp_file_path = file_path + ".tmp"
107
+ with open(temp_file_path, 'wt', encoding='utf-8') as temp_file:
108
+ json.dump(data, temp_file, indent=4)
109
+ os.replace(temp_file_path, file_path)
110
+ return
111
+
112
+
113
+ def read_from_json(file_path):
114
+ with open(file_path, 'rt', encoding='utf-8') as file:
115
+ data = json.load(file)
116
+ return data
117
+
118
+
119
+ def get_active_parameters(m):
120
+ return {k: v for k, v in m.named_parameters() if v.requires_grad}
121
+
122
+
123
+ def cast_training_params(m, dtype=torch.float32):
124
+ result = {}
125
+ for n, param in m.named_parameters():
126
+ if param.requires_grad:
127
+ param.data = param.to(dtype)
128
+ result[n] = param
129
+ return result
130
+
131
+
132
+ def separate_lora_AB(parameters, B_patterns=None):
133
+ parameters_normal = {}
134
+ parameters_B = {}
135
+
136
+ if B_patterns is None:
137
+ B_patterns = ['.lora_B.', '__zero__']
138
+
139
+ for k, v in parameters.items():
140
+ if any(B_pattern in k for B_pattern in B_patterns):
141
+ parameters_B[k] = v
142
+ else:
143
+ parameters_normal[k] = v
144
+
145
+ return parameters_normal, parameters_B
146
+
147
+
148
+ def set_attr_recursive(obj, attr, value):
149
+ attrs = attr.split(".")
150
+ for name in attrs[:-1]:
151
+ obj = getattr(obj, name)
152
+ setattr(obj, attrs[-1], value)
153
+ return
154
+
155
+
156
+ def print_tensor_list_size(tensors):
157
+ total_size = 0
158
+ total_elements = 0
159
+
160
+ if isinstance(tensors, dict):
161
+ tensors = tensors.values()
162
+
163
+ for tensor in tensors:
164
+ total_size += tensor.nelement() * tensor.element_size()
165
+ total_elements += tensor.nelement()
166
+
167
+ total_size_MB = total_size / (1024 ** 2)
168
+ total_elements_B = total_elements / 1e9
169
+
170
+ print(f"Total number of tensors: {len(tensors)}")
171
+ print(f"Total size of tensors: {total_size_MB:.2f} MB")
172
+ print(f"Total number of parameters: {total_elements_B:.3f} billion")
173
+ return
174
+
175
+
176
+ @torch.no_grad()
177
+ def batch_mixture(a, b=None, probability_a=0.5, mask_a=None):
178
+ batch_size = a.size(0)
179
+
180
+ if b is None:
181
+ b = torch.zeros_like(a)
182
+
183
+ if mask_a is None:
184
+ mask_a = torch.rand(batch_size) < probability_a
185
+
186
+ mask_a = mask_a.to(a.device)
187
+ mask_a = mask_a.reshape((batch_size,) + (1,) * (a.dim() - 1))
188
+ result = torch.where(mask_a, a, b)
189
+ return result
190
+
191
+
192
+ @torch.no_grad()
193
+ def zero_module(module):
194
+ for p in module.parameters():
195
+ p.detach().zero_()
196
+ return module
197
+
198
+
199
+ @torch.no_grad()
200
+ def supress_lower_channels(m, k, alpha=0.01):
201
+ data = m.weight.data.clone()
202
+
203
+ assert int(data.shape[1]) >= k
204
+
205
+ data[:, :k] = data[:, :k] * alpha
206
+ m.weight.data = data.contiguous().clone()
207
+ return m
208
+
209
+
210
+ def freeze_module(m):
211
+ if not hasattr(m, '_forward_inside_frozen_module'):
212
+ m._forward_inside_frozen_module = m.forward
213
+ m.requires_grad_(False)
214
+ m.forward = torch.no_grad()(m.forward)
215
+ return m
216
+
217
+
218
+ def get_latest_safetensors(folder_path):
219
+ safetensors_files = glob.glob(os.path.join(folder_path, '*.safetensors'))
220
+
221
+ if not safetensors_files:
222
+ raise ValueError('No file to resume!')
223
+
224
+ latest_file = max(safetensors_files, key=os.path.getmtime)
225
+ latest_file = os.path.abspath(os.path.realpath(latest_file))
226
+ return latest_file
227
+
228
+
229
+ def generate_random_prompt_from_tags(tags_str, min_length=3, max_length=32):
230
+ tags = tags_str.split(', ')
231
+ tags = random.sample(tags, k=min(random.randint(min_length, max_length), len(tags)))
232
+ prompt = ', '.join(tags)
233
+ return prompt
234
+
235
+
236
+ def interpolate_numbers(a, b, n, round_to_int=False, gamma=1.0):
237
+ numbers = a + (b - a) * (np.linspace(0, 1, n) ** gamma)
238
+ if round_to_int:
239
+ numbers = np.round(numbers).astype(int)
240
+ return numbers.tolist()
241
+
242
+
243
+ def uniform_random_by_intervals(inclusive, exclusive, n, round_to_int=False):
244
+ edges = np.linspace(0, 1, n + 1)
245
+ points = np.random.uniform(edges[:-1], edges[1:])
246
+ numbers = inclusive + (exclusive - inclusive) * points
247
+ if round_to_int:
248
+ numbers = np.round(numbers).astype(int)
249
+ return numbers.tolist()
250
+
251
+
252
+ def soft_append_bcthw(history, current, overlap=0):
253
+ if overlap <= 0:
254
+ return torch.cat([history, current], dim=2)
255
+
256
+ assert history.shape[2] >= overlap, f"History length ({history.shape[2]}) must be >= overlap ({overlap})"
257
+ assert current.shape[2] >= overlap, f"Current length ({current.shape[2]}) must be >= overlap ({overlap})"
258
+
259
+ weights = torch.linspace(1, 0, overlap, dtype=history.dtype, device=history.device).view(1, 1, -1, 1, 1)
260
+ blended = weights * history[:, :, -overlap:] + (1 - weights) * current[:, :, :overlap]
261
+ output = torch.cat([history[:, :, :-overlap], blended, current[:, :, overlap:]], dim=2)
262
+
263
+ return output.to(history)
264
+
265
+
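A small sketch of the cross-fade above: two 5-frame clips joined with a 2-frame linear overlap along the time axis yield 5 + 5 - 2 = 8 frames.

    import torch

    a = torch.zeros(1, 3, 5, 8, 8)   # (batch, channels, time, height, width)
    b = torch.ones(1, 3, 5, 8, 8)
    out = soft_append_bcthw(a, b, overlap=2)
    print(out.shape)                 # torch.Size([1, 3, 8, 8, 8])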
266
+ def save_bcthw_as_mp4(x, output_filename, fps=10, crf=0):
267
+ b, c, t, h, w = x.shape
268
+
269
+ per_row = b
270
+ for p in [6, 5, 4, 3, 2]:
271
+ if b % p == 0:
272
+ per_row = p
273
+ break
274
+
275
+ os.makedirs(os.path.dirname(os.path.abspath(os.path.realpath(output_filename))), exist_ok=True)
276
+ x = torch.clamp(x.float(), -1., 1.) * 127.5 + 127.5
277
+ x = x.detach().cpu().to(torch.uint8)
278
+ x = einops.rearrange(x, '(m n) c t h w -> t (m h) (n w) c', n=per_row)
279
+ torchvision.io.write_video(output_filename, x, fps=fps, video_codec='libx264', options={'crf': str(int(crf))})
280
+ return x
281
+
282
+
283
+ def save_bcthw_as_png(x, output_filename):
284
+ os.makedirs(os.path.dirname(os.path.abspath(os.path.realpath(output_filename))), exist_ok=True)
285
+ x = torch.clamp(x.float(), -1., 1.) * 127.5 + 127.5
286
+ x = x.detach().cpu().to(torch.uint8)
287
+ x = einops.rearrange(x, 'b c t h w -> c (b h) (t w)')
288
+ torchvision.io.write_png(x, output_filename)
289
+ return output_filename
290
+
291
+
292
+ def save_bchw_as_png(x, output_filename):
293
+ os.makedirs(os.path.dirname(os.path.abspath(os.path.realpath(output_filename))), exist_ok=True)
294
+ x = torch.clamp(x.float(), -1., 1.) * 127.5 + 127.5
295
+ x = x.detach().cpu().to(torch.uint8)
296
+ x = einops.rearrange(x, 'b c h w -> c h (b w)')
297
+ torchvision.io.write_png(x, output_filename)
298
+ return output_filename
299
+
300
+
301
+ def add_tensors_with_padding(tensor1, tensor2):
302
+ if tensor1.shape == tensor2.shape:
303
+ return tensor1 + tensor2
304
+
305
+ shape1 = tensor1.shape
306
+ shape2 = tensor2.shape
307
+
308
+ new_shape = tuple(max(s1, s2) for s1, s2 in zip(shape1, shape2))
309
+
310
+ padded_tensor1 = torch.zeros(new_shape)
311
+ padded_tensor2 = torch.zeros(new_shape)
312
+
313
+ padded_tensor1[tuple(slice(0, s) for s in shape1)] = tensor1
314
+ padded_tensor2[tuple(slice(0, s) for s in shape2)] = tensor2
315
+
316
+ result = padded_tensor1 + padded_tensor2
317
+ return result
318
+
319
+
320
+ def print_free_mem():
321
+ torch.cuda.empty_cache()
322
+ free_mem, total_mem = torch.cuda.mem_get_info(0)
323
+ free_mem_mb = free_mem / (1024 ** 2)
324
+ total_mem_mb = total_mem / (1024 ** 2)
325
+ print(f"Free memory: {free_mem_mb:.2f} MB")
326
+ print(f"Total memory: {total_mem_mb:.2f} MB")
327
+ return
328
+
329
+
330
+ def print_gpu_parameters(device, state_dict, log_count=1):
331
+ summary = {"device": device, "keys_count": len(state_dict)}
332
+
333
+ logged_params = {}
334
+ for i, (key, tensor) in enumerate(state_dict.items()):
335
+ if i >= log_count:
336
+ break
337
+ logged_params[key] = tensor.flatten()[:3].tolist()
338
+
339
+ summary["params"] = logged_params
340
+
341
+ print(str(summary))
342
+ return
343
+
344
+
345
+ def visualize_txt_as_img(width, height, text, font_path='font/DejaVuSans.ttf', size=18):
346
+ from PIL import Image, ImageDraw, ImageFont
347
+
348
+ txt = Image.new("RGB", (width, height), color="white")
349
+ draw = ImageDraw.Draw(txt)
350
+ font = ImageFont.truetype(font_path, size=size)
351
+
352
+ if text == '':
353
+ return np.array(txt)
354
+
355
+ # Split text into lines that fit within the image width
356
+ lines = []
357
+ words = text.split()
358
+ current_line = words[0]
359
+
360
+ for word in words[1:]:
361
+ line_with_word = f"{current_line} {word}"
362
+ if draw.textbbox((0, 0), line_with_word, font=font)[2] <= width:
363
+ current_line = line_with_word
364
+ else:
365
+ lines.append(current_line)
366
+ current_line = word
367
+
368
+ lines.append(current_line)
369
+
370
+ # Draw the text line by line
371
+ y = 0
372
+ line_height = draw.textbbox((0, 0), "A", font=font)[3]
373
+
374
+ for line in lines:
375
+ if y + line_height > height:
376
+ break # stop drawing if the next line will be outside the image
377
+ draw.text((0, y), line, fill="black", font=font)
378
+ y += line_height
379
+
380
+ return np.array(txt)
381
+
382
+
383
+ def blue_mark(x):
384
+ x = x.copy()
385
+ c = x[:, :, 2]
386
+ b = cv2.blur(c, (9, 9))
387
+ x[:, :, 2] = ((c - b) * 16.0 + b).clip(-1, 1)
388
+ return x
389
+
390
+
391
+ def green_mark(x):
392
+ x = x.copy()
393
+ x[:, :, 2] = -1
394
+ x[:, :, 0] = -1
395
+ return x
396
+
397
+
398
+ def frame_mark(x):
399
+ x = x.copy()
400
+ x[:64] = -1
401
+ x[-64:] = -1
402
+ x[:, :8] = 1
403
+ x[:, -8:] = 1
404
+ return x
405
+
406
+
407
+ @torch.inference_mode()
408
+ def pytorch2numpy(imgs):
409
+ results = []
410
+ for x in imgs:
411
+ y = x.movedim(0, -1)
412
+ y = y * 127.5 + 127.5
413
+ y = y.detach().float().cpu().numpy().clip(0, 255).astype(np.uint8)
414
+ results.append(y)
415
+ return results
416
+
417
+
418
+ @torch.inference_mode()
419
+ def numpy2pytorch(imgs):
420
+ h = torch.from_numpy(np.stack(imgs, axis=0)).float() / 127.5 - 1.0
421
+ h = h.movedim(-1, 1)
422
+ return h
423
+
424
+
425
+ @torch.no_grad()
426
+ def duplicate_prefix_to_suffix(x, count, zero_out=False):
427
+ if zero_out:
428
+ return torch.cat([x, torch.zeros_like(x[:count])], dim=0)
429
+ else:
430
+ return torch.cat([x, x[:count]], dim=0)
431
+
432
+
433
+ def weighted_mse(a, b, weight):
434
+ return torch.mean(weight.float() * (a.float() - b.float()) ** 2)
435
+
436
+
437
+ def clamped_linear_interpolation(x, x_min, y_min, x_max, y_max, sigma=1.0):
438
+ x = (x - x_min) / (x_max - x_min)
439
+ x = max(0.0, min(x, 1.0))
440
+ x = x ** sigma
441
+ return y_min + x * (y_max - y_min)
442
+
443
+
444
+ def expand_to_dims(x, target_dims):
445
+ return x.view(*x.shape, *([1] * max(0, target_dims - x.dim())))
446
+
447
+
448
+ def repeat_to_batch_size(tensor: torch.Tensor, batch_size: int):
449
+ if tensor is None:
450
+ return None
451
+
452
+ first_dim = tensor.shape[0]
453
+
454
+ if first_dim == batch_size:
455
+ return tensor
456
+
457
+ if batch_size % first_dim != 0:
458
+ raise ValueError(f"Cannot evenly repeat first dim {first_dim} to match batch_size {batch_size}.")
459
+
460
+ repeat_times = batch_size // first_dim
461
+
462
+ return tensor.repeat(repeat_times, *[1] * (tensor.dim() - 1))
463
+
464
+
465
+ def dim5(x):
466
+ return expand_to_dims(x, 5)
467
+
468
+
469
+ def dim4(x):
470
+ return expand_to_dims(x, 4)
471
+
472
+
473
+ def dim3(x):
474
+ return expand_to_dims(x, 3)
475
+
476
+
477
+ def crop_or_pad_yield_mask(x, length):
478
+ B, F, C = x.shape
479
+ device = x.device
480
+ dtype = x.dtype
481
+
482
+ if F < length:
483
+ y = torch.zeros((B, length, C), dtype=dtype, device=device)
484
+ mask = torch.zeros((B, length), dtype=torch.bool, device=device)
485
+ y[:, :F, :] = x
486
+ mask[:, :F] = True
487
+ return y, mask
488
+
489
+ return x[:, :length, :], torch.ones((B, length), dtype=torch.bool, device=device)
490
+
491
+
492
+ def extend_dim(x, dim, minimal_length, zero_pad=False):
493
+ original_length = int(x.shape[dim])
494
+
495
+ if original_length >= minimal_length:
496
+ return x
497
+
498
+ if zero_pad:
499
+ padding_shape = list(x.shape)
500
+ padding_shape[dim] = minimal_length - original_length
501
+ padding = torch.zeros(padding_shape, dtype=x.dtype, device=x.device)
502
+ else:
503
+ idx = (slice(None),) * dim + (slice(-1, None),) + (slice(None),) * (len(x.shape) - dim - 1)
504
+ last_element = x[idx]
505
+ padding = last_element.repeat_interleave(minimal_length - original_length, dim=dim)
506
+
507
+ return torch.cat([x, padding], dim=dim)
508
+
509
+
510
+ def lazy_positional_encoding(t, repeats=None):
511
+ if not isinstance(t, list):
512
+ t = [t]
513
+
514
+ from diffusers.models.embeddings import get_timestep_embedding
515
+
516
+ te = torch.tensor(t)
517
+ te = get_timestep_embedding(timesteps=te, embedding_dim=256, flip_sin_to_cos=True, downscale_freq_shift=0.0, scale=1.0)
518
+
519
+ if repeats is None:
520
+ return te
521
+
522
+ te = te[:, None, :].expand(-1, repeats, -1)
523
+
524
+ return te
525
+
526
+
527
+ def state_dict_offset_merge(A, B, C=None):
528
+ result = {}
529
+ keys = A.keys()
530
+
531
+ for key in keys:
532
+ A_value = A[key]
533
+ B_value = B[key].to(A_value)
534
+
535
+ if C is None:
536
+ result[key] = A_value + B_value
537
+ else:
538
+ C_value = C[key].to(A_value)
539
+ result[key] = A_value + B_value - C_value
540
+
541
+ return result
542
+
543
+
544
+ def state_dict_weighted_merge(state_dicts, weights):
545
+ if len(state_dicts) != len(weights):
546
+ raise ValueError("Number of state dictionaries must match number of weights")
547
+
548
+ if not state_dicts:
549
+ return {}
550
+
551
+ total_weight = sum(weights)
552
+
553
+ if total_weight == 0:
554
+ raise ValueError("Sum of weights cannot be zero")
555
+
556
+ normalized_weights = [w / total_weight for w in weights]
557
+
558
+ keys = state_dicts[0].keys()
559
+ result = {}
560
+
561
+ for key in keys:
562
+ result[key] = state_dicts[0][key] * normalized_weights[0]
563
+
564
+ for i in range(1, len(state_dicts)):
565
+ state_dict_value = state_dicts[i][key].to(result[key])
566
+ result[key] += state_dict_value * normalized_weights[i]
567
+
568
+ return result
569
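For reference, a toy illustration of the two merge helpers above; the key name and tensor shapes are made up purely for illustration, and it assumes the same module context:

```python
import torch

base  = {"w": torch.ones(2, 2)}
delta = {"w": torch.full((2, 2), 0.5)}

# Offset merge: A + B (element-wise), optionally minus C.
merged = state_dict_offset_merge(base, delta)             # every entry of "w" == 1.5

# Weighted merge: weights are normalized, so [3, 1] -> [0.75, 0.25].
avg = state_dict_weighted_merge([base, delta], [3, 1])    # 0.75 * 1.0 + 0.25 * 0.5 == 0.875
```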
+
570
+
571
+ def group_files_by_folder(all_files):
572
+ grouped_files = {}
573
+
574
+ for file in all_files:
575
+ folder_name = os.path.basename(os.path.dirname(file))
576
+ if folder_name not in grouped_files:
577
+ grouped_files[folder_name] = []
578
+ grouped_files[folder_name].append(file)
579
+
580
+ list_of_lists = list(grouped_files.values())
581
+ return list_of_lists
582
+
583
+
584
+ def generate_timestamp():
585
+ now = datetime.datetime.now()
586
+ timestamp = now.strftime('%y%m%d_%H%M%S')
587
+ milliseconds = f"{int(now.microsecond / 1000):03d}"
588
+ random_number = random.randint(0, 9999)
589
+ return f"{timestamp}_{milliseconds}_{random_number}"
590
+
591
+
592
+ def write_PIL_image_with_png_info(image, metadata, path):
593
+ from PIL.PngImagePlugin import PngInfo
594
+
595
+ png_info = PngInfo()
596
+ for key, value in metadata.items():
597
+ png_info.add_text(key, value)
598
+
599
+ image.save(path, "PNG", pnginfo=png_info)
600
+ return image
601
+
602
+
603
+ def torch_safe_save(content, path):
604
+ torch.save(content, path + '_tmp')
605
+ os.replace(path + '_tmp', path)
606
+ return path
607
+
608
+
609
+ def move_optimizer_to_device(optimizer, device):
610
+ for state in optimizer.state.values():
611
+ for k, v in state.items():
612
+ if isinstance(v, torch.Tensor):
613
+ state[k] = v.to(device)
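A hedged sketch of how the last two helpers might be used around a training checkpoint; `model`, `optimizer` and the path are placeholders rather than names from this repository:

```python
import torch

model = torch.nn.Linear(4, 4)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-4)

# torch_safe_save() writes to "<path>_tmp" first and then os.replace()s it into place,
# so a crash mid-write cannot leave a truncated checkpoint behind.
torch_safe_save({"model": model.state_dict()}, "checkpoint.pt")

# Move any optimizer state tensors (exp_avg, exp_avg_sq, ...) off the GPU before offloading.
move_optimizer_to_device(optimizer, "cpu")
```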
modules/__init__.py ADDED
@@ -0,0 +1,2 @@

+ # modules/__init__.py
+
modules/interface.py ADDED
@@ -0,0 +1,899 @@
1
+ import gradio as gr
2
+ import time
3
+ import datetime
4
+ import random
5
+ import json
6
+ import os
7
+ from typing import List, Dict, Any, Optional
8
+ from PIL import Image
9
+ import numpy as np
10
+ import base64
11
+ import io
12
+
13
+ from modules.video_queue import JobStatus, Job
14
+ from modules.prompt_handler import get_section_boundaries, get_quick_prompts, parse_timestamped_prompt
15
+ from diffusers_helper.gradio.progress_bar import make_progress_bar_css, make_progress_bar_html
16
+
17
+
18
+ def create_interface(
19
+ process_fn,
20
+ monitor_fn,
21
+ end_process_fn,
22
+ update_queue_status_fn,
23
+ load_lora_file_fn,
24
+ job_queue,
25
+ settings,
26
+ default_prompt: str = '[1s: The person waves hello] [3s: The person jumps up and down] [5s: The person does a dance]',
27
+ lora_names: list = [],
28
+ lora_values: list = []
29
+ ):
30
+ """
31
+ Create the Gradio interface for the video generation application
32
+
33
+ Args:
34
+ process_fn: Function to process a new job
35
+ monitor_fn: Function to monitor an existing job
36
+ end_process_fn: Function to cancel the current job
37
+ update_queue_status_fn: Function to update the queue status display
38
+ default_prompt: Default prompt text
39
+ lora_names: List of loaded LoRA names
40
+
41
+ Returns:
42
+ Gradio Blocks interface
43
+ """
44
+ # Get section boundaries and quick prompts
45
+ section_boundaries = get_section_boundaries()
46
+ quick_prompts = get_quick_prompts()
47
+
48
+ # Create the interface
49
+ css = make_progress_bar_css()
50
+ css += """
51
+ .contain-image img {
52
+ object-fit: contain !important;
53
+ width: 100% !important;
54
+ height: 100% !important;
55
+ background: #222;
56
+ }
57
+ """
58
+
59
+ css += """
60
+ #fixed-toolbar {
61
+ position: fixed;
62
+ top: 0;
63
+ left: 0;
64
+ width: 100vw;
65
+ z-index: 1000;
66
+ background: rgb(11, 15, 25);
67
+ color: #fff;
68
+ padding: 10px 20px;
69
+ display: flex;
70
+ align-items: center;
71
+ gap: 16px;
72
+ box-shadow: 0 2px 8px rgba(0,0,0,0.1);
73
+ border-bottom: 1px solid #4f46e5;
74
+ }
75
+ #toolbar-add-to-queue-btn button {
76
+ font-size: 14px !important;
77
+ padding: 4px 16px !important;
78
+ height: 32px !important;
79
+ min-width: 80px !important;
80
+ }
81
+
82
+
83
+
84
+ .gr-button-primary{
85
+ color:white;
86
+ }
87
+ body, .gradio-container {
88
+ padding-top: 40px !important;
89
+ }
90
+ """
91
+
92
+ css += """
93
+ .narrow-button {
94
+ min-width: 40px !important;
95
+ width: 40px !important;
96
+ padding: 0 !important;
97
+ margin: 0 !important;
98
+ }
99
+ """
100
+
101
+ # Get the theme from settings
102
+ current_theme = settings.get("gradio_theme", "default") # Use default if not found
103
+ block = gr.Blocks(css=css, title="FramePack Studio", theme=current_theme).queue()
104
+
105
+ with block:
106
+
107
+ with gr.Row(elem_id="fixed-toolbar"):
108
+ gr.Markdown("<h1 style='margin:0;color:white;'>FramePack Studio</h1>")
109
+ # with gr.Column(scale=1):
110
+ # queue_stats_display = gr.Markdown("<p style='margin:0;color:white;'>Queue: 0 | Completed: 0</p>")
111
+ with gr.Column(scale=0):
112
+ refresh_stats_btn = gr.Button("⟳", elem_id="refresh-stats-btn")
113
+
114
+
115
+ # Hidden state to track the selected model type
116
+ selected_model_type = gr.State("Original")
117
+
118
+ with gr.Tabs():
119
+ with gr.Tab("Generate (Original)", id="original_tab"):
120
+ with gr.Row():
121
+ with gr.Column(scale=2):
122
+ input_image = gr.Image(
123
+ sources='upload',
124
+ type="numpy",
125
+ label="Image (optional)",
126
+ height=420,
127
+ elem_classes="contain-image"
128
+ )
129
+
130
+
131
+ with gr.Accordion("Latent Image Options", open=False):
132
+ latent_type = gr.Dropdown(
133
+ ["Black", "White", "Noise", "Green Screen"], label="Latent Image", value="Black", info="Used as a starting point if no image is provided"
134
+ )
135
+
136
+ prompt = gr.Textbox(label="Prompt", value=default_prompt)
137
+
138
+ with gr.Accordion("Prompt Parameters", open=False):
139
+ blend_sections = gr.Slider(
140
+ minimum=0, maximum=10, value=4, step=1,
141
+ label="Number of sections to blend between prompts"
142
+ )
143
+ with gr.Accordion("Generation Parameters", open=True):
144
+ with gr.Row():
145
+ steps = gr.Slider(label="Steps", minimum=1, maximum=100, value=25, step=1)
146
+ total_second_length = gr.Slider(label="Video Length (Seconds)", minimum=1, maximum=120, value=6, step=0.1)
147
+ with gr.Row("Resolution"):
148
+ resolutionW = gr.Slider(
149
+ label="Width", minimum=128, maximum=768, value=640, step=32,
150
+ info="Nearest valid width will be used."
151
+ )
152
+ resolutionH = gr.Slider(
153
+ label="Height", minimum=128, maximum=768, value=640, step=32,
154
+ info="Nearest valid height will be used."
155
+ )
156
+ def on_input_image_change(img):
157
+ if img is not None:
158
+ return gr.update(info="Nearest valid bucket size will be used. Height will be adjusted automatically."), gr.update(visible=False)
159
+ else:
160
+ return gr.update(info="Nearest valid width will be used."), gr.update(visible=True)
161
+ input_image.change(fn=on_input_image_change, inputs=[input_image], outputs=[resolutionW, resolutionH])
162
+ with gr.Row("LoRAs"):
163
+ lora_selector = gr.Dropdown(
164
+ choices=lora_names,
165
+ label="Select LoRAs to Load",
166
+ multiselect=True,
167
+ value=[],
168
+ info="Select one or more LoRAs to use for this job"
169
+ )
170
+ lora_names_states = gr.State(lora_names)
171
+ lora_sliders = {}
172
+ for lora in lora_names:
173
+ lora_sliders[lora] = gr.Slider(
174
+ minimum=0.0, maximum=2.0, value=1.0, step=0.01,
175
+ label=f"{lora} Weight", visible=False, interactive=True
176
+ )
177
+
178
+ with gr.Row("Metadata"):
179
+ json_upload = gr.File(
180
+ label="Upload Metadata JSON (optional)",
181
+ file_types=[".json"],
182
+ type="filepath",
183
+ height=100,
184
+ )
185
+ save_metadata = gr.Checkbox(label="Save Metadata", value=True, info="Save to JSON file")
186
+ with gr.Row("TeaCache"):
187
+ use_teacache = gr.Checkbox(label='Use TeaCache', value=True, info='Faster speed, but often makes hands and fingers slightly worse.')
188
+ n_prompt = gr.Textbox(label="Negative Prompt", value="", visible=False) # Not used
189
+
190
+ with gr.Row():
191
+ seed = gr.Number(label="Seed", value=31337, precision=0)
192
+ randomize_seed = gr.Checkbox(label="Randomize", value=False, info="Generate a new random seed for each job")
193
+
194
+ with gr.Accordion("Advanced Parameters", open=False):
195
+ latent_window_size = gr.Slider(label="Latent Window Size", minimum=1, maximum=33, value=9, step=1, visible=True, info='Change at your own risk, very experimental') # Should not change
196
+ cfg = gr.Slider(label="CFG Scale", minimum=1.0, maximum=32.0, value=1.0, step=0.01, visible=False) # Should not change
197
+ gs = gr.Slider(label="Distilled CFG Scale", minimum=1.0, maximum=32.0, value=10.0, step=0.01)
198
+ rs = gr.Slider(label="CFG Re-Scale", minimum=0.0, maximum=1.0, value=0.0, step=0.01, visible=False) # Should not change
199
+ gpu_memory_preservation = gr.Slider(label="GPU Inference Preserved Memory (GB) (larger means slower)", minimum=1, maximum=128, value=6, step=0.1, info="Set this number to a larger value if you encounter OOM. Larger value causes slower speed.")
200
+ with gr.Accordion("Output Parameters", open=False):
201
+ mp4_crf = gr.Slider(label="MP4 Compression", minimum=0, maximum=100, value=16, step=1, info="Lower means better quality. 0 is uncompressed. Change to 16 if you get black outputs. ")
202
+ clean_up_videos = gr.Checkbox(
203
+ label="Clean up video files",
204
+ value=True,
205
+ info="If checked, only the final video will be kept after generation."
206
+ )
207
+
208
+ with gr.Column():
209
+ preview_image = gr.Image(label="Next Latents", height=150, visible=True, type="numpy", interactive=False)
210
+ result_video = gr.Video(label="Finished Frames", autoplay=True, show_share_button=False, height=256, loop=True)
211
+ progress_desc = gr.Markdown('', elem_classes='no-generating-animation')
212
+ progress_bar = gr.HTML('', elem_classes='no-generating-animation')
213
+
214
+ with gr.Row():
215
+ current_job_id = gr.Textbox(label="Current Job ID", visible=True, interactive=True)
216
+ end_button = gr.Button(value="Cancel Current Job", interactive=True)
217
+ start_button = gr.Button(value="Add to Queue", elem_id="toolbar-add-to-queue-btn")
218
+
219
+ with gr.Tab("Generate (F1)", id="f1_tab"):
220
+ with gr.Row():
221
+ with gr.Column(scale=2):
222
+ f1_input_image = gr.Image(
223
+ sources='upload',
224
+ type="numpy",
225
+ label="Image (optional)",
226
+ height=420,
227
+ elem_classes="contain-image"
228
+ )
229
+
230
+
231
+ with gr.Accordion("Latent Image Options", open=False):
232
+ f1_latent_type = gr.Dropdown(
233
+ ["Black", "White", "Noise", "Green Screen"], label="Latent Image", value="Black", info="Used as a starting point if no image is provided"
234
+ )
235
+
236
+ f1_prompt = gr.Textbox(label="Prompt", value=default_prompt)
237
+
238
+ with gr.Accordion("Prompt Parameters", open=False):
239
+ f1_blend_sections = gr.Slider(
240
+ minimum=0, maximum=10, value=4, step=1,
241
+ label="Number of sections to blend between prompts"
242
+ )
243
+ with gr.Accordion("Generation Parameters", open=True):
244
+ with gr.Row():
245
+ f1_steps = gr.Slider(label="Steps", minimum=1, maximum=100, value=25, step=1)
246
+ f1_total_second_length = gr.Slider(label="Video Length (Seconds)", minimum=1, maximum=120, value=5, step=0.1)
247
+ with gr.Row("Resolution"):
248
+ f1_resolutionW = gr.Slider(
249
+ label="Width", minimum=128, maximum=768, value=640, step=32,
250
+ info="Nearest valid width will be used."
251
+ )
252
+ f1_resolutionH = gr.Slider(
253
+ label="Height", minimum=128, maximum=768, value=640, step=32,
254
+ info="Nearest valid height will be used."
255
+ )
256
+ def f1_on_input_image_change(img):
257
+ if img is not None:
258
+ return gr.update(info="Nearest valid bucket size will be used. Height will be adjusted automatically."), gr.update(visible=False)
259
+ else:
260
+ return gr.update(info="Nearest valid width will be used."), gr.update(visible=True)
261
+ f1_input_image.change(fn=f1_on_input_image_change, inputs=[f1_input_image], outputs=[f1_resolutionW, f1_resolutionH])
262
+ with gr.Row("LoRAs"):
263
+ f1_lora_selector = gr.Dropdown(
264
+ choices=lora_names,
265
+ label="Select LoRAs to Load",
266
+ multiselect=True,
267
+ value=[],
268
+ info="Select one or more LoRAs to use for this job"
269
+ )
270
+ f1_lora_names_states = gr.State(lora_names)
271
+ f1_lora_sliders = {}
272
+ for lora in lora_names:
273
+ f1_lora_sliders[lora] = gr.Slider(
274
+ minimum=0.0, maximum=2.0, value=1.0, step=0.01,
275
+ label=f"{lora} Weight", visible=False, interactive=True
276
+ )
277
+
278
+ with gr.Row("Metadata"):
279
+ f1_json_upload = gr.File(
280
+ label="Upload Metadata JSON (optional)",
281
+ file_types=[".json"],
282
+ type="filepath",
283
+ height=100,
284
+ )
285
+ f1_save_metadata = gr.Checkbox(label="Save Metadata", value=True, info="Save to JSON file")
286
+ with gr.Row("TeaCache"):
287
+ f1_use_teacache = gr.Checkbox(label='Use TeaCache', value=True, info='Faster speed, but often makes hands and fingers slightly worse.')
288
+ f1_n_prompt = gr.Textbox(label="Negative Prompt", value="", visible=True)
289
+
290
+ with gr.Row():
291
+ f1_seed = gr.Number(label="Seed", value=31337, precision=0)
292
+ f1_randomize_seed = gr.Checkbox(label="Randomize", value=False, info="Generate a new random seed for each job")
293
+
294
+ with gr.Accordion("Advanced Parameters", open=False):
295
+ f1_latent_window_size = gr.Slider(label="Latent Window Size", minimum=1, maximum=33, value=9, step=1, visible=True, info='Change at your own risk, very experimental')
296
+ f1_cfg = gr.Slider(label="CFG Scale", minimum=1.0, maximum=32.0, value=1.0, step=0.01, visible=False)
297
+ f1_gs = gr.Slider(label="Distilled CFG Scale", minimum=1.0, maximum=32.0, value=10.0, step=0.01)
298
+ f1_rs = gr.Slider(label="CFG Re-Scale", minimum=0.0, maximum=1.0, value=0.0, step=0.01, visible=False)
299
+ f1_gpu_memory_preservation = gr.Slider(label="GPU Inference Preserved Memory (GB) (larger means slower)", minimum=1, maximum=128, value=6, step=0.1, info="Set this number to a larger value if you encounter OOM. Larger value causes slower speed.")
300
+ with gr.Accordion("Output Parameters", open=False):
301
+ f1_mp4_crf = gr.Slider(label="MP4 Compression", minimum=0, maximum=100, value=16, step=1, info="Lower means better quality. 0 is uncompressed. Change to 16 if you get black outputs. ")
302
+ f1_clean_up_videos = gr.Checkbox(
303
+ label="Clean up video files",
304
+ value=True,
305
+ info="If checked, only the final video will be kept after generation."
306
+ )
307
+
308
+ with gr.Column():
309
+ f1_preview_image = gr.Image(label="Next Latents", height=150, visible=True, type="numpy", interactive=False)
310
+ f1_result_video = gr.Video(label="Finished Frames", autoplay=True, show_share_button=False, height=256, loop=True)
311
+ f1_progress_desc = gr.Markdown('', elem_classes='no-generating-animation')
312
+ f1_progress_bar = gr.HTML('', elem_classes='no-generating-animation')
313
+
314
+ with gr.Row():
315
+ f1_current_job_id = gr.Textbox(label="Current Job ID", visible=True, interactive=True)
316
+ f1_end_button = gr.Button(value="Cancel Current Job", interactive=True)
317
+ f1_start_button = gr.Button(value="Add to Queue", elem_id="toolbar-add-to-queue-btn")
318
+
319
+ with gr.Tab("Queue"):
320
+ with gr.Row():
321
+ with gr.Column():
322
+ # Create a container for the queue status
323
+ with gr.Row():
324
+ queue_status = gr.DataFrame(
325
+ headers=["Job ID", "Type", "Status", "Created", "Started", "Completed", "Elapsed"], # Removed Preview header
326
+ datatype=["str", "str", "str", "str", "str", "str", "str"], # Removed image datatype
327
+ label="Job Queue"
328
+ )
329
+ with gr.Row():
330
+ refresh_button = gr.Button("Refresh Queue")
331
+ # Connect the refresh button (Moved inside 'with block')
332
+ refresh_button.click(
333
+ fn=update_queue_status_fn, # Use the function passed in
334
+ inputs=[],
335
+ outputs=[queue_status]
336
+ )
337
+ # Create a container for thumbnails (kept for potential future use, though not displayed in DataFrame)
338
+ with gr.Row():
339
+ thumbnail_container = gr.Column()
340
+ thumbnail_container.elem_classes = ["thumbnail-container"]
341
+
342
+ # Add CSS for thumbnails
343
+ css += """
344
+ .thumbnail-container {
345
+ display: flex;
346
+ flex-wrap: wrap;
347
+ gap: 10px;
348
+ padding: 10px;
349
+ }
350
+ .thumbnail-item {
351
+ width: 100px;
352
+ height: 100px;
353
+ border: 1px solid #444;
354
+ border-radius: 4px;
355
+ overflow: hidden;
356
+ }
357
+ .thumbnail-item img {
358
+ width: 100%;
359
+ height: 100%;
360
+ object-fit: cover;
361
+ }
362
+ """
363
+ # with gr.TabItem("Outputs"):
364
+ # outputDirectory = settings.get("output_dir", settings.default_settings['output_dir'])
365
+ # def get_gallery_items():
366
+ # items = []
367
+ # for f in os.listdir(outputDirectory):
368
+ # if f.endswith(".png"):
369
+ # prefix = os.path.splitext(f)[0]
370
+ # latest_video = get_latest_video_version(prefix)
371
+ # if latest_video:
372
+ # video_path = os.path.join(outputDirectory, latest_video)
373
+ # mtime = os.path.getmtime(video_path)
374
+ # preview_path = os.path.join(outputDirectory, f)
375
+ # items.append((preview_path, prefix, mtime))
376
+ # items.sort(key=lambda x: x[2], reverse=True)
377
+ # return [(i[0], i[1]) for i in items]
378
+ # def get_latest_video_version(prefix):
379
+ # max_number = -1
380
+ # selected_file = None
381
+ # for f in os.listdir(outputDirectory):
382
+ # if f.startswith(prefix + "_") and f.endswith(".mp4"):
383
+ # num = int(f.replace(prefix + "_", '').replace(".mp4", ''))
384
+ # if num > max_number:
385
+ # max_number = num
386
+ # selected_file = f
387
+ # return selected_file
388
+ # def load_video_and_info_from_prefix(prefix):
389
+ # video_file = get_latest_video_version(prefix)
390
+ # if not video_file:
391
+ # return None, "JSON not found."
392
+ # video_path = os.path.join(outputDirectory, video_file)
393
+ # json_path = os.path.join(outputDirectory, prefix) + ".json"
394
+ # info = {"description": "no info"}
395
+ # if os.path.exists(json_path):
396
+ # with open(json_path, "r", encoding="utf-8") as f:
397
+ # info = json.load(f)
398
+ # return video_path, json.dumps(info, indent=2, ensure_ascii=False)
399
+ # gallery_items_state = gr.State(get_gallery_items())
400
+ # with gr.Row():
401
+ # with gr.Column(scale=2):
402
+ # thumbs = gr.Gallery(
403
+ # # value=[i[0] for i in get_gallery_items()],
404
+ # columns=[4],
405
+ # allow_preview=False,
406
+ # object_fit="cover",
407
+ # height="auto"
408
+ # )
409
+ # refresh_button = gr.Button("Update")
410
+ # with gr.Column(scale=5):
411
+ # video_out = gr.Video(sources=[], autoplay=True, loop=True, visible=False)
412
+ # with gr.Column(scale=1):
413
+ # info_out = gr.Textbox(label="Generation info", visible=False)
414
+ # def refresh_gallery():
415
+ # new_items = get_gallery_items()
416
+ # return gr.update(value=[i[0] for i in new_items]), new_items
417
+ # refresh_button.click(fn=refresh_gallery, outputs=[thumbs, gallery_items_state])
418
+
419
+ # def on_select(evt: gr.SelectData, gallery_items):
420
+ # prefix = gallery_items[evt.index][1]
421
+ # video, info = load_video_and_info_from_prefix(prefix)
422
+ # return gr.update(value=video, visible=True), gr.update(value=info, visible=True)
423
+ # thumbs.select(fn=on_select, inputs=[gallery_items_state], outputs=[video_out, info_out])
424
+
425
+ with gr.Tab("Settings"):
426
+ with gr.Row():
427
+ with gr.Column():
428
+ output_dir = gr.Textbox(
429
+ label="Output Directory",
430
+ value=settings.get("output_dir"),
431
+ placeholder="Path to save generated videos"
432
+ )
433
+ metadata_dir = gr.Textbox(
434
+ label="Metadata Directory",
435
+ value=settings.get("metadata_dir"),
436
+ placeholder="Path to save metadata files"
437
+ )
438
+ lora_dir = gr.Textbox(
439
+ label="LoRA Directory",
440
+ value=settings.get("lora_dir"),
441
+ placeholder="Path to LoRA models"
442
+ )
443
+ gradio_temp_dir = gr.Textbox(label="Gradio Temporary Directory", value=settings.get("gradio_temp_dir"))
444
+ auto_save = gr.Checkbox(
445
+ label="Auto-save settings",
446
+ value=settings.get("auto_save_settings", True)
447
+ )
448
+ # Add Gradio Theme Dropdown
449
+ gradio_themes = ["default", "base", "soft", "glass", "mono", "huggingface"]
450
+ theme_dropdown = gr.Dropdown(
451
+ label="Theme",
452
+ choices=gradio_themes,
453
+ value=settings.get("gradio_theme", "soft"),
454
+ info="Select the Gradio UI theme. Requires restart."
455
+ )
456
+ save_btn = gr.Button("Save Settings")
457
+ cleanup_btn = gr.Button("Clean Up Temporary Files")
458
+ status = gr.HTML("")
459
+ cleanup_output = gr.Textbox(label="Cleanup Status", interactive=False)
460
+
461
+ def save_settings(output_dir, metadata_dir, lora_dir, gradio_temp_dir, auto_save, selected_theme):
462
+ try:
463
+ settings.save_settings(
464
+ output_dir=output_dir,
465
+ metadata_dir=metadata_dir,
466
+ lora_dir=lora_dir,
467
+ gradio_temp_dir=gradio_temp_dir,
468
+ auto_save_settings=auto_save,
469
+ gradio_theme=selected_theme
470
+ )
471
+ return "<p style='color:green;'>Settings saved successfully! Restart required for theme change.</p>"
472
+ except Exception as e:
473
+ return f"<p style='color:red;'>Error saving settings: {str(e)}</p>"
474
+
475
+ save_btn.click(
476
+ fn=save_settings,
477
+ inputs=[output_dir, metadata_dir, lora_dir, gradio_temp_dir, auto_save, theme_dropdown],
478
+ outputs=[status]
479
+ )
480
+
481
+ def cleanup_temp_files():
482
+ """Clean up temporary files in the Gradio temp directory"""
483
+ temp_dir = settings.get("gradio_temp_dir")
484
+ if not temp_dir or not os.path.exists(temp_dir):
485
+ return "No temporary directory found or directory does not exist."
486
+
487
+ try:
488
+ # Get all files in the temp directory
489
+ files = os.listdir(temp_dir)
490
+ removed_count = 0
491
+
492
+ for file in files:
493
+ file_path = os.path.join(temp_dir, file)
494
+ try:
495
+ if os.path.isfile(file_path):
496
+ os.remove(file_path)
497
+ removed_count += 1
498
+ except Exception as e:
499
+ print(f"Error removing {file_path}: {e}")
500
+
501
+ return f"Cleaned up {removed_count} temporary files."
502
+ except Exception as e:
503
+ return f"Error cleaning up temporary files: {str(e)}"
504
+
505
+ cleanup_btn.click(
506
+ fn=cleanup_temp_files,
507
+ outputs=[cleanup_output]
508
+ )
509
+
510
+ # --- Event Handlers and Connections (Now correctly indented) ---
511
+
512
+ # Connect the main process function (wrapper for adding to queue)
513
+ def process_with_queue_update(model_type, *args):
514
+ # Extract all arguments (ensure order matches inputs lists)
515
+ input_image, prompt_text, n_prompt, seed_value, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, use_teacache, mp4_crf, randomize_seed_checked, save_metadata_checked, blend_sections, latent_type, clean_up_videos, selected_loras, resolutionW, resolutionH, *lora_args = args
516
+
517
+ # DO NOT parse the prompt here. Parsing happens once in the worker.
518
+
519
+ # Use the current seed value as is for this job
520
+ # Call the process function with all arguments
521
+ # Pass the model_type and the ORIGINAL prompt_text string to the backend process function
522
+ result = process_fn(model_type, input_image, prompt_text, n_prompt, seed_value, total_second_length, # Pass original prompt_text string
523
+ latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation,
524
+ use_teacache, mp4_crf, save_metadata_checked, blend_sections, latent_type, clean_up_videos, selected_loras, resolutionW, resolutionH, *lora_args)
525
+
526
+ # If randomize_seed is checked, generate a new random seed for the next job
527
+ new_seed_value = None
528
+ if randomize_seed_checked:
529
+ new_seed_value = random.randint(0, 21474)
530
+ print(f"Generated new seed for next job: {new_seed_value}")
531
+
532
+ # If a job ID was created, automatically start monitoring it and update queue
533
+ if result and result[1]: # Check if job_id exists in results
534
+ job_id = result[1]
535
+ queue_status_data = update_queue_status_fn()
536
+
537
+ # Add the new seed value to the results if randomize is checked
538
+ if new_seed_value is not None:
539
+ return [result[0], job_id, result[2], result[3], result[4], result[5], result[6], queue_status_data, new_seed_value]
540
+ else:
541
+ return [result[0], job_id, result[2], result[3], result[4], result[5], result[6], queue_status_data, gr.update()]
542
+
543
+ # If no job ID was created, still return the new seed if randomize is checked
544
+ if new_seed_value is not None:
545
+ return result + [update_queue_status_fn(), new_seed_value]
546
+ else:
547
+ return result + [update_queue_status_fn(), gr.update()]
548
+
549
+ # Custom end process function that ensures the queue is updated
550
+ def end_process_with_update():
551
+ queue_status_data = end_process_fn()
552
+ # Make sure to return the queue status data
553
+ return queue_status_data
554
+
555
+ # --- Inputs Lists ---
556
+ # --- Inputs for Original Model ---
557
+ ips = [
558
+ input_image,
559
+ prompt,
560
+ n_prompt,
561
+ seed,
562
+ total_second_length,
563
+ latent_window_size,
564
+ steps,
565
+ cfg,
566
+ gs,
567
+ rs,
568
+ gpu_memory_preservation,
569
+ use_teacache,
570
+ mp4_crf,
571
+ randomize_seed,
572
+ save_metadata,
573
+ blend_sections,
574
+ latent_type,
575
+ clean_up_videos,
576
+ lora_selector,
577
+ resolutionW,
578
+ resolutionH,
579
+ lora_names_states
580
+ ]
581
+ # Add LoRA sliders to the input list
582
+ ips.extend([lora_sliders[lora] for lora in lora_names])
583
+
584
+ # --- Inputs for F1 Model ---
585
+ f1_ips = [
586
+ f1_input_image,
587
+ f1_prompt,
588
+ f1_n_prompt,
589
+ f1_seed,
590
+ f1_total_second_length,
591
+ f1_latent_window_size,
592
+ f1_steps,
593
+ f1_cfg,
594
+ f1_gs,
595
+ f1_rs,
596
+ f1_gpu_memory_preservation,
597
+ f1_use_teacache,
598
+ f1_mp4_crf,
599
+ f1_randomize_seed,
600
+ f1_save_metadata,
601
+ f1_blend_sections,
602
+ f1_latent_type,
603
+ f1_clean_up_videos,
604
+ f1_lora_selector,
605
+ f1_resolutionW,
606
+ f1_resolutionH,
607
+ f1_lora_names_states
608
+ ]
609
+ # Add F1 LoRA sliders to the input list
610
+ f1_ips.extend([f1_lora_sliders[lora] for lora in lora_names])
611
+
612
+ # --- Connect Buttons ---
613
+ start_button.click(
614
+ # Pass "Original" model type
615
+ fn=lambda *args: process_with_queue_update("Original", *args),
616
+ inputs=ips,
617
+ outputs=[result_video, current_job_id, preview_image, progress_desc, progress_bar, start_button, end_button, queue_status, seed]
618
+ )
619
+
620
+ f1_start_button.click(
621
+ # Pass "F1" model type
622
+ fn=lambda *args: process_with_queue_update("F1", *args),
623
+ inputs=f1_ips,
624
+ # Update F1 outputs and shared queue/job ID
625
+ outputs=[f1_result_video, f1_current_job_id, f1_preview_image, f1_progress_desc, f1_progress_bar, f1_start_button, f1_end_button, queue_status, f1_seed]
626
+ )
627
+
628
+ # Connect the end button to cancel the current job and update the queue
629
+ end_button.click(
630
+ fn=end_process_with_update,
631
+ outputs=[queue_status]
632
+ )
633
+ f1_end_button.click(
634
+ fn=end_process_with_update,
635
+ outputs=[queue_status] # Update shared queue status display
636
+ )
637
+
638
+ # --- Connect Monitoring ---
639
+ # Auto-monitor the current job when job_id changes
640
+ # Monitor original tab
641
+ current_job_id.change(
642
+ fn=monitor_fn,
643
+ inputs=[current_job_id],
644
+ outputs=[result_video, current_job_id, preview_image, progress_desc, progress_bar, start_button, end_button]
645
+ )
646
+
647
+ # Monitor F1 tab (using the same monitor function for now, assuming job IDs are unique)
648
+ f1_current_job_id.change(
649
+ fn=monitor_fn,
650
+ inputs=[f1_current_job_id],
651
+ outputs=[f1_result_video, f1_current_job_id, f1_preview_image, f1_progress_desc, f1_progress_bar, f1_start_button, f1_end_button]
652
+ )
653
+
654
+ # --- Connect Queue Refresh ---
655
+ refresh_stats_btn.click(
656
+ fn=lambda: update_queue_status_fn(), # Use update_queue_status_fn passed in
657
+ inputs=None,
658
+ outputs=[queue_status] # Removed queue_stats_display from outputs
659
+ )
660
+
661
+ # Set up auto-refresh for queue status (using a timer)
662
+ refresh_timer = gr.Number(value=0, visible=False)
663
+ def refresh_timer_fn():
664
+ """Updates the timer value periodically to trigger queue refresh"""
665
+ return int(time.time())
666
+ # This timer seems unused, maybe intended for block.load()? Keeping definition for now.
667
+ # refresh_timer.change(
668
+ # fn=update_queue_status_fn, # Use the function passed in
669
+ # outputs=[queue_status] # Update shared queue status display
670
+ # )
671
+
672
+ # --- Connect LoRA UI ---
673
+ # Function to update slider visibility based on selection
674
+ def update_lora_sliders(selected_loras):
675
+ updates = []
676
+ # Need to handle potential missing keys if lora_names changes dynamically
677
+ # For now, assume lora_names passed to create_interface is static
678
+ for lora in lora_names:
679
+ updates.append(gr.update(visible=(lora in selected_loras)))
680
+ # Ensure the output list matches the number of sliders defined
681
+ num_sliders = len(lora_sliders)
682
+ return updates[:num_sliders] # Return only updates for existing sliders
683
+
684
+ # Connect the dropdown to the sliders
685
+ lora_selector.change(
686
+ fn=update_lora_sliders,
687
+ inputs=[lora_selector],
688
+ outputs=[lora_sliders[lora] for lora in lora_names] # Assumes lora_sliders keys match lora_names
689
+ )
690
+
691
+ # Function to update F1 LoRA sliders
692
+ def update_f1_lora_sliders(selected_loras):
693
+ updates = []
694
+ for lora in lora_names:
695
+ updates.append(gr.update(visible=(lora in selected_loras)))
696
+ num_sliders = len(f1_lora_sliders)
697
+ return updates[:num_sliders]
698
+
699
+ # Connect the F1 dropdown to the F1 sliders
700
+ f1_lora_selector.change(
701
+ fn=update_f1_lora_sliders,
702
+ inputs=[f1_lora_selector],
703
+ outputs=[f1_lora_sliders[lora] for lora in lora_names]
704
+ )
705
+
706
+ # --- Connect Metadata Loading ---
707
+ # Function to load metadata from JSON file
708
+ def load_metadata_from_json(json_path):
709
+ if not json_path:
710
+ # Return updates for all potentially affected components
711
+ num_orig_sliders = len(lora_sliders)
712
+ return [gr.update()] * (2 + num_orig_sliders)
713
+
714
+ try:
715
+ with open(json_path, 'r') as f:
716
+ metadata = json.load(f)
717
+
718
+ prompt_val = metadata.get('prompt')
719
+ seed_val = metadata.get('seed')
720
+
721
+ # Check for LoRA values in metadata
722
+ lora_weights = metadata.get('loras', {}) # Changed key to 'loras' based on studio.py worker
723
+
724
+ print(f"Loaded metadata from JSON: {json_path}")
725
+ print(f"Prompt: {prompt_val}, Seed: {seed_val}")
726
+
727
+ # Update the UI components
728
+ updates = [
729
+ gr.update(value=prompt_val) if prompt_val else gr.update(),
730
+ gr.update(value=seed_val) if seed_val is not None else gr.update()
731
+ ]
732
+
733
+ # Update LoRA sliders if they exist in metadata
734
+ for lora in lora_names:
735
+ if lora in lora_weights:
736
+ updates.append(gr.update(value=lora_weights[lora]))
737
+ else:
738
+ updates.append(gr.update()) # No change if LoRA not in metadata
739
+
740
+ # Ensure the number of updates matches the number of outputs
741
+ num_orig_sliders = len(lora_sliders)
742
+ return updates[:2 + num_orig_sliders] # Return updates for prompt, seed, and sliders
743
+
744
+ except Exception as e:
745
+ print(f"Error loading metadata: {e}")
746
+ num_orig_sliders = len(lora_sliders)
747
+ return [gr.update()] * (2 + num_orig_sliders)
748
+
749
+
750
+ # Connect JSON metadata loader for Original tab
751
+ json_upload.change(
752
+ fn=load_metadata_from_json,
753
+ inputs=[json_upload],
754
+ outputs=[prompt, seed] + [lora_sliders[lora] for lora in lora_names]
755
+ )
756
+
757
+ # Connect F1 JSON metadata loader (using same function, assumes outputs match)
758
+ # Need to ensure the output list matches the F1 components
759
+ f1_json_upload.change(
760
+ fn=load_metadata_from_json,
761
+ inputs=[f1_json_upload],
762
+ outputs=[f1_prompt, f1_seed] + [f1_lora_sliders[lora] for lora in lora_names] # Match F1 components
763
+ )
764
+
765
+ # --- Helper Functions (defined within create_interface scope if needed by handlers) ---
766
+ # Function to get queue statistics
767
+ def get_queue_stats():
768
+ try:
769
+ # Get all jobs from the queue
770
+ jobs = job_queue.get_all_jobs()
771
+
772
+ # Count jobs by status
773
+ status_counts = {
774
+ "QUEUED": 0,
775
+ "RUNNING": 0,
776
+ "COMPLETED": 0,
777
+ "FAILED": 0,
778
+ "CANCELLED": 0
779
+ }
780
+
781
+ for job in jobs:
782
+ if hasattr(job, 'status'):
783
+ status = str(job.status) # Use str() for safety
784
+ if status in status_counts:
785
+ status_counts[status] += 1
786
+
787
+ # Format the display text
788
+ stats_text = f"Queue: {status_counts['QUEUED']} | Running: {status_counts['RUNNING']} | Completed: {status_counts['COMPLETED']} | Failed: {status_counts['FAILED']} | Cancelled: {status_counts['CANCELLED']}"
789
+
790
+ return f"<p style='margin:0;color:white;'>{stats_text}</p>"
791
+
792
+ except Exception as e:
793
+ print(f"Error getting queue stats: {e}")
794
+ return "<p style='margin:0;color:white;'>Error loading queue stats</p>"
795
+
796
+ # Add footer with social links
797
+ with gr.Row(elem_id="footer"):
798
+ with gr.Column(scale=1):
799
+ gr.HTML("""
800
+ <div style="text-align: center; padding: 20px; color: #666;">
801
+ <div style="margin-top: 10px;">
802
+ <a href="https://patreon.com/Colinu" target="_blank" style="margin: 0 10px; color: #666; text-decoration: none;">
803
+ <i class="fab fa-patreon"></i>Support on Patreon
804
+ </a>
805
+ <a href="https://discord.gg/MtuM7gFJ3V" target="_blank" style="margin: 0 10px; color: #666; text-decoration: none;">
806
+ <i class="fab fa-discord"></i> Discord
807
+ </a>
808
+ <a href="https://github.com/colinurbs/FramePack-Studio" target="_blank" style="margin: 0 10px; color: #666; text-decoration: none;">
809
+ <i class="fab fa-github"></i> GitHub
810
+ </a>
811
+ </div>
812
+ </div>
813
+ """)
814
+
815
+ # Add CSS for footer
816
+ css += """
817
+ #footer {
818
+ margin-top: 20px;
819
+ padding: 20px;
820
+ border-top: 1px solid #eee;
821
+ }
822
+ #footer a:hover {
823
+ color: #4f46e5 !important;
824
+ }
825
+ """
826
+
827
+ return block
828
+
829
+
830
+ # --- Top-level Helper Functions (Used by Gradio callbacks, must be defined outside create_interface) ---
831
+
832
+ def format_queue_status(jobs):
833
+ """Format job data for display in the queue status table"""
834
+ rows = []
835
+ for job in jobs:
836
+ created = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(job.created_at)) if job.created_at else ""
837
+ started = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(job.started_at)) if job.started_at else ""
838
+ completed = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(job.completed_at)) if job.completed_at else ""
839
+
840
+ # Calculate elapsed time
841
+ elapsed_time = ""
842
+ if job.started_at:
843
+ if job.completed_at:
844
+ start_datetime = datetime.datetime.fromtimestamp(job.started_at)
845
+ complete_datetime = datetime.datetime.fromtimestamp(job.completed_at)
846
+ elapsed_seconds = (complete_datetime - start_datetime).total_seconds()
847
+ elapsed_time = f"{elapsed_seconds:.2f}s"
848
+ else:
849
+ # For running jobs, calculate elapsed time from now
850
+ start_datetime = datetime.datetime.fromtimestamp(job.started_at)
851
+ current_datetime = datetime.datetime.now()
852
+ elapsed_seconds = (current_datetime - start_datetime).total_seconds()
853
+ elapsed_time = f"{elapsed_seconds:.2f}s (running)"
854
+
855
+ # Get generation type from job data
856
+ generation_type = getattr(job, 'generation_type', 'Original')
857
+
858
+ # Removed thumbnail processing
859
+
860
+ rows.append([
861
+ job.id[:6] + '...',
862
+ generation_type,
863
+ job.status.value,
864
+ created,
865
+ started,
866
+ completed,
867
+ elapsed_time
868
+ # Removed thumbnail from row data
869
+ ])
870
+ return rows
871
+
872
+ # Create the queue status update function (wrapper around format_queue_status)
873
+ def update_queue_status_with_thumbnails(): # Function name is now slightly misleading, but keep for now to avoid breaking clicks
874
+ # This function is likely called by the refresh button and potentially the timer
875
+ # It needs access to the job_queue object
876
+ # Assuming job_queue is accessible globally or passed appropriately
877
+ # For now, let's assume it's globally accessible as defined in studio.py
878
+ # If not, this needs adjustment based on how job_queue is managed.
879
+ try:
880
+ # Need access to the global job_queue instance from studio.py
881
+ # This might require restructuring or passing job_queue differently.
882
+ # For now, assuming it's accessible (this might fail if run standalone)
883
+ from __main__ import job_queue # Attempt to import from main script scope
884
+
885
+ jobs = job_queue.get_all_jobs()
886
+ for job in jobs:
887
+ if job.status == JobStatus.PENDING:
888
+ job.queue_position = job_queue.get_queue_position(job.id)
889
+
890
+ if job_queue.current_job:
891
+ job_queue.current_job.status = JobStatus.RUNNING
892
+
893
+ return format_queue_status(jobs)
894
+ except ImportError:
895
+ print("Error: Could not import job_queue. Queue status update might fail.")
896
+ return [] # Return empty list on error
897
+ except Exception as e:
898
+ print(f"Error updating queue status: {e}")
899
+ return []
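A hypothetical wiring sketch showing the call shape `create_interface` expects from an entry script such as studio.py; every callable below is a stand-in, not the real implementation:

```python
from modules.interface import create_interface
from modules.settings import Settings
from modules.video_queue import VideoJobQueue

job_queue = VideoJobQueue()
settings = Settings()

def process_fn(model_type, *args):      # queues a job, returns UI updates + job id
    ...

def monitor_fn(job_id):                 # streams progress for one job id
    ...

def end_process_fn():                   # cancels the current job, returns queue rows
    ...

def update_queue_status_fn():           # returns rows for the queue DataFrame
    ...

demo = create_interface(
    process_fn, monitor_fn, end_process_fn, update_queue_status_fn,
    load_lora_file_fn=None, job_queue=job_queue, settings=settings,
    lora_names=[],
)
demo.launch()
```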
modules/prompt_handler.py ADDED
@@ -0,0 +1,164 @@
+ import re
+ from dataclasses import dataclass
+ from typing import List, Optional
+
+
+ @dataclass
+ class PromptSection:
+     """Represents a section of the prompt with specific timing information"""
+     prompt: str
+     start_time: float = 0  # in seconds
+     end_time: Optional[float] = None  # in seconds, None means until the end
+
+
+ def snap_to_section_boundaries(prompt_sections: List[PromptSection], latent_window_size: int, fps: int = 30) -> List[PromptSection]:
+     """
+     Adjust timestamps to align with model's internal section boundaries
+
+     Args:
+         prompt_sections: List of PromptSection objects
+         latent_window_size: Size of the latent window used in the model
+         fps: Frames per second (default: 30)
+
+     Returns:
+         List of PromptSection objects with aligned timestamps
+     """
+     section_duration = (latent_window_size * 4 - 3) / fps  # Duration of one section in seconds
+
+     aligned_sections = []
+     for section in prompt_sections:
+         # Snap start time to nearest section boundary
+         aligned_start = round(section.start_time / section_duration) * section_duration
+
+         # Snap end time to nearest section boundary
+         aligned_end = None
+         if section.end_time is not None:
+             aligned_end = round(section.end_time / section_duration) * section_duration
+
+         # Ensure minimum section length
+         if aligned_end is not None and aligned_end <= aligned_start:
+             aligned_end = aligned_start + section_duration
+
+         aligned_sections.append(PromptSection(
+             prompt=section.prompt,
+             start_time=aligned_start,
+             end_time=aligned_end
+         ))
+
+     return aligned_sections
+
+
+ def parse_timestamped_prompt(prompt_text: str, total_duration: float, latent_window_size: int = 9, generation_type: str = "Original") -> List[PromptSection]:
+     """
+     Parse a prompt with timestamps in the format [0s-2s: text] or [3s: text]
+
+     Args:
+         prompt_text: The input prompt text with optional timestamp sections
+         total_duration: Total duration of the video in seconds
+         latent_window_size: Size of the latent window used in the model
+         generation_type: Type of generation ("Original" or "F1")
+
+     Returns:
+         List of PromptSection objects with timestamps aligned to section boundaries
+         and reversed to account for reverse generation (only for Original type)
+     """
+     # Default prompt for the entire duration if no timestamps are found
+     if "[" not in prompt_text or "]" not in prompt_text:
+         return [PromptSection(prompt=prompt_text.strip())]
+
+     sections = []
+     # Find all timestamp sections [time: text]
+     timestamp_pattern = r'\[(\d+(?:\.\d+)?s)(?:-(\d+(?:\.\d+)?s))?\s*:\s*(.*?)\]'
+     regular_text = prompt_text
+
+     for match in re.finditer(timestamp_pattern, prompt_text):
+         start_time_str = match.group(1)
+         end_time_str = match.group(2)
+         section_text = match.group(3).strip()
+
+         # Convert time strings to seconds
+         start_time = float(start_time_str.rstrip('s'))
+         end_time = float(end_time_str.rstrip('s')) if end_time_str else None
+
+         sections.append(PromptSection(
+             prompt=section_text,
+             start_time=start_time,
+             end_time=end_time
+         ))
+
+         # Remove the processed section from regular_text
+         regular_text = regular_text.replace(match.group(0), "")
+
+     # If there's any text outside of timestamp sections, use it as a default for the entire duration
+     regular_text = regular_text.strip()
+     if regular_text:
+         sections.append(PromptSection(
+             prompt=regular_text,
+             start_time=0,
+             end_time=None
+         ))
+
+     # Sort sections by start time
+     sections.sort(key=lambda x: x.start_time)
+
+     # Fill in end times if not specified
+     for i in range(len(sections) - 1):
+         if sections[i].end_time is None:
+             sections[i].end_time = sections[i+1].start_time
+
+     # Set the last section's end time to the total duration if not specified
+     if sections and sections[-1].end_time is None:
+         sections[-1].end_time = total_duration
+
+     # Snap timestamps to section boundaries
+     sections = snap_to_section_boundaries(sections, latent_window_size)
+
+     # Only reverse timestamps for Original generation type
+     if generation_type == "Original":
+         # Now reverse the timestamps to account for reverse generation
+         reversed_sections = []
+         for section in sections:
+             reversed_start = total_duration - section.end_time if section.end_time is not None else 0
+             reversed_end = total_duration - section.start_time
+             reversed_sections.append(PromptSection(
+                 prompt=section.prompt,
+                 start_time=reversed_start,
+                 end_time=reversed_end
+             ))
+
+         # Sort the reversed sections by start time
+         reversed_sections.sort(key=lambda x: x.start_time)
+         return reversed_sections
+
+     return sections
+
+
+ def get_section_boundaries(latent_window_size: int = 9, count: int = 10) -> str:
+     """
+     Calculate and format section boundaries for UI display
+
+     Args:
+         latent_window_size: Size of the latent window used in the model
+         count: Number of boundaries to display
+
+     Returns:
+         Formatted string of section boundaries
+     """
+     section_duration = (latent_window_size * 4 - 3) / 30
+     return ", ".join([f"{i*section_duration:.1f}s" for i in range(count)])
+
+
+ def get_quick_prompts() -> List[List[str]]:
+     """
+     Get a list of example timestamped prompts
+
+     Returns:
+         List of example prompts formatted for Gradio Dataset
+     """
+     prompts = [
+         '[0s: The person waves hello] [2s: The person jumps up and down] [4s: The person does a spin]',
+         '[0s: The person raises both arms slowly] [2s: The person claps hands enthusiastically]',
+         '[0s: Person gives thumbs up] [1.1s: Person smiles and winks] [2.2s: Person shows two thumbs down]',
+         '[0s: Person looks surprised] [1.1s: Person raises arms above head] [2.2s-3.3s: Person puts hands on hips]'
+     ]
+     return [[x] for x in prompts]
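A short worked example of the parser above. With the default `latent_window_size=9`, one section lasts (9 * 4 - 3) / 30 = 1.1 s, which is why timestamps snap to 1.1 s multiples and why the quick prompts use 1.1 s / 2.2 s boundaries:

```python
from modules.prompt_handler import parse_timestamped_prompt

sections = parse_timestamped_prompt(
    "[0s: The person waves hello] [2s: The person jumps up and down]",
    total_duration=6.0,
    latent_window_size=9,
    generation_type="F1",   # F1 keeps forward order; "Original" would mirror the times
)
for s in sections:
    print(round(s.start_time, 2), round(s.end_time, 2), s.prompt)
# 0.0 2.2 The person waves hello            (2.0 s snaps up to the 2.2 s boundary)
# 2.2 5.5 The person jumps up and down      (the 6.0 s end snaps down to 5.5 s)
```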
modules/settings.py ADDED
@@ -0,0 +1,75 @@
+ import json
+ from pathlib import Path
+ from typing import Dict, Any, Optional
+ import os
+
+ class Settings:
+     def __init__(self):
+         # Get the project root directory (where settings.py is located)
+         project_root = Path(__file__).parent.parent
+
+         self.settings_file = project_root / ".framepack" / "settings.json"
+         self.settings_file.parent.mkdir(parents=True, exist_ok=True)
+
+         # Set default paths relative to project root
+         self.default_settings = {
+             "output_dir": str(project_root / "outputs"),
+             "metadata_dir": str(project_root / "outputs"),
+             "lora_dir": str(project_root / "loras"),
+             "gradio_temp_dir": str(project_root / "temp"),
+             "auto_save_settings": True,
+             "gradio_theme": "default"
+         }
+         self.settings = self.load_settings()
+
+     def load_settings(self) -> Dict[str, Any]:
+         """Load settings from file or return defaults"""
+         if self.settings_file.exists():
+             try:
+                 with open(self.settings_file, 'r') as f:
+                     loaded_settings = json.load(f)
+                 # Merge with defaults to ensure all settings exist
+                 settings = self.default_settings.copy()
+                 settings.update(loaded_settings)
+                 return settings
+             except Exception as e:
+                 print(f"Error loading settings: {e}")
+                 return self.default_settings.copy()
+         return self.default_settings.copy()
+
+     def _write_to_file(self) -> None:
+         """Persist the current in-memory settings dictionary to disk"""
+         with open(self.settings_file, 'w') as f:
+             json.dump(self.settings, f, indent=4)
+
+     def save_settings(self, output_dir, metadata_dir, lora_dir, gradio_temp_dir, auto_save_settings, gradio_theme="default"):
+         """Save settings to file"""
+         self.settings = {
+             "output_dir": output_dir,
+             "metadata_dir": metadata_dir,
+             "lora_dir": lora_dir,
+             "gradio_temp_dir": gradio_temp_dir,
+             "auto_save_settings": auto_save_settings,
+             "gradio_theme": gradio_theme
+         }
+
+         # Ensure directories exist
+         os.makedirs(output_dir, exist_ok=True)
+         os.makedirs(metadata_dir, exist_ok=True)
+         os.makedirs(lora_dir, exist_ok=True)
+         os.makedirs(gradio_temp_dir, exist_ok=True)
+
+         # Save to file
+         self._write_to_file()
+
+     def get(self, key: str, default: Any = None) -> Any:
+         """Get a setting value"""
+         return self.settings.get(key, default)
+
+     def set(self, key: str, value: Any) -> None:
+         """Set a setting value"""
+         self.settings[key] = value
+         if self.settings.get("auto_save_settings", True):
+             # save_settings() requires explicit arguments, so persist directly here
+             self._write_to_file()
+
+     def update(self, settings: Dict[str, Any]) -> None:
+         """Update multiple settings at once"""
+         self.settings.update(settings)
+         if self.settings.get("auto_save_settings", True):
+             self._write_to_file()
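A small usage sketch for the `Settings` class above; the key names come from `default_settings`, and the printed value depends on the local `.framepack/settings.json`:

```python
from modules.settings import Settings

settings = Settings()
print(settings.get("output_dir"))      # defaults to <project_root>/outputs
settings.set("gradio_theme", "soft")   # persisted immediately when auto_save_settings is True
```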
modules/video_queue.py ADDED
@@ -0,0 +1,341 @@
1
+ import threading
2
+ import time
3
+ import uuid
4
+ from dataclasses import dataclass, field
5
+ from enum import Enum
6
+ from typing import Dict, Any, Optional, List
7
+ import queue as queue_module # Renamed to avoid conflicts
8
+ import io
9
+ import base64
10
+ from PIL import Image
11
+ import numpy as np
12
+
13
+ from diffusers_helper.thread_utils import AsyncStream
14
+
15
+
16
+ # Simple LIFO queue implementation to avoid dependency on queue.LifoQueue
17
+ class SimpleLifoQueue:
18
+ def __init__(self):
19
+ self._queue = []
20
+ self._mutex = threading.Lock()
21
+ self._not_empty = threading.Condition(self._mutex)
22
+
23
+ def put(self, item):
24
+ with self._mutex:
25
+ self._queue.append(item)
26
+ self._not_empty.notify()
27
+
28
+ def get(self):
29
+ with self._not_empty:
30
+ while not self._queue:
31
+ self._not_empty.wait()
32
+ return self._queue.pop()
33
+
34
+ def task_done(self):
35
+ pass # For compatibility with queue.Queue
36
+
37
+
38
+ class JobStatus(Enum):
39
+ PENDING = "pending"
40
+ RUNNING = "running"
41
+ COMPLETED = "completed"
42
+ FAILED = "failed"
43
+ CANCELLED = "cancelled"
44
+
45
+
46
+ @dataclass
47
+ class Job:
48
+ id: str
49
+ params: Dict[str, Any]
50
+ status: JobStatus = JobStatus.PENDING
51
+ created_at: float = field(default_factory=time.time)
52
+ started_at: Optional[float] = None
53
+ completed_at: Optional[float] = None
54
+ error: Optional[str] = None
55
+ result: Optional[str] = None
56
+ progress_data: Optional[Dict] = None
57
+ queue_position: Optional[int] = None
58
+ stream: Optional[Any] = None
59
+ input_image: Optional[np.ndarray] = None
60
+ latent_type: Optional[str] = None
61
+ thumbnail: Optional[str] = None
62
+ generation_type: Optional[str] = None # Added generation_type
63
+
64
+ def __post_init__(self):
65
+ # Store generation type
66
+ self.generation_type = self.params.get('model_type', 'Original') # Initialize generation_type
67
+
68
+ # Store input image or latent type
69
+ if 'input_image' in self.params and self.params['input_image'] is not None:
70
+ self.input_image = self.params['input_image']
71
+ # Create thumbnail
72
+ img = Image.fromarray(self.input_image)
73
+ img.thumbnail((100, 100))
74
+ buffered = io.BytesIO()
75
+ img.save(buffered, format="PNG")
76
+ self.thumbnail = f"data:image/png;base64,{base64.b64encode(buffered.getvalue()).decode()}"
77
+ elif 'latent_type' in self.params:
78
+ self.latent_type = self.params['latent_type']
79
+ # Create a colored square based on latent type
80
+ color_map = {
81
+ "Black": (0, 0, 0),
82
+ "White": (255, 255, 255),
83
+ "Noise": (128, 128, 128),
84
+ "Green Screen": (0, 177, 64)
85
+ }
86
+ color = color_map.get(self.latent_type, (0, 0, 0))
87
+ img = Image.new('RGB', (100, 100), color)
88
+ buffered = io.BytesIO()
89
+ img.save(buffered, format="PNG")
90
+ self.thumbnail = f"data:image/png;base64,{base64.b64encode(buffered.getvalue()).decode()}"
91
+
92
+
93
+ class VideoJobQueue:
94
+ def __init__(self):
95
+ self.queue = queue_module.Queue() # Using standard Queue instead of LifoQueue
96
+ self.jobs = {}
97
+ self.current_job = None
98
+ self.lock = threading.Lock()
99
+ self.worker_thread = threading.Thread(target=self._worker_loop, daemon=True)
100
+ self.worker_thread.start()
101
+ self.worker_function = None # Will be set from outside
102
+ self.is_processing = False # Flag to track if we're currently processing a job
103
+
104
+ def set_worker_function(self, worker_function):
105
+ """Set the worker function to use for processing jobs"""
106
+ self.worker_function = worker_function
107
+
108
+ def add_job(self, params):
109
+ """Add a job to the queue and return its ID"""
110
+ job_id = str(uuid.uuid4())
111
+ job = Job(
112
+ id=job_id,
113
+ params=params,
114
+ status=JobStatus.PENDING,
115
+ created_at=time.time(),
116
+ progress_data={},
117
+ stream=AsyncStream()
118
+ )
119
+
120
+ with self.lock:
121
+ print(f"Adding job {job_id} to queue, current job is {self.current_job.id if self.current_job else 'None'}")
122
+ self.jobs[job_id] = job
123
+ self.queue.put(job_id)
124
+
125
+ return job_id
126
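A hypothetical sketch of driving `VideoJobQueue` directly; the worker function and its parameters are placeholders rather than the real studio.py worker:

```python
import time
from modules.video_queue import VideoJobQueue

def fake_worker(job_stream=None, **params):
    time.sleep(1)  # pretend to render something

queue = VideoJobQueue()
queue.set_worker_function(fake_worker)
job_id = queue.add_job({"model_type": "Original", "latent_type": "Black"})

print(queue.get_queue_position(job_id))   # 1 while pending, 0 once it is running
print(queue.get_job(job_id).status)       # JobStatus.PENDING / RUNNING / ...
```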
+
127
+ def get_job(self, job_id):
128
+ """Get job by ID"""
129
+ with self.lock:
130
+ return self.jobs.get(job_id)
131
+
132
+ def get_all_jobs(self):
133
+ """Get all jobs"""
134
+ with self.lock:
135
+ return list(self.jobs.values())
136
+
137
+ def cancel_job(self, job_id):
138
+ """Cancel a pending job"""
139
+ with self.lock:
140
+ job = self.jobs.get(job_id)
141
+ if job and job.status == JobStatus.PENDING:
142
+ job.status = JobStatus.CANCELLED
143
+ job.completed_at = time.time() # Mark completion time
144
+ return True
145
+ elif job and job.status == JobStatus.RUNNING:
146
+ # Send cancel signal to the job's stream
147
+ job.stream.input_queue.push('end')
148
+ # Mark job as cancelled (this will be confirmed when the worker processes the end signal)
149
+ job.status = JobStatus.CANCELLED
150
+ job.completed_at = time.time() # Mark completion time
151
+ return True
152
+ return False
153
+
154
+ def get_queue_position(self, job_id):
155
+ """Get position in queue (0 = currently running)"""
156
+ with self.lock:
157
+ job = self.jobs.get(job_id)
158
+ if not job:
159
+ return None
160
+
161
+ if job.status == JobStatus.RUNNING:
162
+ return 0
163
+
164
+ if job.status != JobStatus.PENDING:
165
+ return None
166
+
167
+ # Count pending jobs ahead in queue
168
+ position = 1 # Start at 1 because 0 means running
169
+ for j in self.jobs.values():
170
+ if (j.status == JobStatus.PENDING and
171
+ j.created_at < job.created_at):
172
+ position += 1
173
+ return position
174
+
175
+ def update_job_progress(self, job_id, progress_data):
176
+ """Update job progress data"""
177
+ with self.lock:
178
+ job = self.jobs.get(job_id)
179
+ if job:
180
+ job.progress_data = progress_data
181
+
182
+ def _worker_loop(self):
183
+ """Worker thread that processes jobs from the queue"""
184
+ while True:
185
+ try:
186
+ # Get the next job ID from the queue
187
+ try:
188
+ job_id = self.queue.get(block=True, timeout=1.0) # Use timeout to allow periodic checks
189
+ except queue_module.Empty:
190
+ # No jobs in queue, just continue the loop
191
+ continue
192
+
193
+ with self.lock:
194
+ job = self.jobs.get(job_id)
195
+ if not job:
196
+ self.queue.task_done()
197
+ continue
198
+
199
+ # Skip cancelled jobs
200
+ if job.status == JobStatus.CANCELLED:
201
+ self.queue.task_done()
202
+ continue
203
+
204
+ # If we're already processing a job, wait for it to complete
205
+ if self.is_processing:
206
+ # Put the job back in the queue
207
+ self.queue.put(job_id)
208
+ self.queue.task_done()
209
+ time.sleep(0.1) # Small delay to prevent busy waiting
210
+ continue
211
+
212
+ print(f"Starting job {job_id}, current job was {self.current_job.id if self.current_job else 'None'}")
213
+ job.status = JobStatus.RUNNING
214
+ job.started_at = time.time()
215
+ self.current_job = job
216
+ self.is_processing = True
217
+
218
+ job_completed = False
219
+
220
+ try:
221
+ if self.worker_function is None:
222
+ raise ValueError("Worker function not set. Call set_worker_function() first.")
223
+
224
+ # Start the worker function with the job parameters
225
+ from diffusers_helper.thread_utils import async_run
226
+ async_run(
227
+ self.worker_function,
228
+ **job.params,
229
+ job_stream=job.stream
230
+ )
231
+
232
+ # Process the results from the stream
233
+ output_filename = None
234
+
235
+ # Set a maximum time to wait for the job to complete
236
+ max_wait_time = 3600 # 1 hour in seconds
237
+ start_time = time.time()
238
+ last_activity_time = time.time()
239
+
240
+ while True:
241
+ # Check if job has been cancelled before processing next output
242
+ with self.lock:
243
+ if job.status == JobStatus.CANCELLED:
244
+ print(f"Job {job_id} was cancelled, breaking out of processing loop")
245
+ job_completed = True
246
+ break
247
+
248
+ # Check if we've been waiting too long without any activity
249
+ current_time = time.time()
250
+ if current_time - start_time > max_wait_time:
251
+ print(f"Job {job_id} timed out after {max_wait_time} seconds")
252
+ with self.lock:
253
+ job.status = JobStatus.FAILED
254
+ job.error = "Job timed out"
255
+ job.completed_at = time.time()
256
+ job_completed = True
257
+ break
258
+
259
+ # Check for inactivity (no output for a while)
260
+ if current_time - last_activity_time > 60: # 1 minute of inactivity
261
+ print(f"Checking if job {job_id} is still active...")
262
+ # Just a periodic check, don't break yet
263
+
264
+ try:
265
+ # Try to get data from the queue with a non-blocking approach
266
+ flag, data = job.stream.output_queue.next()
267
+
268
+ # Update activity time since we got some data
269
+ last_activity_time = time.time()
270
+
271
+ if flag == 'file':
272
+ output_filename = data
273
+ with self.lock:
274
+ job.result = output_filename
275
+
276
+ elif flag == 'progress':
277
+ preview, desc, html = data
278
+ with self.lock:
279
+ job.progress_data = {
280
+ 'preview': preview,
281
+ 'desc': desc,
282
+ 'html': html
283
+ }
284
+
285
+ elif flag == 'end':
286
+ print(f"Received end signal for job {job_id}")
287
+ job_completed = True
288
+ break
289
+
290
+ except IndexError:
291
+ # Queue is empty, wait a bit and try again
292
+ time.sleep(0.1)
293
+ continue
294
+ except Exception as e:
295
+ print(f"Error processing job output: {e}")
296
+ # Wait a bit before trying again
297
+ time.sleep(0.1)
298
+ continue
299
+ except Exception as e:
300
+ import traceback
301
+ traceback.print_exc()
302
+ print(f"Error processing job {job_id}: {e}")
303
+ with self.lock:
304
+ job.status = JobStatus.FAILED
305
+ job.error = str(e)
306
+ job.completed_at = time.time()
307
+ job_completed = True
308
+
309
+ finally:
310
+ with self.lock:
311
+ # Make sure we properly clean up the job state
312
+ if job.status == JobStatus.RUNNING:
313
+ if job_completed:
314
+ job.status = JobStatus.COMPLETED
315
+ else:
316
+ # Something went wrong but we didn't mark it as completed
317
+ job.status = JobStatus.FAILED
318
+ job.error = "Job processing was interrupted"
319
+
320
+ job.completed_at = time.time()
321
+
322
+ print(f"Finishing job {job_id} with status {job.status}")
323
+ self.is_processing = False
324
+ self.current_job = None
325
+ self.queue.task_done()
326
+
327
+ except Exception as e:
328
+ import traceback
329
+ traceback.print_exc()
330
+ print(f"Error in worker loop: {e}")
331
+
332
+ # Make sure we reset processing state if there was an error
333
+ with self.lock:
334
+ self.is_processing = False
335
+ if self.current_job:
336
+ self.current_job.status = JobStatus.FAILED
337
+ self.current_job.error = f"Worker loop error: {str(e)}"
338
+ self.current_job.completed_at = time.time()
339
+ self.current_job = None
340
+
341
+ time.sleep(0.5) # Prevent tight loop on error
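For context, a minimal usage sketch of the queue defined above (illustrative only, not part of the committed files; the trivial_worker name and the 0.5s polling interval are made up, and it assumes the modules package from this repo is importable). The worker function is registered once, jobs are submitted as plain parameter dicts, and a job is finished once the worker pushes 'end' onto its output stream:

import time
from modules.video_queue import VideoJobQueue, JobStatus

def trivial_worker(job_stream=None, **params):
    # A real worker pushes ('progress', ...) and ('file', path) tuples first;
    # pushing ('end', None) is what lets _worker_loop mark the job COMPLETED.
    job_stream.output_queue.push(('end', None))

q = VideoJobQueue()
q.set_worker_function(trivial_worker)
job_id = q.add_job({'model_type': 'Original', 'latent_type': 'Black'})

while q.get_job(job_id).status not in (JobStatus.COMPLETED, JobStatus.FAILED, JobStatus.CANCELLED):
    time.sleep(0.5)
print(f"Job {job_id} finished with status {q.get_job(job_id).status}")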
requirements.txt ADDED
@@ -0,0 +1,17 @@
1
+ accelerate==1.6.0
2
+ diffusers==0.33.1
3
+ transformers==4.46.2
4
+ gradio==5.25.2
5
+ sentencepiece==0.2.0
6
+ pillow==11.1.0
7
+ av==12.1.0
8
+ numpy==1.26.2
9
+ scipy==1.12.0
10
+ requests==2.31.0
11
+ torchsde==0.2.6
12
+ jinja2>=3.1.2
13
+ torchvision
14
+ einops
15
+ opencv-contrib-python
16
+ safetensors
17
+ peft
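Note that torch itself is not pinned above; it is presumably provided by the environment or pulled in transitively via torchvision. A minimal, illustrative way to install the pins programmatically (equivalent to running pip install -r requirements.txt from a shell, and assuming requirements.txt is in the current working directory):

# Illustrative install helper, not part of the committed files.
import subprocess
import sys

subprocess.check_call([sys.executable, "-m", "pip", "install", "-r", "requirements.txt"])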
studio.py ADDED
@@ -0,0 +1,1115 @@
1
+ from diffusers_helper.hf_login import login
2
+
3
+ import json
4
+ import os
5
+ import time
6
+ import argparse
7
+ import traceback
8
+ import einops
9
+ import numpy as np
10
+ import torch
11
+
12
+ os.environ['HF_HOME'] = os.path.abspath(os.path.realpath(os.path.join(os.path.dirname(__file__), './hf_download')))
13
+
14
+ import gradio as gr
15
+ from PIL import Image
16
+ from PIL.PngImagePlugin import PngInfo
17
+ from diffusers import AutoencoderKLHunyuanVideo
18
+ from transformers import LlamaModel, CLIPTextModel, LlamaTokenizerFast, CLIPTokenizer
19
+ from diffusers_helper.hunyuan import encode_prompt_conds, vae_decode, vae_encode, vae_decode_fake
20
+ from diffusers_helper.utils import save_bcthw_as_mp4, crop_or_pad_yield_mask, soft_append_bcthw, resize_and_center_crop, generate_timestamp
21
+ from diffusers_helper.models.hunyuan_video_packed import HunyuanVideoTransformer3DModelPacked
22
+ from diffusers_helper.pipelines.k_diffusion_hunyuan import sample_hunyuan
23
+ from diffusers_helper.memory import cpu, gpu, get_cuda_free_memory_gb, move_model_to_device_with_memory_preservation, offload_model_from_device_for_memory_preservation, fake_diffusers_current_device, DynamicSwapInstaller, unload_complete_models, load_model_as_complete
24
+ from diffusers_helper.thread_utils import AsyncStream
25
+ from diffusers_helper.gradio.progress_bar import make_progress_bar_html
26
+ from transformers import SiglipImageProcessor, SiglipVisionModel
27
+ from diffusers_helper.clip_vision import hf_clip_vision_encode
28
+ from diffusers_helper.bucket_tools import find_nearest_bucket
29
+ from diffusers_helper import lora_utils
30
+ from diffusers_helper.lora_utils import load_lora, unload_all_loras
31
+
32
+ # Import from modules
33
+ from modules.video_queue import VideoJobQueue, JobStatus
34
+ from modules.prompt_handler import parse_timestamped_prompt
35
+ from modules.interface import create_interface, format_queue_status
36
+ from modules.settings import Settings
37
+
38
+ # ADDED: Debug function to verify LoRA state
39
+ def verify_lora_state(transformer, label=""):
40
+ """Debug function to verify the state of LoRAs in a transformer model"""
41
+ if transformer is None:
42
+ print(f"[{label}] Transformer is None, cannot verify LoRA state")
43
+ return
44
+
45
+ has_loras = False
46
+ if hasattr(transformer, 'peft_config'):
47
+ adapter_names = list(transformer.peft_config.keys()) if transformer.peft_config else []
48
+ if adapter_names:
49
+ has_loras = True
50
+ print(f"[{label}] Transformer has LoRAs: {', '.join(adapter_names)}")
51
+ else:
52
+ print(f"[{label}] Transformer has no LoRAs in peft_config")
53
+ else:
54
+ print(f"[{label}] Transformer has no peft_config attribute")
55
+
56
+ # Check for any LoRA modules
57
+ for name, module in transformer.named_modules():
58
+ if hasattr(module, 'lora_A') and module.lora_A:
59
+ has_loras = True
60
+ # print(f"[{label}] Found lora_A in module {name}")
61
+ if hasattr(module, 'lora_B') and module.lora_B:
62
+ has_loras = True
63
+ # print(f"[{label}] Found lora_B in module {name}")
64
+
65
+ if not has_loras:
66
+ print(f"[{label}] No LoRA components found in transformer")
67
+
68
+
69
+ parser = argparse.ArgumentParser()
70
+ parser.add_argument('--share', action='store_true')
71
+ parser.add_argument("--server", type=str, default='0.0.0.0')
72
+ parser.add_argument("--port", type=int, required=False)
73
+ parser.add_argument("--inbrowser", action='store_true')
74
+ parser.add_argument("--lora", type=str, default=None, help="Lora path (comma separated for multiple)")
75
+ args = parser.parse_args()
76
+
77
+ print(args)
78
+
79
+ free_mem_gb = get_cuda_free_memory_gb(gpu)
80
+ high_vram = free_mem_gb > 60
81
+
82
+ print(f'Free VRAM {free_mem_gb} GB')
83
+ print(f'High-VRAM Mode: {high_vram}')
84
+
85
+ # Load models
86
+ text_encoder = LlamaModel.from_pretrained("hunyuanvideo-community/HunyuanVideo", subfolder='text_encoder', torch_dtype=torch.float16).cpu()
87
+ text_encoder_2 = CLIPTextModel.from_pretrained("hunyuanvideo-community/HunyuanVideo", subfolder='text_encoder_2', torch_dtype=torch.float16).cpu()
88
+ tokenizer = LlamaTokenizerFast.from_pretrained("hunyuanvideo-community/HunyuanVideo", subfolder='tokenizer')
89
+ tokenizer_2 = CLIPTokenizer.from_pretrained("hunyuanvideo-community/HunyuanVideo", subfolder='tokenizer_2')
90
+ vae = AutoencoderKLHunyuanVideo.from_pretrained("hunyuanvideo-community/HunyuanVideo", subfolder='vae', torch_dtype=torch.float16).cpu()
91
+
92
+ feature_extractor = SiglipImageProcessor.from_pretrained("lllyasviel/flux_redux_bfl", subfolder='feature_extractor')
93
+ image_encoder = SiglipVisionModel.from_pretrained("lllyasviel/flux_redux_bfl", subfolder='image_encoder', torch_dtype=torch.float16).cpu()
94
+
95
+ # Initialize transformer placeholders
96
+ transformer_original = None
97
+ transformer_f1 = None
98
+ current_transformer = None # Will hold the currently active model
99
+
100
+ # Load models based on VRAM availability later
101
+
102
+ # Configure models
103
+ vae.eval()
104
+ text_encoder.eval()
105
+ text_encoder_2.eval()
106
+ image_encoder.eval()
107
+
108
+ if not high_vram:
109
+ vae.enable_slicing()
110
+ vae.enable_tiling()
111
+
112
+
113
+ vae.to(dtype=torch.float16)
114
+ image_encoder.to(dtype=torch.float16)
115
+ text_encoder.to(dtype=torch.float16)
116
+ text_encoder_2.to(dtype=torch.float16)
117
+
118
+ vae.requires_grad_(False)
119
+ text_encoder.requires_grad_(False)
120
+ text_encoder_2.requires_grad_(False)
121
+ image_encoder.requires_grad_(False)
122
+
123
+ # Create lora directory if it doesn't exist
124
+ lora_dir = os.path.join(os.path.dirname(__file__), 'loras')
125
+ os.makedirs(lora_dir, exist_ok=True)
126
+
127
+ # Initialize LoRA support - moved scanning after settings load
128
+ lora_names = []
129
+ lora_values = [] # Not populated here; may hold per-LoRA weights later
130
+
131
+ script_dir = os.path.dirname(os.path.abspath(__file__))
132
+
133
+ # Define default LoRA folder path relative to the script directory (used if setting is missing)
134
+ default_lora_folder = os.path.join(script_dir, "loras")
135
+ os.makedirs(default_lora_folder, exist_ok=True) # Ensure default exists
136
+
137
+ if not high_vram:
138
+ # DynamicSwapInstaller is the same as huggingface's enable_sequential_offload but 3x faster
139
+ DynamicSwapInstaller.install_model(text_encoder, device=gpu)
140
+ else:
141
+ text_encoder.to(gpu)
142
+ text_encoder_2.to(gpu)
143
+ image_encoder.to(gpu)
144
+ vae.to(gpu)
145
+
146
+ stream = AsyncStream()
147
+
148
+ outputs_folder = './outputs/'
149
+ os.makedirs(outputs_folder, exist_ok=True)
150
+
151
+ # Initialize settings
152
+ settings = Settings()
153
+
154
+ # --- Populate LoRA names AFTER settings are loaded ---
155
+ lora_folder_from_settings = settings.get("lora_dir", default_lora_folder) # Use setting, fallback to default
156
+ print(f"Scanning for LoRAs in: {lora_folder_from_settings}")
157
+ if os.path.isdir(lora_folder_from_settings):
158
+ try:
159
+ lora_files = [f for f in os.listdir(lora_folder_from_settings)
160
+ if f.endswith('.safetensors') or f.endswith('.pt')]
161
+ for lora_file in lora_files:
162
+ lora_names.append(lora_file.split('.')[0]) # Get name without extension
163
+ print(f"Found LoRAs: {lora_names}")
164
+ except Exception as e:
165
+ print(f"Error scanning LoRA directory '{lora_folder_from_settings}': {e}")
166
+ else:
167
+ print(f"LoRA directory not found: {lora_folder_from_settings}")
168
+ # --- End LoRA population ---
169
+
170
+
171
+ # Create job queue
172
+ job_queue = VideoJobQueue()
173
+
174
+
175
+ def move_lora_adapters_to_device(model, target_device):
176
+ """
177
+ Move all LoRA adapters in a model to the specified device.
178
+ This handles the PEFT implementation of LoRA.
179
+ """
180
+ print(f"Moving all LoRA adapters to {target_device}")
181
+
182
+ # First, find all modules with LoRA adapters
183
+ lora_modules = []
184
+ for name, module in model.named_modules():
185
+ if hasattr(module, 'active_adapter') and hasattr(module, 'lora_A') and hasattr(module, 'lora_B'):
186
+ lora_modules.append((name, module))
187
+
188
+ # Now move all LoRA components to the target device
189
+ for name, module in lora_modules:
190
+ # Get the active adapter name
191
+ active_adapter = module.active_adapter
192
+
193
+ # Move the LoRA layers to the target device
194
+ if active_adapter is not None:
195
+ if isinstance(module.lora_A, torch.nn.ModuleDict):
196
+ # Handle ModuleDict case (PEFT implementation)
197
+ for adapter_name in list(module.lora_A.keys()):
198
+ # Move lora_A
199
+ if adapter_name in module.lora_A:
200
+ module.lora_A[adapter_name] = module.lora_A[adapter_name].to(target_device)
201
+
202
+ # Move lora_B
203
+ if adapter_name in module.lora_B:
204
+ module.lora_B[adapter_name] = module.lora_B[adapter_name].to(target_device)
205
+
206
+ # Move scaling
207
+ if hasattr(module, 'scaling') and isinstance(module.scaling, dict) and adapter_name in module.scaling:
208
+ if isinstance(module.scaling[adapter_name], torch.Tensor):
209
+ module.scaling[adapter_name] = module.scaling[adapter_name].to(target_device)
210
+ else:
211
+ # Handle direct attribute case
212
+ if hasattr(module, 'lora_A') and module.lora_A is not None:
213
+ module.lora_A = module.lora_A.to(target_device)
214
+ if hasattr(module, 'lora_B') and module.lora_B is not None:
215
+ module.lora_B = module.lora_B.to(target_device)
216
+ if hasattr(module, 'scaling') and module.scaling is not None:
217
+ if isinstance(module.scaling, torch.Tensor):
218
+ module.scaling = module.scaling.to(target_device)
219
+
220
+ print(f"Moved all LoRA adapters to {target_device}")
221
+ return model
222
+
223
+
224
+ # Function to load a LoRA file
225
+ def load_lora_file(lora_file):
226
+ if not lora_file:
227
+ return None, "No file selected"
228
+
229
+ try:
230
+ # Get the filename from the path
231
+ _, lora_name = os.path.split(lora_file)
232
+
233
+ # Copy the file to the lora directory
234
+ lora_dest = os.path.join(lora_dir, lora_name)
235
+ import shutil
236
+ shutil.copy(lora_file, lora_dest)
237
+
238
+ # Load the LoRA - NOTE: This needs adjustment for multiple transformers
239
+ global current_transformer, lora_names
240
+ if current_transformer is None:
241
+ return None, "Error: No model loaded to apply LoRA to. Generate something first."
242
+
243
+ # ADDED: Unload any existing LoRAs first
244
+ current_transformer = lora_utils.unload_all_loras(current_transformer)
245
+
246
+ current_transformer = lora_utils.load_lora(current_transformer, lora_dir, lora_name)
247
+
248
+ # Add to lora_names if not already there
249
+ lora_base_name = lora_name.split('.')[0]
250
+ if lora_base_name not in lora_names:
251
+ lora_names.append(lora_base_name)
252
+
253
+ # Get the current device of the transformer
254
+ device = next(current_transformer.parameters()).device
255
+
256
+ # Move all LoRA adapters to the same device as the base model
257
+ move_lora_adapters_to_device(current_transformer, device)
258
+
259
+ print(f"Loaded LoRA: {lora_name} to {type(current_transformer).__name__}")
260
+
261
+ # ADDED: Verify LoRA state after loading
262
+ verify_lora_state(current_transformer, "After loading LoRA file")
263
+
264
+ return gr.update(choices=lora_names), f"Successfully loaded LoRA: {lora_name}"
265
+ except Exception as e:
266
+ print(f"Error loading LoRA: {e}")
267
+ return None, f"Error loading LoRA: {e}"
268
+
269
+ @torch.no_grad()
270
+ def worker(
271
+ model_type,
272
+ input_image,
273
+ prompt_text,
274
+ n_prompt,
275
+ seed,
276
+ total_second_length,
277
+ latent_window_size,
278
+ steps,
279
+ cfg,
280
+ gs,
281
+ rs,
282
+ gpu_memory_preservation,
283
+ use_teacache,
284
+ mp4_crf,
285
+ save_metadata,
286
+ blend_sections,
287
+ latent_type,
288
+ selected_loras,
289
+ clean_up_videos,
290
+ lora_values=None,
291
+ job_stream=None,
292
+ output_dir=None,
293
+ metadata_dir=None,
294
+ resolutionW=640, # Add resolution parameter with default value
295
+ resolutionH=640,
296
+ lora_loaded_names=[]
297
+ ):
298
+ global transformer_original, transformer_f1, current_transformer, high_vram
299
+
300
+ # ADDED: Ensure any existing LoRAs are unloaded from the current transformer
301
+ if current_transformer is not None:
302
+ print("Unloading any existing LoRAs before starting new job")
303
+ current_transformer = lora_utils.unload_all_loras(current_transformer)
304
+ import gc
305
+ gc.collect()
306
+ if torch.cuda.is_available():
307
+ torch.cuda.empty_cache()
308
+
309
+ # ADDED: Verify LoRA state at worker start
310
+ verify_lora_state(current_transformer, "Worker start")
311
+
312
+ stream_to_use = job_stream if job_stream is not None else stream
313
+
314
+ total_latent_sections = (total_second_length * 30) / (latent_window_size * 4)
315
+ total_latent_sections = int(max(round(total_latent_sections), 1))
316
+
317
+ # Parse the timestamped prompt with boundary snapping and reversing
318
+ # prompt_text should now be the original string from the job queue
319
+ prompt_sections = parse_timestamped_prompt(prompt_text, total_second_length, latent_window_size, model_type)
320
+ job_id = generate_timestamp()
321
+
322
+ stream_to_use.output_queue.push(('progress', (None, '', make_progress_bar_html(0, 'Starting ...'))))
323
+
324
+ try:
325
+ if not high_vram:
326
+ # Unload everything *except* the potentially active transformer
327
+ unload_complete_models(text_encoder, text_encoder_2, image_encoder, vae)
328
+ if current_transformer is not None:
329
+ offload_model_from_device_for_memory_preservation(current_transformer, target_device=gpu, preserved_memory_gb=8)
330
+
331
+ # --- Model Loading / Switching ---
332
+ print(f"Worker starting for model type: {model_type}")
333
+ target_transformer_model = None
334
+ other_transformer_model = None
335
+
336
+ if model_type == "Original":
337
+ if transformer_original is None:
338
+ print("Loading Original Transformer...")
339
+ transformer_original = HunyuanVideoTransformer3DModelPacked.from_pretrained('lllyasviel/FramePackI2V_HY', torch_dtype=torch.bfloat16).cpu()
340
+ transformer_original.eval()
341
+ transformer_original.to(dtype=torch.bfloat16)
342
+ transformer_original.requires_grad_(False)
343
+ if not high_vram:
344
+ DynamicSwapInstaller.install_model(transformer_original, device=gpu)
345
+ print("Original Transformer Loaded.")
346
+ target_transformer_model = transformer_original
347
+ other_transformer_model = transformer_f1
348
+ elif model_type == "F1":
349
+ if transformer_f1 is None:
350
+ print("Loading F1 Transformer...")
351
+ transformer_f1 = HunyuanVideoTransformer3DModelPacked.from_pretrained('lllyasviel/FramePack_F1_I2V_HY_20250503', torch_dtype=torch.bfloat16).cpu()
352
+ transformer_f1.eval()
353
+ transformer_f1.to(dtype=torch.bfloat16)
354
+ transformer_f1.requires_grad_(False)
355
+ if not high_vram:
356
+ DynamicSwapInstaller.install_model(transformer_f1, device=gpu)
357
+ print("F1 Transformer Loaded.")
358
+ target_transformer_model = transformer_f1
359
+ other_transformer_model = transformer_original
360
+ else:
361
+ raise ValueError(f"Unknown model_type: {model_type}")
362
+
363
+ # Unload the *other* model if it exists and we are in low VRAM mode
364
+ if not high_vram and other_transformer_model is not None:
365
+ print(f"Offloading inactive transformer: {type(other_transformer_model).__name__}")
366
+ offload_model_from_device_for_memory_preservation(other_transformer_model, target_device=gpu, preserved_memory_gb=8)
367
+ # Consider fully unloading if memory pressure is extreme:
368
+ # unload_complete_models(other_transformer_model)
369
+ # if model_type == "Original": transformer_f1 = None
370
+ # else: transformer_original = None
371
+
372
+ current_transformer = target_transformer_model # Set the globally accessible current model
373
+
374
+ # ADDED: Ensure the target model has no LoRAs loaded
375
+ print(f"Ensuring {model_type} transformer has no LoRAs loaded")
376
+ current_transformer = lora_utils.unload_all_loras(current_transformer)
377
+ verify_lora_state(current_transformer, "After model selection")
378
+
379
+ # Ensure the target model is on the correct device if in high VRAM mode
380
+ if high_vram and current_transformer.device != gpu:
381
+ print(f"Moving {model_type} transformer to GPU (High VRAM mode)...")
382
+ current_transformer.to(gpu)
383
+
384
+ # Pre-encode all prompts
385
+ stream_to_use.output_queue.push(('progress', (None, '', make_progress_bar_html(0, 'Text encoding all prompts...'))))
386
+
387
+ if not high_vram:
388
+ fake_diffusers_current_device(text_encoder, gpu)
389
+ load_model_as_complete(text_encoder_2, target_device=gpu)
390
+
391
+ # PROMPT BLENDING: Pre-encode all prompts and store in a list in order
392
+ unique_prompts = []
393
+ for section in prompt_sections:
394
+ if section.prompt not in unique_prompts:
395
+ unique_prompts.append(section.prompt)
396
+
397
+ encoded_prompts = {}
398
+ for prompt in unique_prompts:
399
+ llama_vec, clip_l_pooler = encode_prompt_conds(
400
+ prompt, text_encoder, text_encoder_2, tokenizer, tokenizer_2
401
+ )
402
+ llama_vec, llama_attention_mask = crop_or_pad_yield_mask(llama_vec, length=512)
403
+ encoded_prompts[prompt] = (llama_vec, llama_attention_mask, clip_l_pooler)
404
+
405
+ # PROMPT BLENDING: Build a list of (start_section_idx, prompt) for each prompt
406
+ prompt_change_indices = []
407
+ last_prompt = None
408
+ for idx, section in enumerate(prompt_sections):
409
+ if section.prompt != last_prompt:
410
+ prompt_change_indices.append((idx, section.prompt))
411
+ last_prompt = section.prompt
412
+
413
+ # Encode negative prompt
414
+ if cfg == 1:
415
+ llama_vec_n, llama_attention_mask_n, clip_l_pooler_n = (
416
+ torch.zeros_like(encoded_prompts[prompt_sections[0].prompt][0]),
417
+ torch.zeros_like(encoded_prompts[prompt_sections[0].prompt][1]),
418
+ torch.zeros_like(encoded_prompts[prompt_sections[0].prompt][2])
419
+ )
420
+ else:
421
+ llama_vec_n, clip_l_pooler_n = encode_prompt_conds(
422
+ n_prompt, text_encoder, text_encoder_2, tokenizer, tokenizer_2
423
+ )
424
+ llama_vec_n, llama_attention_mask_n = crop_or_pad_yield_mask(llama_vec_n, length=512)
425
+
426
+ # Processing input image
427
+ stream_to_use.output_queue.push(('progress', (None, '', make_progress_bar_html(0, 'Image processing ...'))))
428
+
429
+ H, W, C = input_image.shape
430
+ height, width = find_nearest_bucket(H, W, resolution=resolutionW)
431
+ input_image_np = resize_and_center_crop(input_image, target_width=width, target_height=height)
432
+
433
+ if save_metadata:
434
+ metadata = PngInfo()
435
+ # prompt_text should be a string here now
436
+ metadata.add_text("prompt", prompt_text)
437
+ metadata.add_text("seed", str(seed))
438
+ Image.fromarray(input_image_np).save(os.path.join(metadata_dir, f'{job_id}.png'), pnginfo=metadata)
439
+
440
+ metadata_dict = {
441
+ "prompt": prompt_text, # Use the original string
442
+ "seed": seed,
443
+ "total_second_length": total_second_length,
444
+ "steps": steps,
445
+ "cfg": cfg,
446
+ "gs": gs,
447
+ "rs": rs,
448
+ "latent_type" : latent_type,
449
+ "blend_sections": blend_sections,
450
+ "latent_window_size": latent_window_size,
451
+ "mp4_crf": mp4_crf,
452
+ "timestamp": time.time(),
453
+ "resolutionW": resolutionW, # Add resolution to metadata
454
+ "resolutionH": resolutionH,
455
+ "model_type": model_type # Add model type to metadata
456
+ }
457
+ # Add LoRA information to metadata if LoRAs are used
458
+ def ensure_list(x):
459
+ if isinstance(x, list):
460
+ return x
461
+ elif x is None:
462
+ return []
463
+ else:
464
+ return [x]
465
+
466
+ selected_loras = ensure_list(selected_loras)
467
+ lora_values = ensure_list(lora_values)
468
+
469
+ if selected_loras and len(selected_loras) > 0:
470
+ lora_data = {}
471
+ for lora_name in selected_loras:
472
+ try:
473
+ idx = lora_loaded_names.index(lora_name)
474
+ weight = lora_values[idx] if lora_values and idx < len(lora_values) else 1.0
475
+ if isinstance(weight, list):
476
+ weight_value = weight[0] if weight and len(weight) > 0 else 1.0
477
+ else:
478
+ weight_value = weight
479
+ lora_data[lora_name] = float(weight_value)
480
+ except ValueError:
481
+ lora_data[lora_name] = 1.0
482
+ metadata_dict["loras"] = lora_data
483
+
484
+ with open(os.path.join(metadata_dir, f'{job_id}.json'), 'w') as f:
485
+ json.dump(metadata_dict, f, indent=2)
486
+ else:
487
+ Image.fromarray(input_image_np).save(os.path.join(metadata_dir, f'{job_id}.png'))
488
+
489
+ input_image_pt = torch.from_numpy(input_image_np).float() / 127.5 - 1
490
+ input_image_pt = input_image_pt.permute(2, 0, 1)[None, :, None]
491
+
492
+ # VAE encoding
493
+ stream_to_use.output_queue.push(('progress', (None, '', make_progress_bar_html(0, 'VAE encoding ...'))))
494
+
495
+ if not high_vram:
496
+ load_model_as_complete(vae, target_device=gpu)
497
+
498
+ start_latent = vae_encode(input_image_pt, vae)
499
+
500
+ # CLIP Vision
501
+ stream_to_use.output_queue.push(('progress', (None, '', make_progress_bar_html(0, 'CLIP Vision encoding ...'))))
502
+
503
+ if not high_vram:
504
+ load_model_as_complete(image_encoder, target_device=gpu)
505
+
506
+ image_encoder_output = hf_clip_vision_encode(input_image_np, feature_extractor, image_encoder)
507
+ image_encoder_last_hidden_state = image_encoder_output.last_hidden_state
508
+
509
+ # Dtype
510
+ for prompt_key in encoded_prompts:
511
+ llama_vec, llama_attention_mask, clip_l_pooler = encoded_prompts[prompt_key]
512
+ llama_vec = llama_vec.to(current_transformer.dtype)
513
+ clip_l_pooler = clip_l_pooler.to(current_transformer.dtype)
514
+ encoded_prompts[prompt_key] = (llama_vec, llama_attention_mask, clip_l_pooler)
515
+
516
+ llama_vec_n = llama_vec_n.to(current_transformer.dtype)
517
+ clip_l_pooler_n = clip_l_pooler_n.to(current_transformer.dtype)
518
+ image_encoder_last_hidden_state = image_encoder_last_hidden_state.to(current_transformer.dtype)
519
+
520
+ # Sampling
521
+ stream_to_use.output_queue.push(('progress', (None, '', make_progress_bar_html(0, 'Start sampling ...'))))
522
+
523
+ rnd = torch.Generator("cpu").manual_seed(seed)
524
+ num_frames = latent_window_size * 4 - 3
525
+
526
+ if model_type == "Original":
527
+ history_latents = torch.zeros(size=(1, 16, 1 + 2 + 16, height // 8, width // 8), dtype=torch.float32).cpu()
528
+ else: # F1 model
529
+ # For the F1 model, prepare the initial frame
530
+ history_latents = torch.zeros(size=(1, 16, 16 + 2 + 1, height // 8, width // 8), dtype=torch.float32).cpu()
531
+ # Append the start frame to history_latents
532
+ history_latents = torch.cat([history_latents, start_latent.to(history_latents)], dim=2)
533
+ total_generated_latent_frames = 1 # Start at 1 because the initial frame is already included
534
+
535
+ history_pixels = None
536
+ if model_type == "Original":
537
+ total_generated_latent_frames = 0
538
+ # Original model uses reversed latent paddings
539
+ latent_paddings = reversed(range(total_latent_sections))
540
+ if total_latent_sections > 4:
541
+ latent_paddings = [3] + [2] * (total_latent_sections - 3) + [1, 0]
542
+ else: # F1 model
543
+ # F1 model doesn't use latent paddings in the same way
544
+ # We'll use a fixed approach with just 0 for last section and 1 for others
545
+ latent_paddings = [1] * (total_latent_sections - 1) + [0]
546
+
547
+ # PROMPT BLENDING: Track section index
548
+ section_idx = 0
549
+
550
+ # ADDED: Completely unload all loras from the current transformer
551
+ current_transformer = lora_utils.unload_all_loras(current_transformer)
552
+ verify_lora_state(current_transformer, "Before loading LoRAs")
553
+
554
+ # --- LoRA loading and scaling ---
555
+ if selected_loras:
556
+ for lora_name in selected_loras:
557
+ idx = lora_loaded_names.index(lora_name)
558
+ lora_file = None
559
+ for ext in [".safetensors", ".pt"]:
560
+ # Find any file that starts with the lora_name and ends with the extension
561
+ matching_files = [f for f in os.listdir(lora_folder_from_settings)
562
+ if f.startswith(lora_name) and f.endswith(ext)]
563
+ if matching_files:
564
+ lora_file = matching_files[0] # Use the first matching file
565
+ break
566
+ if lora_file:
567
+ print(f"Loading LoRA {lora_file} to {model_type} model")
568
+ current_transformer = lora_utils.load_lora(current_transformer, lora_folder_from_settings, lora_file)
569
+ # Set LoRA strength if provided
570
+ if lora_values and idx < len(lora_values):
571
+ lora_strength = float(lora_values[idx])
572
+ print(f"Setting LoRA {lora_name} strength to {lora_strength}")
573
+ # Set scaling for this LoRA by iterating through modules
574
+ for name, module in current_transformer.named_modules():
575
+ if hasattr(module, 'scaling'):
576
+ if isinstance(module.scaling, dict):
577
+ # Handle ModuleDict case (PEFT implementation)
578
+ if lora_name in module.scaling:
579
+ if isinstance(module.scaling[lora_name], torch.Tensor):
580
+ module.scaling[lora_name] = torch.tensor(
581
+ lora_strength, device=module.scaling[lora_name].device
582
+ )
583
+ else:
584
+ module.scaling[lora_name] = lora_strength
585
+ else:
586
+ # Handle direct attribute case for scaling if needed
587
+ if isinstance(module.scaling, torch.Tensor):
588
+ module.scaling = torch.tensor(
589
+ lora_strength, device=module.scaling.device
590
+ )
591
+ else:
592
+ module.scaling = lora_strength
593
+ else:
594
+ print(f"LoRA file for {lora_name} not found!")
595
+
596
+ # ADDED: Verify LoRA state after loading
597
+ verify_lora_state(current_transformer, "After loading LoRAs")
598
+
599
+ # --- Callback for progress ---
600
+ def callback(d):
601
+ preview = d['denoised']
602
+ preview = vae_decode_fake(preview)
603
+ preview = (preview * 255.0).detach().cpu().numpy().clip(0, 255).astype(np.uint8)
604
+ preview = einops.rearrange(preview, 'b c t h w -> (b h) (t w) c')
605
+
606
+ if stream_to_use.input_queue.top() == 'end':
607
+ stream_to_use.output_queue.push(('end', None))
608
+ raise KeyboardInterrupt('User ends the task.')
609
+
610
+ current_step = d['i'] + 1
611
+ percentage = int(100.0 * current_step / steps)
612
+ current_pos = (total_generated_latent_frames * 4 - 3) / 30
613
+ original_pos = total_second_length - current_pos
614
+ if current_pos < 0: current_pos = 0
615
+ if original_pos < 0: original_pos = 0
616
+
617
+ hint = f'Sampling {current_step}/{steps}'
618
+ if model_type == "Original":
619
+ desc = f'Total generated frames: {int(max(0, total_generated_latent_frames * 4 - 3))}, ' \
620
+ f'Video length: {max(0, (total_generated_latent_frames * 4 - 3) / 30):.2f} seconds (FPS-30). ' \
621
+ f'Current position: {current_pos:.2f}s (original: {original_pos:.2f}s). ' \
622
+ f'using prompt: {current_prompt[:256]}...'
623
+ else: # F1 model
624
+ desc = f'Total generated frames: {int(max(0, total_generated_latent_frames * 4 - 3))}, ' \
625
+ f'Video length: {max(0, (total_generated_latent_frames * 4 - 3) / 30):.2f} seconds (FPS-30). ' \
626
+ f'Current position: {current_pos:.2f}s. ' \
627
+ f'using prompt: {current_prompt[:256]}...'
628
+
629
+ progress_data = {
630
+ 'preview': preview,
631
+ 'desc': desc,
632
+ 'html': make_progress_bar_html(percentage, hint)
633
+ }
634
+ if job_stream is not None:
635
+ job = job_queue.get_job(job_id)
636
+ if job:
637
+ job.progress_data = progress_data
638
+
639
+ stream_to_use.output_queue.push(('progress', (preview, desc, make_progress_bar_html(percentage, hint))))
640
+
641
+ # --- Main generation loop ---
642
+ for latent_padding in latent_paddings:
643
+ is_last_section = latent_padding == 0
644
+ latent_padding_size = latent_padding * latent_window_size
645
+
646
+ if stream_to_use.input_queue.top() == 'end':
647
+ stream_to_use.output_queue.push(('end', None))
648
+ return
649
+
650
+ current_time_position = (total_generated_latent_frames * 4 - 3) / 30 # in seconds
651
+ if current_time_position < 0:
652
+ current_time_position = 0.01
653
+
654
+ # Find the appropriate prompt for this section
655
+ current_prompt = prompt_sections[0].prompt # Default to first prompt
656
+ for section in prompt_sections:
657
+ if section.start_time <= current_time_position and (section.end_time is None or current_time_position < section.end_time):
658
+ current_prompt = section.prompt
659
+ break
660
+
661
+ # PROMPT BLENDING: Find if we're in a blend window
662
+ blend_alpha = None
663
+ prev_prompt = current_prompt
664
+ next_prompt = current_prompt
665
+
666
+ # Only try to blend if we have prompt change indices and multiple sections
667
+ if prompt_change_indices and len(prompt_sections) > 1:
668
+ for i, (change_idx, prompt) in enumerate(prompt_change_indices):
669
+ if section_idx < change_idx:
670
+ prev_prompt = prompt_change_indices[i - 1][1] if i > 0 else prompt
671
+ next_prompt = prompt
672
+ blend_start = change_idx
673
+ blend_end = change_idx + blend_sections
674
+ if section_idx >= change_idx and section_idx < blend_end:
675
+ blend_alpha = (section_idx - change_idx + 1) / blend_sections
676
+ break
677
+ elif section_idx == change_idx:
678
+ # At the exact change, start blending
679
+ if i > 0:
680
+ prev_prompt = prompt_change_indices[i - 1][1]
681
+ next_prompt = prompt
682
+ blend_alpha = 1.0 / blend_sections
683
+ else:
684
+ prev_prompt = prompt
685
+ next_prompt = prompt
686
+ blend_alpha = None
687
+ break
688
+ else:
689
+ # After last change, no blending
690
+ prev_prompt = current_prompt
691
+ next_prompt = current_prompt
692
+ blend_alpha = None
693
+
694
+ # Get the encoded prompt for this section
695
+ if blend_alpha is not None and prev_prompt != next_prompt:
696
+ # Blend embeddings
697
+ prev_llama_vec, prev_llama_attention_mask, prev_clip_l_pooler = encoded_prompts[prev_prompt]
698
+ next_llama_vec, next_llama_attention_mask, next_clip_l_pooler = encoded_prompts[next_prompt]
699
+ llama_vec = (1 - blend_alpha) * prev_llama_vec + blend_alpha * next_llama_vec
700
+ llama_attention_mask = prev_llama_attention_mask # usually same
701
+ clip_l_pooler = (1 - blend_alpha) * prev_clip_l_pooler + blend_alpha * next_clip_l_pooler
702
+ print(f"Blending prompts: '{prev_prompt[:30]}...' -> '{next_prompt[:30]}...', alpha={blend_alpha:.2f}")
703
+ else:
704
+ llama_vec, llama_attention_mask, clip_l_pooler = encoded_prompts[current_prompt]
705
+
706
+ original_time_position = total_second_length - current_time_position
707
+ if original_time_position < 0:
708
+ original_time_position = 0
709
+
710
+ print(f'latent_padding_size = {latent_padding_size}, is_last_section = {is_last_section}, '
711
+ f'time position: {current_time_position:.2f}s (original: {original_time_position:.2f}s), '
712
+ f'using prompt: {current_prompt[:60]}...')
713
+
714
+ if model_type == "Original":
715
+ # Original model uses the standard indices approach
716
+ indices = torch.arange(0, sum([1, latent_padding_size, latent_window_size, 1, 2, 16])).unsqueeze(0)
717
+ clean_latent_indices_pre, blank_indices, latent_indices, clean_latent_indices_post, clean_latent_2x_indices, clean_latent_4x_indices = indices.split([1, latent_padding_size, latent_window_size, 1, 2, 16], dim=1)
718
+ clean_latent_indices = torch.cat([clean_latent_indices_pre, clean_latent_indices_post], dim=1)
719
+ else: # F1 model
720
+ # F1 model uses a different indices approach
721
+ # Special case: use 5 when latent_window_size is 4.5
722
+ effective_window_size = 5 if latent_window_size == 4.5 else int(latent_window_size)
723
+ indices = torch.arange(0, sum([1, 16, 2, 1, latent_window_size])).unsqueeze(0)
724
+ clean_latent_indices_start, clean_latent_4x_indices, clean_latent_2x_indices, clean_latent_1x_indices, latent_indices = indices.split([1, 16, 2, 1, latent_window_size], dim=1)
725
+ clean_latent_indices = torch.cat([clean_latent_indices_start, clean_latent_1x_indices], dim=1)
726
+
727
+ print(f"F1 model indices: clean_latent_indices shape={clean_latent_indices.shape}, latent_indices shape={latent_indices.shape}")
728
+
729
+ if model_type == "Original":
730
+ clean_latents_pre = start_latent.to(history_latents)
731
+ clean_latents_post, clean_latents_2x, clean_latents_4x = history_latents[:, :, :1 + 2 + 16, :, :].split([1, 2, 16], dim=2)
732
+ clean_latents = torch.cat([clean_latents_pre, clean_latents_post], dim=2)
733
+ else: # F1 model
734
+ # For F1, we take the last frames for clean latents
735
+ clean_latents_4x, clean_latents_2x, clean_latents_1x = history_latents[:, :, -sum([16, 2, 1]):, :, :].split([16, 2, 1], dim=2)
736
+ # For F1, we prepend the start latent to clean_latents_1x
737
+ clean_latents = torch.cat([start_latent.to(history_latents), clean_latents_1x], dim=2)
738
+
739
+ # Print debug info for F1 model
740
+ print(f"F1 model section {section_idx+1}/{total_latent_sections}, latent_padding={latent_padding}")
741
+
742
+ if not high_vram:
743
+ # Unload VAE etc. before loading transformer
744
+ unload_complete_models(vae, text_encoder, text_encoder_2, image_encoder)
745
+ move_model_to_device_with_memory_preservation(current_transformer, target_device=gpu, preserved_memory_gb=gpu_memory_preservation)
746
+ if selected_loras:
747
+ move_lora_adapters_to_device(current_transformer, gpu)
748
+
749
+ if use_teacache:
750
+ current_transformer.initialize_teacache(enable_teacache=True, num_steps=steps)
751
+ else:
752
+ current_transformer.initialize_teacache(enable_teacache=False)
753
+
754
+ generated_latents = sample_hunyuan(
755
+ transformer=current_transformer,
756
+ sampler='unipc',
757
+ width=width,
758
+ height=height,
759
+ frames=num_frames,
760
+ real_guidance_scale=cfg,
761
+ distilled_guidance_scale=gs,
762
+ guidance_rescale=rs,
763
+ num_inference_steps=steps,
764
+ generator=rnd,
765
+ prompt_embeds=llama_vec,
766
+ prompt_embeds_mask=llama_attention_mask,
767
+ prompt_poolers=clip_l_pooler,
768
+ negative_prompt_embeds=llama_vec_n,
769
+ negative_prompt_embeds_mask=llama_attention_mask_n,
770
+ negative_prompt_poolers=clip_l_pooler_n,
771
+ device=gpu,
772
+ dtype=torch.bfloat16,
773
+ image_embeddings=image_encoder_last_hidden_state,
774
+ latent_indices=latent_indices,
775
+ clean_latents=clean_latents,
776
+ clean_latent_indices=clean_latent_indices,
777
+ clean_latents_2x=clean_latents_2x,
778
+ clean_latent_2x_indices=clean_latent_2x_indices,
779
+ clean_latents_4x=clean_latents_4x,
780
+ clean_latent_4x_indices=clean_latent_4x_indices,
781
+ callback=callback,
782
+ )
783
+
784
+ total_generated_latent_frames += int(generated_latents.shape[2])
785
+ if model_type == "Original":
786
+ history_latents = torch.cat([generated_latents.to(history_latents), history_latents], dim=2)
787
+ else: # F1 model
788
+ # For F1, we append new frames to the end
789
+ history_latents = torch.cat([history_latents, generated_latents.to(history_latents)], dim=2)
790
+
791
+ if not high_vram:
792
+ if selected_loras:
793
+ move_lora_adapters_to_device(current_transformer, cpu)
794
+ offload_model_from_device_for_memory_preservation(current_transformer, target_device=gpu, preserved_memory_gb=8)
795
+ load_model_as_complete(vae, target_device=gpu)
796
+
797
+ if model_type == "Original":
798
+ real_history_latents = history_latents[:, :, :total_generated_latent_frames, :, :]
799
+ else: # F1 model
800
+ # For F1, we take frames from the end
801
+ real_history_latents = history_latents[:, :, -total_generated_latent_frames:, :, :]
802
+
803
+ if history_pixels is None:
804
+ history_pixels = vae_decode(real_history_latents, vae).cpu()
805
+ else:
806
+ section_latent_frames = (latent_window_size * 2 + 1) if is_last_section else (latent_window_size * 2)
807
+ overlapped_frames = latent_window_size * 4 - 3
808
+
809
+ if model_type == "Original":
810
+ current_pixels = vae_decode(real_history_latents[:, :, :section_latent_frames], vae).cpu()
811
+ history_pixels = soft_append_bcthw(current_pixels, history_pixels, overlapped_frames)
812
+ else: # F1 model
813
+ # For F1, we take frames from the end
814
+ print(f"F1 model section {section_idx+1}/{total_latent_sections}, section_latent_frames={section_latent_frames}")
815
+ print(f"F1 model real_history_latents shape: {real_history_latents.shape}, taking last {section_latent_frames} frames")
816
+
817
+ # Get the frames from the end of real_history_latents
818
+ current_pixels = vae_decode(real_history_latents[:, :, -section_latent_frames:], vae).cpu()
819
+
820
+ print(f"F1 model current_pixels shape: {current_pixels.shape}, history_pixels shape: {history_pixels.shape if history_pixels is not None else 'None'}")
821
+
822
+ # For F1 model, history_pixels is first, current_pixels is second
823
+ history_pixels = soft_append_bcthw(history_pixels, current_pixels, overlapped_frames)
824
+
825
+ print(f"F1 model after append, history_pixels shape: {history_pixels.shape}")
826
+
827
+ if not high_vram:
828
+ unload_complete_models()
829
+
830
+ output_filename = os.path.join(output_dir, f'{job_id}_{total_generated_latent_frames}.mp4')
831
+ save_bcthw_as_mp4(history_pixels, output_filename, fps=30, crf=mp4_crf)
832
+ print(f'Decoded. Current latent shape {real_history_latents.shape}; pixel shape {history_pixels.shape}')
833
+ stream_to_use.output_queue.push(('file', output_filename))
834
+
835
+ if is_last_section:
836
+ break
837
+
838
+ section_idx += 1 # PROMPT BLENDING: increment section index
839
+
840
+ # ADDED: Unload all LoRAs after generation completed
841
+ if selected_loras:
842
+ print("Unloading all LoRAs after generation completed")
843
+ current_transformer = lora_utils.unload_all_loras(current_transformer)
844
+ verify_lora_state(current_transformer, "After generation completed")
845
+ import gc
846
+ gc.collect()
847
+ if torch.cuda.is_available():
848
+ torch.cuda.empty_cache()
849
+
850
+ except:
851
+ traceback.print_exc()
852
+ # ADDED: Unload all LoRAs after error
853
+ if current_transformer is not None and selected_loras:
854
+ print("Unloading all LoRAs after error")
855
+ current_transformer = lora_utils.unload_all_loras(current_transformer)
856
+ verify_lora_state(current_transformer, "After error")
857
+ import gc
858
+ gc.collect()
859
+ if torch.cuda.is_available():
860
+ torch.cuda.empty_cache()
861
+
862
+ stream_to_use.output_queue.push(('error', f"Error during generation: {traceback.format_exc()}"))
863
+ if not high_vram:
864
+ # Ensure all models including the potentially active transformer are unloaded on error
865
+ unload_complete_models(
866
+ text_encoder, text_encoder_2, image_encoder, vae, current_transformer
867
+ )
868
+
869
+ if clean_up_videos:
870
+ try:
871
+ video_files = [
872
+ f for f in os.listdir(output_dir)
873
+ if f.startswith(f"{job_id}_") and f.endswith(".mp4")
874
+ ]
875
+ print(f"Video files found for cleanup: {video_files}")
876
+ if video_files:
877
+ def get_frame_count(filename):
878
+ try:
879
+ # Handles filenames like jobid_123.mp4
880
+ return int(filename.replace(f"{job_id}_", "").replace(".mp4", ""))
881
+ except Exception:
882
+ return -1
883
+ video_files_sorted = sorted(video_files, key=get_frame_count)
884
+ print(f"Sorted video files: {video_files_sorted}")
885
+ final_video = video_files_sorted[-1]
886
+ for vf in video_files_sorted[:-1]:
887
+ full_path = os.path.join(output_dir, vf)
888
+ try:
889
+ os.remove(full_path)
890
+ print(f"Deleted intermediate video: {full_path}")
891
+ except Exception as e:
892
+ print(f"Failed to delete {full_path}: {e}")
893
+ except Exception as e:
894
+ print(f"Error during video cleanup: {e}")
895
+
896
+ # ADDED: Final verification of LoRA state
897
+ verify_lora_state(current_transformer, "Worker end")
898
+
899
+ stream_to_use.output_queue.push(('end', None))
900
+ return
901
+
902
+
903
+
904
+ # Set the worker function for the job queue
905
+ job_queue.set_worker_function(worker)
906
+
907
+
908
+ def process(
909
+ model_type,
910
+ input_image,
911
+ prompt_text,
912
+ n_prompt,
913
+ seed,
914
+ total_second_length,
915
+ latent_window_size,
916
+ steps,
917
+ cfg,
918
+ gs,
919
+ rs,
920
+ gpu_memory_preservation,
921
+ use_teacache,
922
+ mp4_crf,
923
+ save_metadata,
924
+ blend_sections,
925
+ latent_type,
926
+ clean_up_videos,
927
+ selected_loras,
928
+ resolutionW,
929
+ resolutionH,
930
+ lora_loaded_names,
931
+ *lora_values
932
+ ):
933
+
934
+ # If no input image is provided, create a default image
935
+ # based on the selected latent_type.
936
+ if input_image is None:
937
+ default_height, default_width = resolutionH, resolutionW
938
+ if latent_type == "White":
939
+ # Create a white image
940
+ input_image = np.ones((default_height, default_width, 3), dtype=np.uint8) * 255
941
+ print("No input image provided. Using a blank white image.")
942
+
943
+ elif latent_type == "Noise":
944
+ # Create a noise image
945
+ input_image = np.random.randint(0, 256, (default_height, default_width, 3), dtype=np.uint8)
946
+ print("No input image provided. Using a random noise image.")
947
+
948
+ elif latent_type == "Green Screen":
949
+ # Create a green screen image with standard chroma key green (0, 177, 64)
950
+ input_image = np.zeros((default_height, default_width, 3), dtype=np.uint8)
951
+ input_image[:, :, 1] = 177 # Green channel
952
+ input_image[:, :, 2] = 64 # Blue channel
953
+ # Red channel remains 0
954
+ print("No input image provided. Using a standard chroma key green screen.")
955
+
956
+ else: # Default to "Black" or any other value
957
+ # Create a black image
958
+ input_image = np.zeros((default_height, default_width, 3), dtype=np.uint8)
959
+ print(f"No input image provided. Using a blank black image (latent_type: {latent_type}).")
960
+
961
+
962
+ # Create job parameters
963
+ job_params = {
964
+ 'model_type': model_type,
965
+ 'input_image': input_image.copy(), # Make a copy to avoid reference issues
966
+ 'prompt_text': prompt_text,
967
+ 'n_prompt': n_prompt,
968
+ 'seed': seed,
969
+ 'total_second_length': total_second_length,
970
+ 'latent_window_size': latent_window_size,
971
+ 'latent_type': latent_type,
972
+ 'steps': steps,
973
+ 'cfg': cfg,
974
+ 'gs': gs,
975
+ 'rs': rs,
976
+ 'blend_sections': blend_sections,
977
+ 'gpu_memory_preservation': gpu_memory_preservation,
978
+ 'use_teacache': use_teacache,
979
+ 'mp4_crf': mp4_crf,
980
+ 'save_metadata': save_metadata,
981
+ 'selected_loras': selected_loras,
982
+ 'clean_up_videos': clean_up_videos,
983
+ 'output_dir': settings.get("output_dir"),
984
+ 'metadata_dir': settings.get("metadata_dir"),
985
+ 'resolutionW': resolutionW, # Add resolution parameter
986
+ 'resolutionH': resolutionH,
987
+ 'lora_loaded_names': lora_loaded_names
988
+ }
989
+
990
+ # Add LoRA values if provided - extract them from the tuple
991
+ if lora_values:
992
+ # Convert tuple to list
993
+ lora_values_list = list(lora_values)
994
+ job_params['lora_values'] = lora_values_list
995
+
996
+ # Add job to queue
997
+ job_id = job_queue.add_job(job_params)
998
+ print(f"Added job {job_id} to queue")
999
+
1000
+ queue_status = update_queue_status()
1001
+ # Return immediately after adding to queue
1002
+ return None, job_id, None, '', f'Job added to queue. Job ID: {job_id}', gr.update(interactive=True), gr.update(interactive=True)
1003
+
1004
+
1005
+
1006
+ def end_process():
1007
+ """Cancel the current running job and update the queue status"""
1008
+ print("Cancelling current job")
1009
+ with job_queue.lock:
1010
+ if job_queue.current_job:
1011
+ job_id = job_queue.current_job.id
1012
+ print(f"Cancelling job {job_id}")
1013
+
1014
+ # Send the end signal to the job's stream
1015
+ if job_queue.current_job.stream:
1016
+ job_queue.current_job.stream.input_queue.push('end')
1017
+
1018
+ # Mark the job as cancelled
1019
+ job_queue.current_job.status = JobStatus.CANCELLED
1020
+ job_queue.current_job.completed_at = time.time() # Set completion time
1021
+
1022
+ # Force an update to the queue status
1023
+ return update_queue_status()
1024
+
1025
+
1026
+ def update_queue_status():
1027
+ """Update queue status and refresh job positions"""
1028
+ jobs = job_queue.get_all_jobs()
1029
+ for job in jobs:
1030
+ if job.status == JobStatus.PENDING:
1031
+ job.queue_position = job_queue.get_queue_position(job.id)
1032
+
1033
+ # Make sure to update current running job info
1034
+ if job_queue.current_job:
1035
+ # Make sure the running job is showing status = RUNNING
1036
+ job_queue.current_job.status = JobStatus.RUNNING
1037
+
1038
+ return format_queue_status(jobs)
1039
+
1040
+
1041
+ def monitor_job(job_id):
1042
+ """
1043
+ Monitor a specific job and update the UI with the latest video segment as soon as it's available.
1044
+ """
1045
+ if not job_id:
1046
+ yield None, None, None, '', 'No job ID provided', gr.update(interactive=True), gr.update(interactive=True)
1047
+ return
1048
+
1049
+ last_video = None # Track the last video file shown
1050
+
1051
+ while True:
1052
+ job = job_queue.get_job(job_id)
1053
+ if not job:
1054
+ yield None, job_id, None, '', 'Job not found', gr.update(interactive=True), gr.update(interactive=True)
1055
+ return
1056
+
1057
+ # If a new video file is available, yield it immediately
1058
+ if job.result and job.result != last_video:
1059
+ last_video = job.result
1060
+ # You can also update preview/progress here if desired
1061
+ yield last_video, job_id, gr.update(visible=True), '', '', gr.update(interactive=True), gr.update(interactive=True)
1062
+
1063
+ # Handle job status and progress
1064
+ if job.status == JobStatus.PENDING:
1065
+ position = job_queue.get_queue_position(job_id)
1066
+ yield last_video, job_id, gr.update(visible=True), '', f'Waiting in queue. Position: {position}', gr.update(interactive=True), gr.update(interactive=True)
1067
+
1068
+ elif job.status == JobStatus.RUNNING:
1069
+ if job.progress_data and 'preview' in job.progress_data:
1070
+ preview = job.progress_data.get('preview')
1071
+ desc = job.progress_data.get('desc', '')
1072
+ html = job.progress_data.get('html', '')
1073
+ yield last_video, job_id, gr.update(visible=True, value=preview), desc, html, gr.update(interactive=True), gr.update(interactive=True)
1074
+ else:
1075
+ yield last_video, job_id, gr.update(visible=True), '', 'Processing...', gr.update(interactive=True), gr.update(interactive=True)
1076
+
1077
+ elif job.status == JobStatus.COMPLETED:
1078
+ # Show the final video
1079
+ yield last_video, job_id, gr.update(visible=True), '', '', gr.update(interactive=True), gr.update(interactive=True)
1080
+ break
1081
+
1082
+ elif job.status == JobStatus.FAILED:
1083
+ yield last_video, job_id, gr.update(visible=True), '', f'Error: {job.error}', gr.update(interactive=True), gr.update(interactive=True)
1084
+ break
1085
+
1086
+ elif job.status == JobStatus.CANCELLED:
1087
+ yield last_video, job_id, gr.update(visible=True), '', 'Job cancelled', gr.update(interactive=True), gr.update(interactive=True)
1088
+ break
1089
+
1090
+ # Wait a bit before checking again
1091
+ time.sleep(0.5)
1092
+
1093
+
1094
+ # Set Gradio temporary directory from settings
1095
+ os.environ["GRADIO_TEMP_DIR"] = settings.get("gradio_temp_dir")
1096
+
1097
+ # Create the interface
1098
+ interface = create_interface(
1099
+ process_fn=process,
1100
+ monitor_fn=monitor_job,
1101
+ end_process_fn=end_process,
1102
+ update_queue_status_fn=update_queue_status,
1103
+ load_lora_file_fn=load_lora_file,
1104
+ job_queue=job_queue,
1105
+ settings=settings,
1106
+ lora_names=lora_names # Explicitly pass the found LoRA names
1107
+ )
1108
+
1109
+ # Launch the interface
1110
+ interface.launch(
1111
+ server_name=args.server,
1112
+ server_port=args.port,
1113
+ share=args.share,
1114
+ inbrowser=args.inbrowser
1115
+ )