AmyTheKamiwazaGirl2001 committed
Commit 99c376f
1 Parent(s): 1852651

Upload 37 files
.gitattributes CHANGED
@@ -46,3 +46,4 @@ static/videos/shiba.mp4 filter=lfs diff=lfs merge=lfs -text
 static/videos/steve.mp4 filter=lfs diff=lfs merge=lfs -text
 static/videos/teaser.mp4 filter=lfs diff=lfs merge=lfs -text
 static/videos/toby.mp4 filter=lfs diff=lfs merge=lfs -text
+ebsynth/ebsynth.dll filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1,190 @@
+### macOS template
+# General
+.DS_Store
+.AppleDouble
+.LSOverride
+
+# Icon must end with two \r
+Icon
+
+# Thumbnails
+._*
+
+# Files that might appear in the root of a volume
+.DocumentRevisions-V100
+.fseventsd
+.Spotlight-V100
+.TemporaryItems
+.Trashes
+.VolumeIcon.icns
+.com.apple.timemachine.donotpresent
+
+# Directories potentially created on remote AFP share
+.AppleDB
+.AppleDesktop
+Network Trash Folder
+Temporary Items
+.apdisk
+
+### Python template
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+share/python-wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+
+# PyInstaller
+# Usually these files are written by a python script from a template
+# before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.nox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+*.py,cover
+.hypothesis/
+.pytest_cache/
+cover/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+db.sqlite3
+db.sqlite3-journal
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+.pybuilder/
+target/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+
+# IPython
+profile_default/
+ipython_config.py
+
+# pyenv
+# For a library or package, you might want to ignore these files since the code is
+# intended to run in multiple environments; otherwise, check them in:
+# .python-version
+
+# pipenv
+# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+# However, in case of collaboration, if having platform-specific dependencies or dependencies
+# having no cross-platform support, pipenv may install dependencies that don't work, or not
+# install all needed dependencies.
+#Pipfile.lock
+
+# poetry
+# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
+# This is especially recommended for binary packages to ensure reproducibility, and is more
+# commonly ignored for libraries.
+# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
+#poetry.lock
+
+# pdm
+# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
+#pdm.lock
+# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
+# in version control.
+# https://pdm.fming.dev/#use-with-ide
+.pdm.toml
+
+# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
+__pypackages__/
+
+# Celery stuff
+celerybeat-schedule
+celerybeat.pid
+
+# SageMath parsed files
+*.sage.py
+
+# Environments
+.env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
+.dmypy.json
+dmypy.json
+
+# Pyre type checker
+.pyre/
+
+# pytype static type analyzer
+.pytype/
+
+# Cython debug symbols
+cython_debug/
+
+# PyCharm
+# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
+# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
+# and can be added to the global gitignore or merged into this file. For a more nuclear
+# option (not recommended) you can uncomment the following to ignore the entire idea folder.
+#.idea/
+
.idea/.gitignore ADDED
@@ -0,0 +1,8 @@
+# Default ignored files
+/shelf/
+/workspace.xml
+# Editor-based HTTP Client requests
+/httpRequests/
+# Datasource local storage ignored files
+/dataSources/
+/dataSources.local.xml
.idea/inspectionProfiles/Project_Default.xml ADDED
@@ -0,0 +1,12 @@
+<component name="InspectionProjectProfileManager">
+  <profile version="1.0">
+    <option name="myName" value="Project Default" />
+    <inspection_tool class="PyUnresolvedReferencesInspection" enabled="true" level="WARNING" enabled_by_default="true">
+      <option name="ignoredIdentifiers">
+        <list>
+          <option value="scripts.*" />
+        </list>
+      </option>
+    </inspection_tool>
+  </profile>
+</component>
.idea/inspectionProfiles/profiles_settings.xml ADDED
@@ -0,0 +1,6 @@
+<component name="InspectionProjectProfileManager">
+  <settings>
+    <option name="USE_PROJECT_PROFILE" value="false" />
+    <version value="1.0" />
+  </settings>
+</component>
.idea/misc.xml ADDED
@@ -0,0 +1,4 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project version="4">
+  <component name="ProjectRootManager" version="2" project-jdk-name="Python 3.10 (venv)" project-jdk-type="Python SDK" />
+</project>
.idea/modules.xml ADDED
@@ -0,0 +1,8 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project version="4">
+  <component name="ProjectModuleManager">
+    <modules>
+      <module fileurl="file://$PROJECT_DIR$/.idea/sd-webui-mov2mov.iml" filepath="$PROJECT_DIR$/.idea/sd-webui-mov2mov.iml" />
+    </modules>
+  </component>
+</project>
.idea/sd-webui-mov2mov.iml ADDED
@@ -0,0 +1,12 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<module type="PYTHON_MODULE" version="4">
+  <component name="NewModuleRootManager">
+    <content url="file://$MODULE_DIR$" />
+    <orderEntry type="jdk" jdkName="Python 3.10 (venv)" jdkType="Python SDK" />
+    <orderEntry type="sourceFolder" forTests="false" />
+  </component>
+  <component name="PyDocumentationSettings">
+    <option name="format" value="PLAIN" />
+    <option name="myDocStringFormat" value="Plain" />
+  </component>
+</module>
.idea/vcs.xml ADDED
@@ -0,0 +1,6 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project version="4">
+  <component name="VcsDirectoryMappings">
+    <mapping directory="" vcs="Git" />
+  </component>
+</project>
CHANGELOG.md ADDED
@@ -0,0 +1,20 @@
+### 2023/9/30
+1. Automatic parsing of the video's FPS.
+2. Video editing features:
+   1. Keyframes can be selected manually or generated automatically.
+   2. Reverse-infer keyframe tags.
+   3. Automatically synthesize the video from keyframes via [Ezsynth](https://github.com/Trentonom0r3/Ezsynth).
+   4. Currently only supported on Windows; if your system is not supported, you can close this tab.
+
+### 2023/9/24
+1. Moved the tab behind img2img.
+2. Fixed video synthesis failure on macOS.
+3. Fixed the refiner not taking effect.
+
+### 2023/9/23
+1. Fixed the tab not being displayed in SD 1.6.
+2. Inference of video width and height.
+3. Support for Refiner.
+4. Temporarily removed the modnet functionality.
+5. Temporarily removed the frame-by-frame prompt feature (note: I believe there's a better approach; it will be added in the next version).
+6. Changed video synthesis from ffmpeg to imageio.
CHANGELOG_CN.md ADDED
@@ -0,0 +1,26 @@
+### 2023/9/30
+1. Automatic parsing of the video's FPS.
+2. Video editing features:
+   1. Keyframes can be selected manually or generated automatically.
+   2. Reverse-infer keyframe tags.
+   3. Automatically synthesize the video from keyframes via [Ezsynth](https://github.com/Trentonom0r3/Ezsynth).
+   4. Currently only available on Windows; if your system is not supported, you can close this tab.
+
+### 2023/9/26
+1. Keyframe editing.
+2. Automatic reverse inference of keyframe tags.
+
+### 2023/9/24
+1. Moved the tab behind img2img.
+2. Fixed video synthesis failure on macOS.
+3. Fixed the refiner not taking effect.
+
+
+### 2023/9/23
+
+1. Fixed the tab not being displayed in SD 1.6.
+2. Inference of video width and height.
+3. Support for Refiner.
+4. Temporarily removed the modnet functionality.
+5. Temporarily removed the frame-by-frame prompt feature (note: I believe there's a better approach; it will be added in the next version).
+6. Changed video synthesis from ffmpeg to imageio.
LICENSE ADDED
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2023 Scholar0
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
README.md CHANGED
@@ -1,27 +1,65 @@
----
-title: Mov2Mov
-emoji: 🧠
-colorFrom: yellow
-colorTo: indigo
-sdk: static
-pinned: false
-license: mit
-short_description: Mov2Mov
----
-
-# Nerfies
-
-This is the repository that contains source code for the [Nerfies website](https://nerfies.github.io).
-
-If you find Nerfies useful for your work please cite:
-```
-@article{park2021nerfies
-author = {Park, Keunhong and Sinha, Utkarsh and Barron, Jonathan T. and Bouaziz, Sofien and Goldman, Dan B and Seitz, Steven M. and Martin-Brualla, Ricardo},
-title = {Nerfies: Deformable Neural Radiance Fields},
-journal = {ICCV},
-year = {2021},
-}
-```
-
-# Website License
-<a rel="license" href="http://creativecommons.org/licenses/by-sa/4.0/"><img alt="Creative Commons License" style="border-width:0" src="https://i.creativecommons.org/l/by-sa/4.0/88x31.png" /></a><br />This work is licensed under a <a rel="license" href="http://creativecommons.org/licenses/by-sa/4.0/">Creative Commons Attribution-ShareAlike 4.0 International License</a>.
+[English](README.md) | [中文简体](README_CN.md)
+
+## Mov2mov
+
+This is the Mov2mov plugin for Automatic1111/stable-diffusion-webui.
+
+![img.png](images/2.jpg)
+![img1.png](images/1.png)
+
+Features:
+- Process frames directly from a video
+- Package the processed frames back into a video
+- Video editing (beta)
+  - Dramatically reduce video flicker through keyframe compositing!
+  - You can select keyframes yourself or generate them automatically.
+  - Reverse-infer keyframe tags.
+  - Currently only available on Windows; if your system is not supported, you can close this tab.
+
+Also, mov2mov works even better together with the [bg-mask](https://github.com/Scholar01/sd-webui-bg-mask) plugin 😃
+
+# Table of Contents
+
+- [Table of Contents](#table-of-contents)
+- [Usage Regulations](#usage-regulations)
+- [Installation](#installation)
+- [Change Log](#change-log)
+- [Instructions](#instructions)
+- [Thanks](#thanks)
+
+## Usage Regulations
+
+1. Please resolve the licensing of your video sources yourself. Any problem caused by converting unauthorized videos is borne entirely by the user and has nothing to do with mov2mov!
+2. Any video made with mov2mov and published to a video platform must clearly state the source of the converted video in its description. For example, if you convert someone else's video with AI, you must provide a clear link to the original video; if you use your own video, you must state that in the description as well.
+3. All copyright issues caused by the input source are borne by the user. Note that many videos explicitly state that they may not be reproduced or copied!
+4. Please strictly comply with national laws and regulations and make sure the content is legal and compliant. Any legal liability arising from the use of this plugin is borne by the user and has nothing to do with mov2mov!
+
+## Installation
+
+1. Open the Extensions tab.
+2. Click Install from URL.
+3. Enter the URL of the extension's git repository.
+4. Click Install.
+5. Restart the WebUI.
+
+## Change Log
+
+[Change Log](CHANGELOG.md)
+
+## Instructions
+
+- Video tutorials:
+  - [https://www.bilibili.com/video/BV1Mo4y1a7DF](https://www.bilibili.com/video/BV1Mo4y1a7DF)
+  - [https://www.bilibili.com/video/BV1rY4y1C7Q5](https://www.bilibili.com/video/BV1rY4y1C7Q5)
+- QQ channel: [https://pd.qq.com/s/akxpjjsgd](https://pd.qq.com/s/akxpjjsgd)
+- Discord: [https://discord.gg/hUzF3kQKFW](https://discord.gg/hUzF3kQKFW)
+
+## Thanks
+
+- modnet-entry: [https://github.com/RimoChan/modnet-entry](https://github.com/RimoChan/modnet-entry)
+- MODNet: [https://github.com/ZHKKKe/MODNet](https://github.com/ZHKKKe/MODNet)
+- Ezsynth: [https://github.com/Trentonom0r3/Ezsynth](https://github.com/Trentonom0r3/Ezsynth)
README_CN.md ADDED
@@ -0,0 +1,63 @@
+[English](README.md) | [中文简体](README_CN.md)
+
+# Mov2mov: the Mov2mov plugin for Automatic1111/stable-diffusion-webui
+
+![img.png](images/2.jpg)
+![img1.png](images/1.png)
+
+Features:
+- Process frames directly from a video
+- Package the processed frames back into a video
+- Video editing (beta)
+  - Dramatically reduce video flicker through keyframe compositing!
+  - Keyframes can be selected manually or generated automatically
+  - Reverse-infer keyframe tags
+  - Currently only available on Windows; if your system is not supported, you can close this tab.
+
+Also, mov2mov works even better together with the [bg-mask](https://github.com/Scholar01/sd-webui-bg-mask) plugin 😃
+
+# Table of Contents
+
+- [Mov2mov: the Mov2mov plugin for Automatic1111/stable-diffusion-webui](#mov2mov-the-mov2mov-plugin-for-automatic1111stable-diffusion-webui)
+- [Table of Contents](#table-of-contents)
+- [Usage Regulations](#usage-regulations)
+- [Installation](#installation)
+- [Change Log](#change-log)
+- [Instructions](#instructions)
+- [Thanks](#thanks)
+
+## Usage Regulations
+
+1. Please resolve the licensing of your video sources yourself. Any problem caused by converting unauthorized videos is borne entirely by the user and has nothing to do with mov2mov!
+2. Any video made with mov2mov and published to a video platform must clearly state the source of the converted video in its description. For example, if you convert someone else's video with AI, you must provide a clear link to the original video; if you use your own video, you must state that in the description as well.
+3. All copyright issues caused by the input source are borne by the user. Note that many videos explicitly state that they may not be reproduced or copied!
+4. Please strictly comply with national laws and regulations and make sure the content is legal and compliant. Any legal liability arising from the use of this plugin is borne by the user and has nothing to do with mov2mov!
+
+## Installation
+
+1. Open the Extensions tab.
+2. Click Install from URL.
+3. Enter the URL of the extension's git repository.
+4. Click Install.
+5. Restart the WebUI.
+
+## Change Log
+
+[Change Log](CHANGELOG_CN.md)
+
+## Instructions
+
+- Video tutorials:
+  - [https://www.bilibili.com/video/BV1Mo4y1a7DF](https://www.bilibili.com/video/BV1Mo4y1a7DF)
+  - [https://www.bilibili.com/video/BV1rY4y1C7Q5](https://www.bilibili.com/video/BV1rY4y1C7Q5)
+- QQ channel: [https://pd.qq.com/s/akxpjjsgd](https://pd.qq.com/s/akxpjjsgd)
+- Discord: [https://discord.gg/hUzF3kQKFW](https://discord.gg/hUzF3kQKFW)
+
+## Thanks
+
+- modnet-entry: [https://github.com/RimoChan/modnet-entry](https://github.com/RimoChan/modnet-entry)
+- MODNet: [https://github.com/ZHKKKe/MODNet](https://github.com/ZHKKKe/MODNet)
+- Ezsynth: [https://github.com/Trentonom0r3/Ezsynth](https://github.com/Trentonom0r3/Ezsynth)
ebsynth/__init__.py ADDED
@@ -0,0 +1,8 @@
+
+from .ebsynth_generate import EbsynthGenerate, Keyframe, Sequence, EbSynthTask
+
+AFTER_DETAILER = "ADetailer"
+
+__all__ = [
+    "EbsynthGenerate", "Keyframe", "Sequence", "EbSynthTask"
+]
ebsynth/_ebsynth.py ADDED
@@ -0,0 +1,224 @@
+# Forked from Ezsynth (https://github.com/Trentonom0r3/Ezsynth)
+
+import os
+import sys
+from ctypes import *
+from pathlib import Path
+
+import cv2
+import numpy as np
+
+libebsynth = None
+cached_buffer = {}
+
+EBSYNTH_BACKEND_CPU = 0x0001
+EBSYNTH_BACKEND_CUDA = 0x0002
+EBSYNTH_BACKEND_AUTO = 0x0000
+EBSYNTH_MAX_STYLE_CHANNELS = 8
+EBSYNTH_MAX_GUIDE_CHANNELS = 24
+EBSYNTH_VOTEMODE_PLAIN = 0x0001  # weight = 1
+EBSYNTH_VOTEMODE_WEIGHTED = 0x0002  # weight = 1/(1+error)
+
+
+def _normalize_img_shape(img):
+    img_len = len(img.shape)
+    if img_len == 2:
+        sh, sw = img.shape
+        sc = 0
+    elif img_len == 3:
+        sh, sw, sc = img.shape
+
+    if sc == 0:
+        sc = 1
+        img = img[..., np.newaxis]
+    return img
+
+
+def run(img_style, guides,
+        patch_size=5,
+        num_pyramid_levels=-1,
+        num_search_vote_iters=6,
+        num_patch_match_iters=4,
+        stop_threshold=5,
+        uniformity_weight=3500.0,
+        extraPass3x3=False,
+        ):
+    if patch_size < 3:
+        raise ValueError("patch_size is too small")
+    if patch_size % 2 == 0:
+        raise ValueError("patch_size must be an odd number")
+    if len(guides) == 0:
+        raise ValueError("at least one guide must be specified")
+
+    global libebsynth
+    if libebsynth is None:
+        if sys.platform[0:3] == 'win':
+            libebsynth_path = str(Path(__file__).parent / 'ebsynth.dll')
+            libebsynth = CDLL(libebsynth_path)
+        else:
+            # todo: implement for linux
+            pass
+
+        if libebsynth is not None:
+            libebsynth.ebsynthRun.argtypes = (
+                c_int,
+                c_int,
+                c_int,
+                c_int,
+                c_int,
+                c_void_p,
+                c_void_p,
+                c_int,
+                c_int,
+                c_void_p,
+                c_void_p,
+                POINTER(c_float),
+                POINTER(c_float),
+                c_float,
+                c_int,
+                c_int,
+                c_int,
+                POINTER(c_int),
+                POINTER(c_int),
+                POINTER(c_int),
+                c_int,
+                c_void_p,
+                c_void_p
+            )
+
+    if libebsynth is None:
+        return img_style
+
+    img_style = _normalize_img_shape(img_style)
+    sh, sw, sc = img_style.shape
+    t_h, t_w, t_c = 0, 0, 0
+
+    if sc > EBSYNTH_MAX_STYLE_CHANNELS:
+        raise ValueError(f"error: too many style channels {sc}, maximum number is {EBSYNTH_MAX_STYLE_CHANNELS}")
+
+    guides_source = []
+    guides_target = []
+    guides_weights = []
+
+    for i in range(len(guides)):
+        source_guide, target_guide, guide_weight = guides[i]
+        source_guide = _normalize_img_shape(source_guide)
+        target_guide = _normalize_img_shape(target_guide)
+        s_h, s_w, s_c = source_guide.shape
+        nt_h, nt_w, nt_c = target_guide.shape
+
+        if s_h != sh or s_w != sw:
+            raise ValueError("guide source resolution must match style resolution.")
+
+        if t_c == 0:
+            t_h, t_w, t_c = nt_h, nt_w, nt_c
+        elif nt_h != t_h or nt_w != t_w:
+            raise ValueError("guide target resolutions must be equal")
+
+        if s_c != nt_c:
+            raise ValueError("guide source and target channels must match exactly.")
+
+        guides_source.append(source_guide)
+        guides_target.append(target_guide)
+
+        guides_weights += [guide_weight / s_c] * s_c
+
+    guides_source = np.concatenate(guides_source, axis=-1)
+    guides_target = np.concatenate(guides_target, axis=-1)
+    guides_weights = (c_float * len(guides_weights))(*guides_weights)
+
+    styleWeight = 1.0
+    style_weights = [styleWeight / sc for i in range(sc)]
+    style_weights = (c_float * sc)(*style_weights)
+
+    maxPyramidLevels = 0
+    for level in range(32, -1, -1):
+        if min(min(sh, t_h) * pow(2.0, -level),
+               min(sw, t_w) * pow(2.0, -level)) >= (2 * patch_size + 1):
+            maxPyramidLevels = level + 1
+            break
+
+    if num_pyramid_levels == -1:
+        num_pyramid_levels = maxPyramidLevels
+    num_pyramid_levels = min(num_pyramid_levels, maxPyramidLevels)
+
+    num_search_vote_iters_per_level = (c_int * num_pyramid_levels)(*[num_search_vote_iters] * num_pyramid_levels)
+    num_patch_match_iters_per_level = (c_int * num_pyramid_levels)(*[num_patch_match_iters] * num_pyramid_levels)
+    stop_threshold_per_level = (c_int * num_pyramid_levels)(*[stop_threshold] * num_pyramid_levels)
+
+    buffer = cached_buffer.get((t_h, t_w, sc), None)
+    if buffer is None:
+        buffer = create_string_buffer(t_h * t_w * sc)
+        cached_buffer[(t_h, t_w, sc)] = buffer
+
+    libebsynth.ebsynthRun(EBSYNTH_BACKEND_AUTO,  # backend
+                          sc,  # numStyleChannels
+                          guides_source.shape[-1],  # numGuideChannels
+                          sw,  # sourceWidth
+                          sh,  # sourceHeight
+                          img_style.tobytes(),
+                          # sourceStyleData (width * height * numStyleChannels) bytes, scan-line order
+                          guides_source.tobytes(),
+                          # sourceGuideData (width * height * numGuideChannels) bytes, scan-line order
+                          t_w,  # targetWidth
+                          t_h,  # targetHeight
+                          guides_target.tobytes(),
+                          # targetGuideData (width * height * numGuideChannels) bytes, scan-line order
+                          None,
+                          # targetModulationData (width * height * numGuideChannels) bytes, scan-line order; pass NULL to switch off the modulation
+                          style_weights,  # styleWeights (numStyleChannels) floats
+                          guides_weights,  # guideWeights (numGuideChannels) floats
+                          uniformity_weight,
+                          # uniformityWeight reasonable values are between 500-15000, 3500 is a good default
+                          patch_size,  # patchSize odd sizes only, use 5 for 5x5 patch, 7 for 7x7, etc.
+                          EBSYNTH_VOTEMODE_WEIGHTED,  # voteMode use VOTEMODE_WEIGHTED for sharper result
+                          num_pyramid_levels,  # numPyramidLevels
+                          num_search_vote_iters_per_level,
+                          # numSearchVoteItersPerLevel how many search/vote iters to perform at each level (array of ints, coarse first, fine last)
+                          num_patch_match_iters_per_level,
+                          # numPatchMatchItersPerLevel how many Patch-Match iters to perform at each level (array of ints, coarse first, fine last)
+                          stop_threshold_per_level,
+                          # stopThresholdPerLevel stop improving pixel when its change since last iteration falls under this threshold
+                          1 if extraPass3x3 else 0,
+                          # extraPass3x3 perform additional polishing pass with 3x3 patches at the finest level, use 0 to disable
+                          None,  # outputNnfData (width * height * 2) ints, scan-line order; pass NULL to ignore
+                          buffer  # outputImageData (width * height * numStyleChannels) bytes, scan-line order
+                          )
+
+    return np.frombuffer(buffer, dtype=np.uint8).reshape((t_h, t_w, sc)).copy()
+
+
+# transfer color from source to target
+def color_transfer(img_source, img_target):
+    guides = [(cv2.cvtColor(img_source, cv2.COLOR_BGR2GRAY),
+               cv2.cvtColor(img_target, cv2.COLOR_BGR2GRAY),
+               1)]
+    h, w, c = img_source.shape
+    result = []
+    for i in range(c):
+        result += [
+            run(img_source[..., i:i + 1], guides=guides,
+                patch_size=11,
+                num_pyramid_levels=40,
+                num_search_vote_iters=6,
+                num_patch_match_iters=4,
+                stop_threshold=5,
+                uniformity_weight=500.0,
+                extraPass3x3=True,
+                )
+        ]
+    return np.concatenate(result, axis=-1)
+
+
+def task(img_style, guides):
+    return run(img_style,
+               guides,
+               patch_size=5,
+               num_pyramid_levels=6,
+               num_search_vote_iters=12,
+               num_patch_match_iters=6,
+               uniformity_weight=3500.0,
+               extraPass3x3=False
+               )
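
For orientation, a minimal usage sketch of this binding follows. It assumes Windows (only `ebsynth.dll` is loaded; on other platforms `run()` currently falls through and returns the style image unchanged) and that the repository root is on `sys.path`; the file names and the grayscale guide are illustrative, not part of the commit:

```python
# Hypothetical driver for ebsynth/_ebsynth.py: one stylized keyframe plus one
# (source_guide, target_guide, weight) triple produces a restyled target frame.
import cv2
from ebsynth import _ebsynth

style = cv2.imread("keyframe_0001_stylized.png")   # stylized keyframe
source = cv2.imread("frame_0001.png")              # original keyframe
target = cv2.imread("frame_0002.png")              # frame to restyle

# Grayscale guides are a common choice; _normalize_img_shape() expands the
# 2-D arrays to a single channel internally.
guides = [(cv2.cvtColor(source, cv2.COLOR_BGR2GRAY),
           cv2.cvtColor(target, cv2.COLOR_BGR2GRAY),
           4.0)]

result = _ebsynth.task(style, guides)              # task() wraps run() with preset params
cv2.imwrite("frame_0002_stylized.png", result)
```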
ebsynth/ebsynth.dll ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6a2499714514ea5016a04145d653012bfa6e74906293e0bae5a51ac155dba4a6
+size 28829696
ebsynth/ebsynth_generate.py ADDED
@@ -0,0 +1,143 @@
+import cv2
+import numpy as np
+from PIL.Image import Image
+from dataclasses import dataclass, field
+from toolz import groupby
+
+
+@dataclass
+class Keyframe:
+    num: int
+    image: np.ndarray = field(repr=False)
+    prompt: str = field(repr=False)
+
+
+@dataclass
+class Sequence:
+    start: int
+    keyframe: Keyframe
+    end: int
+    # all frames of the current sequence
+    frames: dict[int, np.ndarray] = field(default_factory=dict, repr=False)
+    # all frames generated for this sequence
+    generate_frames: dict[int, np.ndarray] = field(default_factory=dict, repr=False)
+
+
+@dataclass
+class EbSynthTask:
+    style: np.ndarray = field(repr=False)
+    source: np.ndarray = field(repr=False)
+    target: np.ndarray = field(repr=False)
+    frame_num: int
+    key_frame_num: int
+    weight: float = field(default=1.0, repr=False)
+
+
+class EbsynthGenerate:
+    def __init__(self, keyframes: list[Keyframe], frames: list[np.ndarray], fps: int):
+        self.keyframes = keyframes
+        self.frames = frames
+        self.fps = fps
+        self.sequences = []
+        self.setup_sequences()
+
+    def setup_sequences(self):
+        """
+        Initialize the sequences. At this stage the frame for each frame_num has already
+        been processed, so frame - 1 does not need to be handled again later.
+        """
+        self.sequences.clear()
+        all_frames = len(self.frames)
+        left_frame = 1
+        for i, keyframe in enumerate(self.keyframes):
+            right_frame = self.keyframes[i + 1].num if i + 1 < len(self.keyframes) else all_frames
+            frames = {}
+            for frame_num in range(left_frame, right_frame + 1):
+                frames[frame_num] = self.frames[frame_num - 1]
+            sequence = Sequence(left_frame, keyframe, right_frame, frames)
+            self.sequences.append(sequence)
+            left_frame = keyframe.num
+        return self.sequences
+
+    def get_tasks(self, weight: float = 4.0) -> list[EbSynthTask]:
+        tasks = []
+        for i, sequence in enumerate(self.sequences):
+            frames = sequence.frames.items()
+            source = sequence.frames[sequence.keyframe.num]
+            style = sequence.keyframe.image
+            for frame_num, frame in frames:
+                target = frame
+                task = EbSynthTask(style, source, target, frame_num, sequence.keyframe.num, weight)
+                tasks.append(task)
+        return tasks
+
+    def append_generate_frames(self, key_frames_num, frame_num, generate_frames):
+        """
+
+        Args:
+            key_frames_num: used to locate the sequence
+            frame_num: key
+            generate_frames: value
+
+        Returns:
+
+        """
+        for sequence in self.sequences:
+            if sequence.keyframe.num == key_frames_num:
+                sequence.generate_frames[frame_num] = generate_frames
+                break
+        else:
+            raise ValueError(f'not found key frame num {key_frames_num}')
+
+    def merge_sequences(self):
+        # store the merged result
+        merged_frames = []
+        border = 1
+        for i in range(len(self.sequences)):
+            current_seq = self.sequences[i]
+            next_seq = self.sequences[i + 1] if i + 1 < len(self.sequences) else None
+
+            # if there is a next sequence
+            if next_seq:
+                # get the frames shared by the two sequences
+                common_frames_nums = set(current_seq.frames.keys()).intersection(
+                    set(range(next_seq.start + border, next_seq.end)) if i > 0 else set(
+                        range(next_seq.start, next_seq.end)))
+
+                for j, frame_num in enumerate(common_frames_nums):
+                    # fetch the frame from both sequences and blend them
+                    frame1 = current_seq.generate_frames[frame_num]
+                    frame2 = next_seq.generate_frames[frame_num]
+
+                    weight = float(j) / float(len(common_frames_nums))
+                    merged_frame = cv2.addWeighted(frame1, 1 - weight, frame2, weight, 0)
+                    merged_frames.append((frame_num, merged_frame))
+
+            # if there is no next sequence
+            else:
+                # add the frames not covered by the previous sequence to the result
+                if i > 0:
+                    prev_seq = self.sequences[i - 1]
+                    difference_frames_nums = set(current_seq.frames.keys()) - set(prev_seq.frames.keys())
+                else:
+                    difference_frames_nums = set(current_seq.frames.keys())
+
+                for frame_num in difference_frames_nums:
+                    merged_frames.append((frame_num, current_seq.generate_frames[frame_num]))
+
+        # group_merged_frames = groupby(lambda x: x[0], merged_frames)
+        # merged_frames.clear()
+        # # take the entries whose value has more than one element
+        # for key, value in group_merged_frames.items():
+        #     if len(value) > 1:
+        #         # blend all elements of value together
+        #         merged_frame = value[0][1]
+        #         for i in range(1, len(value)):
+        #             merged_frame = cv2.addWeighted(merged_frame, weight, value[i][1], 1 - weight, 0)
+        #         merged_frames.append((key, merged_frame))
+        #     else:
+        #         merged_frames.append((key, value[0][1]))
+        result = []
+        for i, frame in sorted(merged_frames, key=lambda x: x[0]):
+            result.append(frame)
+
+        return result
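
To make the data flow concrete, here is a hedged end-to-end sketch: build sequences from two keyframes, run one EbSynth pass per task via `_ebsynth.task`, then blend the overlapping sequences with `merge_sequences()`. The arrays are synthetic stand-ins for real video frames and stylized keyframes:

```python
# Hypothetical end-to-end use of EbsynthGenerate with dummy data.
import cv2
import numpy as np
from ebsynth import EbsynthGenerate, Keyframe
from ebsynth import _ebsynth

# Dummy data standing in for decoded video frames and stylized keyframes.
frames = [np.full((64, 64, 3), i * 12, dtype=np.uint8) for i in range(20)]
stylized = [frames[0].copy(), frames[10].copy()]

keyframes = [Keyframe(num=1, image=stylized[0], prompt=""),
             Keyframe(num=11, image=stylized[1], prompt="")]
gen = EbsynthGenerate(keyframes, frames, fps=30)

# Each task pairs a keyframe (style + source) with one frame to restyle.
for t in gen.get_tasks(weight=4.0):
    guides = [(cv2.cvtColor(t.source, cv2.COLOR_BGR2GRAY),
               cv2.cvtColor(t.target, cv2.COLOR_BGR2GRAY),
               t.weight)]
    out = _ebsynth.task(t.style, guides)
    gen.append_generate_frames(t.key_frame_num, t.frame_num, out)

result_frames = gen.merge_sequences()  # frames in playback order
```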
images/1.png ADDED
images/2.jpg ADDED
images/alipay.png ADDED
images/img.png ADDED
images/wechat.png ADDED
install.py ADDED
@@ -0,0 +1,19 @@
+import os
+import platform
+import launch
+
+if not launch.is_installed("cv2"):
+    print('Installing requirements for Mov2mov')
+    launch.run_pip("install opencv-python", "requirements for opencv")
+
+if platform.system() == 'Windows':
+    if not launch.is_installed('imageio'):
+        print('Installing requirements for Mov2mov')
+        launch.run_pip("install imageio", "requirements for imageio")
+    if not launch.is_installed('imageio-ffmpeg'):
+        print('Installing requirements for Mov2mov')
+        launch.run_pip("install imageio-ffmpeg", "requirements for imageio-ffmpeg")
+else:
+    if not launch.is_installed('ffmpeg'):
+        print('Installing requirements for Mov2mov')
+        launch.run_pip("install ffmpeg", "requirements for ffmpeg")
javascript/m2m_ui.js ADDED
@@ -0,0 +1,48 @@
+function submit_mov2mov() {
+    rememberGallerySelection('mov2mov_gallery')
+    showSubmitButtons('mov2mov', false)
+    showResultVideo('mov2mov', false)
+
+    var id = randomId()
+    requestProgress(id, gradioApp().getElementById('mov2mov_gallery_container'), gradioApp().getElementById('mov2mov_gallery'), function () {
+        showSubmitButtons('mov2mov', true)
+        showResultVideo('mov2mov', true)
+    })
+
+    var res = create_submit_args(arguments)
+    res[0] = id
+    return res
+}
+
+function showResultVideo(tabname, show) {
+    gradioApp().getElementById(tabname + '_video').style.display = show ? "block" : "none"
+    gradioApp().getElementById(tabname + '_gallery').style.display = show ? "none" : "block"
+}
+
+
+function showModnetModels() {
+    var check = arguments[0]
+    gradioApp().getElementById('mov2mov_modnet_model').style.display = check ? "block" : "none"
+    gradioApp().getElementById('mov2mov_merge_background').style.display = check ? "block" : "none"
+    return []
+}
+
+function switchModnetMode() {
+    let mode = arguments[0]
+
+    if (mode === 'Clear' || mode === 'Origin' || mode === 'Green' || mode === 'Image') {
+        gradioApp().getElementById('modnet_background_movie').style.display = "none"
+        gradioApp().getElementById('modnet_background_image').style.display = "block"
+    } else {
+        gradioApp().getElementById('modnet_background_movie').style.display = "block"
+        gradioApp().getElementById('modnet_background_image').style.display = "none"
+    }
+
+    return []
+}
+
+
+function copy_from(type) {
+    return []
+}
requirements.txt ADDED
@@ -0,0 +1,3 @@
+opencv-python
+imageio
+imageio-ffmpeg
scripts/m2m_config.py ADDED
@@ -0,0 +1,2 @@
+mov2mov_outpath_samples = 'outputs/mov2mov-images'
+mov2mov_output_dir = 'outputs/mov2mov-videos'
scripts/m2m_hook.py ADDED
@@ -0,0 +1,63 @@
+from collections import defaultdict
+
+def patch(key, obj, field, replacement):
+    """Replaces a function in a module or a class.
+
+    Also stores the original function in this module, possible to be retrieved via original(key, obj, field).
+    If the function is already replaced by this caller (key), an exception is raised -- use undo() before that.
+
+    Arguments:
+        key: identifying information for who is doing the replacement. You can use __name__.
+        obj: the module or the class
+        field: name of the function as a string
+        replacement: the new function
+
+    Returns:
+        the original function
+    """
+
+    patch_key = (obj, field)
+    if patch_key in originals[key]:
+        raise RuntimeError(f"patch for {field} is already applied")
+
+    original_func = getattr(obj, field)
+    originals[key][patch_key] = original_func
+
+    setattr(obj, field, replacement)
+
+    return original_func
+
+
+def undo(key, obj, field):
+    """Undoes the replacement by the patch().
+
+    If the function is not replaced, raises an exception.
+
+    Arguments:
+        key: identifying information for who is doing the replacement. You can use __name__.
+        obj: the module or the class
+        field: name of the function as a string
+
+    Returns:
+        Always None
+    """
+
+    patch_key = (obj, field)
+
+    if patch_key not in originals[key]:
+        raise RuntimeError(f"there is no patch for {field} to undo")
+
+    original_func = originals[key].pop(patch_key)
+    setattr(obj, field, original_func)
+
+    return None
+
+
+def original(key, obj, field):
+    """Returns the original function for the patch created by the patch() function"""
+    patch_key = (obj, field)
+
+    return originals[key].get(patch_key, None)
+
+
+originals = defaultdict(dict)
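
The patch/original/undo trio behaves like a keyed monkey-patch registry. A small self-contained illustration of the lifecycle (the `Greeter` class is made up for the example):

```python
# Illustration of the patch()/original()/undo() lifecycle using a toy class.
from scripts import m2m_hook as patches


class Greeter:  # hypothetical target, not part of the webui
    def hello(self):
        return "hello"


def loud_hello(self):
    # delegate to the saved original, then modify the result
    return patches.original(__name__, Greeter, "hello")(self).upper()


patches.patch(__name__, obj=Greeter, field="hello", replacement=loud_hello)
assert Greeter().hello() == "HELLO"

patches.undo(__name__, obj=Greeter, field="hello")
assert Greeter().hello() == "hello"
```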
scripts/m2m_ui.py ADDED
@@ -0,0 +1,622 @@
+import importlib
+import os
+import platform
+import shutil
+import subprocess as sp
+import sys
+
+import gradio as gr
+
+import modules
+import modules.scripts as scripts
+from modules import (
+    script_callbacks,
+    shared,
+    call_queue,
+    sd_samplers,
+    ui_prompt_styles,
+    sd_models,
+)
+from modules.call_queue import wrap_gradio_gpu_call
+from modules.images import image_data
+from modules.shared import opts
+from modules.ui import (
+    ordered_ui_categories,
+    create_sampler_and_steps_selection,
+    switch_values_symbol,
+    create_override_settings_dropdown,
+    detect_image_size_symbol,
+    plaintext_to_html,
+    paste_symbol,
+    clear_prompt_symbol,
+    restore_progress_symbol,
+)
+from modules.ui_common import (
+    folder_symbol,
+    update_generation_info,
+    create_refresh_button,
+)
+from modules.ui_components import (
+    ResizeHandleRow,
+    FormRow,
+    ToolButton,
+    FormGroup,
+    InputAccordion,
+)
+from scripts import m2m_hook as patches
+from scripts import m2m_util
+from scripts import mov2mov
+from scripts.mov2mov import scripts_mov2mov
+from scripts.m2m_config import mov2mov_outpath_samples, mov2mov_output_dir
+from scripts.movie_editor import MovieEditor
+
+id_part = "mov2mov"
+
+
+def save_video(video):
+    path = "logs/movies"
+    if not os.path.exists(path):
+        os.makedirs(path, exist_ok=True)
+    index = len([path for path in os.listdir(path) if path.endswith(".mp4")]) + 1
+    video_path = os.path.join(path, str(index).zfill(5) + ".mp4")
+    shutil.copyfile(video, video_path)
+    filename = os.path.relpath(video_path, path)
+    return gr.File.update(value=video_path, visible=True), plaintext_to_html(
+        f"Saved: {filename}"
+    )
+
+
+class Toprow:
+    """Creates a top row UI with prompts, generate button, styles, extra little buttons for things, and enables some functionality related to their operation"""
+
+    def __init__(self, is_img2img, id_part=None):
+        if not id_part:
+            id_part = "img2img" if is_img2img else "txt2img"
+        self.id_part = id_part
+
+        with gr.Row(elem_id=f"{id_part}_toprow", variant="compact"):
+            with gr.Column(elem_id=f"{id_part}_prompt_container", scale=6):
+                with gr.Row():
+                    with gr.Column(scale=80):
+                        with gr.Row():
+                            self.prompt = gr.Textbox(
+                                label="Prompt",
+                                elem_id=f"{id_part}_prompt",
+                                show_label=False,
+                                lines=3,
+                                placeholder="Prompt (press Ctrl+Enter or Alt+Enter to generate)",
+                                elem_classes=["prompt"],
+                            )
+                            self.prompt_img = gr.File(
+                                label="",
+                                elem_id=f"{id_part}_prompt_image",
+                                file_count="single",
+                                type="binary",
+                                visible=False,
+                            )
+
+                with gr.Row():
+                    with gr.Column(scale=80):
+                        with gr.Row():
+                            self.negative_prompt = gr.Textbox(
+                                label="Negative prompt",
+                                elem_id=f"{id_part}_neg_prompt",
+                                show_label=False,
+                                lines=3,
+                                placeholder="Negative prompt (press Ctrl+Enter or Alt+Enter to generate)",
+                                elem_classes=["prompt"],
+                            )
+
+            self.button_interrogate = None
+            self.button_deepbooru = None
+            if is_img2img:
+                with gr.Column(scale=1, elem_classes="interrogate-col"):
+                    self.button_interrogate = gr.Button(
+                        "Interrogate\nCLIP", elem_id="interrogate"
+                    )
+                    self.button_deepbooru = gr.Button(
+                        "Interrogate\nDeepBooru", elem_id="deepbooru"
+                    )
+
+            with gr.Column(scale=1, elem_id=f"{id_part}_actions_column"):
+                with gr.Row(
+                    elem_id=f"{id_part}_generate_box", elem_classes="generate-box"
+                ):
+                    self.interrupt = gr.Button(
+                        "Interrupt",
+                        elem_id=f"{id_part}_interrupt",
+                        elem_classes="generate-box-interrupt",
+                    )
+                    self.skip = gr.Button(
+                        "Skip",
+                        elem_id=f"{id_part}_skip",
+                        elem_classes="generate-box-skip",
+                    )
+                    self.submit = gr.Button(
+                        "Generate", elem_id=f"{id_part}_generate", variant="primary"
+                    )
+
+                    self.skip.click(
+                        fn=lambda: shared.state.skip(),
+                        inputs=[],
+                        outputs=[],
+                    )
+
+                    self.interrupt.click(
+                        fn=lambda: shared.state.interrupt(),
+                        inputs=[],
+                        outputs=[],
+                    )
+
+                with gr.Row(elem_id=f"{id_part}_tools"):
+                    self.paste = ToolButton(value=paste_symbol, elem_id="paste")
+
+                    self.clear_prompt_button = ToolButton(
+                        value=clear_prompt_symbol, elem_id=f"{id_part}_clear_prompt"
+                    )
+                    self.restore_progress_button = ToolButton(
+                        value=restore_progress_symbol,
+                        elem_id=f"{id_part}_restore_progress",
+                        visible=False,
+                    )
+
+                    self.token_counter = gr.HTML(
+                        value="<span>0/75</span>",
+                        elem_id=f"{id_part}_token_counter",
+                        elem_classes=["token-counter"],
+                    )
+                    self.token_button = gr.Button(
+                        visible=False, elem_id=f"{id_part}_token_button"
+                    )
+                    self.negative_token_counter = gr.HTML(
+                        value="<span>0/75</span>",
+                        elem_id=f"{id_part}_negative_token_counter",
+                        elem_classes=["token-counter"],
+                    )
+                    self.negative_token_button = gr.Button(
+                        visible=False, elem_id=f"{id_part}_negative_token_button"
+                    )
+
+                    self.clear_prompt_button.click(
+                        fn=lambda *x: x,
+                        _js="confirm_clear_prompt",
+                        inputs=[self.prompt, self.negative_prompt],
+                        outputs=[self.prompt, self.negative_prompt],
+                    )
+
+                self.ui_styles = ui_prompt_styles.UiPromptStyles(
+                    id_part, self.prompt, self.negative_prompt
+                )
+
+        self.prompt_img.change(
+            fn=modules.images.image_data,
+            inputs=[self.prompt_img],
+            outputs=[self.prompt, self.prompt_img],
+            show_progress=False,
+        )
+
+
+def create_output_panel(tabname, outdir):
+    def open_folder(f):
+        if not os.path.exists(f):
+            print(
+                f'Folder "{f}" does not exist. After you create an image, the folder will be created.'
+            )
+            return
+        elif not os.path.isdir(f):
+            print(
+                f"""
+WARNING
+An open_folder request was made with an argument that is not a folder.
+This could be an error or a malicious attempt to run code on your computer.
+Requested path was: {f}
+""",
+                file=sys.stderr,
+            )
+            return
+
+        if not shared.cmd_opts.hide_ui_dir_config:
+            path = os.path.normpath(f)
+            if platform.system() == "Windows":
+                os.startfile(path)
+            elif platform.system() == "Darwin":
+                sp.Popen(["open", path])
+            elif "microsoft-standard-WSL2" in platform.uname().release:
+                sp.Popen(["wsl-open", path])
+            else:
+                sp.Popen(["xdg-open", path])
+
+    with gr.Column(variant="panel", elem_id=f"{tabname}_results"):
+        with gr.Group(elem_id=f"{tabname}_gallery_container"):
+            result_gallery = gr.Gallery(
+                label="Output",
+                show_label=False,
+                elem_id=f"{tabname}_gallery",
+                columns=4,
+                preview=True,
+                height=shared.opts.gallery_height or None,
+            )
+            result_video = gr.PlayableVideo(
+                label="Output Video", show_label=False, elem_id=f"{tabname}_video"
+            )
+
+        generation_info = None
+        with gr.Column():
+            with gr.Row(
+                elem_id=f"image_buttons_{tabname}", elem_classes="image-buttons"
+            ):
+                open_folder_button = ToolButton(
+                    folder_symbol,
+                    elem_id=f"{tabname}_open_folder",
+                    visible=not shared.cmd_opts.hide_ui_dir_config,
+                    tooltip="Open images output directory.",
+                )
+
+                if tabname != "extras":
+                    save = ToolButton(
+                        "💾",
+                        elem_id=f"save_{tabname}",
+                        tooltip=f"Save the image to a dedicated directory ({shared.opts.outdir_save}).",
+                    )
+
+                open_folder_button.click(
+                    fn=lambda: open_folder(shared.opts.outdir_samples or outdir),
+                    inputs=[],
+                    outputs=[],
+                )
+
+            download_files = gr.File(
+                None,
+                file_count="multiple",
+                interactive=False,
+                show_label=False,
+                visible=False,
+                elem_id=f"download_files_{tabname}",
+            )
+
+            with gr.Group():
+                html_info = gr.HTML(
+                    elem_id=f"html_info_{tabname}", elem_classes="infotext"
+                )
+                html_log = gr.HTML(
+                    elem_id=f"html_log_{tabname}", elem_classes="html-log"
+                )
+
+                generation_info = gr.Textbox(
+                    visible=False, elem_id=f"generation_info_{tabname}"
+                )
+                if tabname == "txt2img" or tabname == "img2img" or tabname == "mov2mov":
+                    generation_info_button = gr.Button(
+                        visible=False, elem_id=f"{tabname}_generation_info_button"
+                    )
+                    generation_info_button.click(
+                        fn=update_generation_info,
+                        _js="function(x, y, z){ return [x, y, selected_gallery_index()] }",
+                        inputs=[generation_info, html_info, html_info],
+                        outputs=[html_info, html_info],
+                        show_progress=False,
+                    )
+
+                save.click(
+                    fn=call_queue.wrap_gradio_call(save_video),
+                    inputs=[result_video],
+                    outputs=[
+                        download_files,
+                        html_log,
+                    ],
+                    show_progress=False,
+                )
+
+        return result_gallery, result_video, generation_info, html_info, html_log
+
+
+def create_refiner():
+    with InputAccordion(
+        False, label="Refiner", elem_id=f"{id_part}_enable"
+    ) as enable_refiner:
+        with gr.Row():
+            refiner_checkpoint = gr.Dropdown(
+                label="Checkpoint",
+                elem_id=f"{id_part}_checkpoint",
+                choices=sd_models.checkpoint_tiles(),
+                value="",
+                tooltip="switch to another model in the middle of generation",
+            )
+            create_refresh_button(
+                refiner_checkpoint,
+                sd_models.list_models,
+                lambda: {"choices": sd_models.checkpoint_tiles()},
+                f"{id_part}_checkpoint_refresh",
+            )
+
+            refiner_switch_at = gr.Slider(
+                value=0.8,
+                label="Switch at",
+                minimum=0.01,
+                maximum=1.0,
+                step=0.01,
+                elem_id=f"{id_part}_switch_at",
+                tooltip="fraction of sampling steps when the switch to refiner model should happen; 1=never, 0.5=switch in the middle of generation",
+            )
+    return enable_refiner, refiner_checkpoint, refiner_switch_at
+
+
+def on_ui_tabs():
+    scripts_mov2mov.initialize_scripts(is_img2img=True)
+
+    # with gr.Blocks(analytics_enabled=False) as mov2mov_interface:
+    with gr.TabItem(
+        "mov2mov", id=f"tab_{id_part}", elem_id=f"tab_{id_part}"
+    ) as mov2mov_interface:
+        toprow = Toprow(is_img2img=False, id_part=id_part)
+        dummy_component = gr.Label(visible=False)
+        with gr.Tab(
+            "Generation", id=f"{id_part}_generation"
+        ) as mov2mov_generation_tab, ResizeHandleRow(equal_height=False):
+            with gr.Column(variant="compact", elem_id="mov2mov_settings"):
+                with gr.Tabs(elem_id=f"mode_{id_part}"):
+                    init_mov = gr.Video(
+                        label="Video for mov2mov",
+                        elem_id=f"{id_part}_mov",
+                        show_label=False,
+                        source="upload",
+                    )
+
+                with FormRow():
+                    resize_mode = gr.Radio(
+                        label="Resize mode",
+                        elem_id=f"{id_part}_resize_mode",
+                        choices=[
+                            "Just resize",
+                            "Crop and resize",
+                            "Resize and fill",
+                            "Just resize (latent upscale)",
+                        ],
+                        type="index",
+                        value="Just resize",
+                    )
+                scripts_mov2mov.prepare_ui()
+
+                for category in ordered_ui_categories():
+                    if category == "sampler":
+                        steps, sampler_name = create_sampler_and_steps_selection(
+                            sd_samplers.visible_sampler_names(), id_part
+                        )
+                    elif category == "dimensions":
+                        with FormRow():
+                            with gr.Column(elem_id=f"{id_part}_column_size", scale=4):
+                                with gr.Tabs():
+                                    with gr.Tab(
+                                        label="Resize to",
+                                        elem_id=f"{id_part}_tab_resize_to",
+                                    ) as tab_scale_to:
+                                        with FormRow():
+                                            with gr.Column(
+                                                elem_id=f"{id_part}_column_size",
+                                                scale=4,
+                                            ):
+                                                width = gr.Slider(
+                                                    minimum=64,
+                                                    maximum=2048,
+                                                    step=8,
+                                                    label="Width",
+                                                    value=512,
+                                                    elem_id=f"{id_part}_width",
+                                                )
+                                                height = gr.Slider(
+                                                    minimum=64,
+                                                    maximum=2048,
+                                                    step=8,
+                                                    label="Height",
+                                                    value=512,
+                                                    elem_id=f"{id_part}_height",
+                                                )
+                                            with gr.Column(
+                                                elem_id=f"{id_part}_dimensions_row",
+                                                scale=1,
+                                                elem_classes="dimensions-tools",
+                                            ):
+                                                res_switch_btn = ToolButton(
+                                                    value=switch_values_symbol,
+                                                    elem_id=f"{id_part}_res_switch_btn",
+                                                )
+                                                detect_image_size_btn = ToolButton(
+                                                    value=detect_image_size_symbol,
+                                                    elem_id=f"{id_part}_detect_image_size_btn",
+                                                )
+                    elif category == "denoising":
+                        denoising_strength = gr.Slider(
+                            minimum=0.0,
+                            maximum=1.0,
+                            step=0.01,
+                            label="Denoising strength",
+                            value=0.75,
+                            elem_id=f"{id_part}_denoising_strength",
+                        )
+
+                        noise_multiplier = gr.Slider(
+                            minimum=0,
+                            maximum=1.5,
+                            step=0.01,
+                            label="Noise multiplier",
+                            elem_id=f"{id_part}_noise_multiplier",
+                            value=1,
+                        )
+                        with gr.Row(elem_id=f"{id_part}_frames_setting"):
+                            movie_frames = gr.Slider(
+                                minimum=10,
+                                maximum=60,
+                                step=1,
+                                label="Movie FPS",
+                                elem_id=f"{id_part}_movie_frames",
+                                value=30,
+                            )
+                            max_frames = gr.Number(
+                                label="Max FPS",
+                                value=-1,
+                                elem_id=f"{id_part}_max_frames",
+                            )
+
+                    elif category == "cfg":
+                        with gr.Row():
+                            cfg_scale = gr.Slider(
+                                minimum=1.0,
+                                maximum=30.0,
+                                step=0.5,
+                                label="CFG Scale",
+                                value=7.0,
+                                elem_id=f"{id_part}_cfg_scale",
+                            )
+                            image_cfg_scale = gr.Slider(
+                                minimum=0,
+                                maximum=3.0,
+                                step=0.05,
+                                label="Image CFG Scale",
+                                value=1.5,
+                                elem_id=f"{id_part}_image_cfg_scale",
+                                visible=False,
+                            )
+
+                    elif category == "checkboxes":
+                        with FormRow(elem_classes="checkboxes-row", variant="compact"):
+                            pass
+
+                    elif category == "accordions":
+                        with gr.Row(
+                            elem_id=f"{id_part}_accordions", elem_classes="accordions"
+                        ):
+                            scripts_mov2mov.setup_ui_for_section(category)
+
+                    elif category == "override_settings":
+                        with FormRow(elem_id=f"{id_part}_override_settings_row") as row:
+                            override_settings = create_override_settings_dropdown(
+                                "mov2mov", row
+                            )
+
+                    elif category == "scripts":
+                        editor = MovieEditor(id_part, init_mov, movie_frames)
+                        editor.render()
+                        with FormGroup(elem_id=f"{id_part}_script_container"):
+                            custom_inputs = scripts_mov2mov.setup_ui()
+
+                    if category not in {"accordions"}:
+                        scripts_mov2mov.setup_ui_for_section(category)
+
+            (
+                mov2mov_gallery,
+                result_video,
+                generation_info,
+                html_info,
+                html_log,
+            ) = create_output_panel(id_part, opts.mov2mov_output_dir)
+
+            res_switch_btn.click(
+                fn=None,
+                _js="function(){switchWidthHeight('mov2mov')}",
+                inputs=None,
+                outputs=None,
+                show_progress=False,
+            )
+
+            # calc video size
+            detect_image_size_btn.click(
+                fn=calc_video_w_h,
+                inputs=[init_mov, width, height],
+                outputs=[width, height],
+            )
+
+        mov2mov_args = dict(
+            fn=wrap_gradio_gpu_call(mov2mov.mov2mov, extra_outputs=[None, "", ""]),
+            _js="submit_mov2mov",
+            inputs=[
+                dummy_component,
+                toprow.prompt,
+                toprow.negative_prompt,
+                toprow.ui_styles.dropdown,
+                init_mov,
+                steps,
+                sampler_name,
+                cfg_scale,
+                image_cfg_scale,
+                denoising_strength,
+                height,
+                width,
+                resize_mode,
+                override_settings,
+                # refiner
+                # enable_refiner, refiner_checkpoint, refiner_switch_at,
+                # mov2mov params
+                noise_multiplier,
+                movie_frames,
+                max_frames,
+                # editor
+                editor.gr_enable_movie_editor,
+                editor.gr_df,
+                editor.gr_eb_weight,
+            ]
+            + custom_inputs,
+            outputs=[
+                result_video,
+                generation_info,
+                html_info,
+                html_log,
+            ],
+            show_progress=False,
+        )
+
+        toprow.submit.click(**mov2mov_args)
+
+    return [(mov2mov_interface, "mov2mov", f"{id_part}_tabs")]
+
+
+def calc_video_w_h(video, width, height):
+    if not video:
+        return width, height
+
+    return m2m_util.calc_video_w_h(video)
+
+
+def on_ui_settings():
+    section = ("mov2mov", "Mov2Mov")
+    shared.opts.add_option(
+        "mov2mov_outpath_samples",
+        shared.OptionInfo(
+            mov2mov_outpath_samples, "Mov2Mov output path for image", section=section
+        ),
+    )
+    shared.opts.add_option(
+        "mov2mov_output_dir",
+        shared.OptionInfo(
+            mov2mov_output_dir, "Mov2Mov output path for video", section=section
+        ),
+    )
+
+
+img2img_toprow: gr.Row = None
+
+
+def block_context_init(self, *args, **kwargs):
+    origin_block_context_init(self, *args, **kwargs)
+
+    if self.elem_id == "tab_img2img":
+        self.parent.__enter__()
+        on_ui_tabs()
+        self.parent.__exit__()
+
+
+def on_app_reload():
+    global origin_block_context_init
+    if origin_block_context_init:
+        patches.undo(__name__, obj=gr.blocks.BlockContext, field="__init__")
+        origin_block_context_init = None
+
+
+origin_block_context_init = patches.patch(
+    __name__,
+    obj=gr.blocks.BlockContext,
+    field="__init__",
+    replacement=block_context_init,
+)
+script_callbacks.on_before_reload(on_app_reload)
+script_callbacks.on_ui_settings(on_ui_settings)
+# script_callbacks.on_ui_tabs(on_ui_tabs)
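
The tab-placement trick at the bottom of this file is worth spelling out: instead of registering through `script_callbacks.on_ui_tabs` (which would append the tab at the end), the extension patches `gr.blocks.BlockContext.__init__` so that the moment the img2img TabItem is constructed, it re-enters the parent Tabs context and builds the mov2mov tab right after it. A stripped-down sketch of the same construction-hook pattern, with a made-up `Widget` class standing in for `gr.blocks.BlockContext`:

```python
# Generic form of the construction hook used above: patch a class's __init__,
# detect a specific instance by one of its attributes, and run extra setup
# at exactly that point in the build.
from scripts import m2m_hook as patches


class Widget:  # hypothetical stand-in for gr.blocks.BlockContext
    def __init__(self, elem_id=None):
        self.elem_id = elem_id


def widget_init(self, *args, **kwargs):
    original_init(self, *args, **kwargs)   # build the widget as usual
    if self.elem_id == "tab_img2img":      # the anchor we want to follow
        print("insert mov2mov tab here")   # on_ui_tabs() in the real code


original_init = patches.patch(__name__, obj=Widget, field="__init__",
                              replacement=widget_init)

Widget(elem_id="tab_txt2img")              # no hook fires
Widget(elem_id="tab_img2img")              # prints: insert mov2mov tab here
```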
scripts/m2m_util.py ADDED
@@ -0,0 +1,121 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os.path
2
+ import platform
3
+ import cv2
4
+ import numpy
5
+ import imageio
6
+
7
+
8
+ def calc_video_w_h(video_path):
9
+ cap = cv2.VideoCapture(video_path)
10
+
11
+ if not cap.isOpened():
12
+ raise ValueError("Can't open video file")
13
+
14
+ width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
15
+ height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
16
+
17
+ cap.release()
18
+
19
+ return width, height
20
+
21
+
22
+ def get_mov_frame_count(file):
23
+ if file is None:
24
+ return None
25
+ cap = cv2.VideoCapture(file)
26
+
27
+ if not cap.isOpened():
28
+ return None
29
+
30
+ frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
31
+ cap.release()
32
+ return frames
33
+
34
+
35
+ def get_mov_fps(file):
36
+ if file is None:
37
+ return None
38
+ cap = cv2.VideoCapture(file)
39
+
40
+ if not cap.isOpened():
41
+ return None
42
+
43
+ fps = cap.get(cv2.CAP_PROP_FPS)
44
+ cap.release()
45
+ return fps
46
+
47
+
+ def get_mov_all_images(file, frames, rgb=False):
+     if file is None:
+         return None
+     cap = cv2.VideoCapture(file)
+
+     if not cap.isOpened():
+         return None
+
+     fps = cap.get(cv2.CAP_PROP_FPS)
+     if frames > fps:
+         print('Warning: the requested frame rate is greater than the video frame rate')
+         frames = int(fps)
+
+     skip = fps // frames
+     count = 1
+     fs = 1
+     image_list = []
+     while True:
+         flag, frame = cap.read()
+         if not flag:
+             break
+         if fs % skip == 0:
+             if rgb:
+                 frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
+             image_list.append(frame)
+             count += 1
+         fs += 1
+     cap.release()
+     return image_list
+
+
+ def images_to_video(images, frames, out_path):
+     if platform.system() == 'Windows':
+         # Use imageio with the 'libx264' codec on Windows
+         return images_to_video_imageio(images, frames, out_path, 'libx264')
+     elif platform.system() == 'Darwin':
+         # Use cv2 with the 'avc1' codec on Mac
+         return images_to_video_cv2(images, frames, out_path, 'avc1')
+     else:
+         # Use cv2 with the 'mp4v' codec on other operating systems as it's the most widely supported
+         return images_to_video_cv2(images, frames, out_path, 'mp4v')
+
+
+ def images_to_video_imageio(images, frames, out_path, codec):
+     # Create the output directory if it does not exist yet
+     if not os.path.exists(os.path.dirname(out_path)):
+         os.makedirs(os.path.dirname(out_path), exist_ok=True)
+
+     with imageio.v2.get_writer(out_path, format='ffmpeg', mode='I', fps=frames, codec=codec) as writer:
+         for img in images:
+             writer.append_data(numpy.asarray(img))
+     return out_path
+
+
+ def images_to_video_cv2(images, frames, out_path, codec):
+     if len(images) <= 0:
+         return None
+     # Create the output directory if it does not exist yet
+     if not os.path.exists(os.path.dirname(out_path)):
+         os.makedirs(os.path.dirname(out_path), exist_ok=True)
+
+     fourcc = cv2.VideoWriter_fourcc(*codec)
+     # images are PIL images here, so .size yields (width, height)
+     w, h = images[0].size
+     video = cv2.VideoWriter(out_path, fourcc, frames, (w, h))
+     for image in images:
+         img = cv2.cvtColor(numpy.asarray(image), cv2.COLOR_RGB2BGR)
+         video.write(img)
+     video.release()
+     return out_path
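+
+ # Usage sketch (illustrative only; assumes `from PIL import Image` and a video
+ # on disk -- the extension itself drives these helpers from mov2mov.py):
+ #   frames = get_mov_all_images('input.mp4', 12, rgb=True)
+ #   images_to_video([Image.fromarray(f) for f in frames], 12, 'out/result.mp4')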
scripts/module_ui_extensions.py ADDED
@@ -0,0 +1,56 @@
+ import gradio
+ from modules import script_callbacks, ui_components
+ from scripts import m2m_hook as patches
+
+
+ elem_ids = []
+
+
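+ # Mov2Mov rebuilds the img2img UI inside its own tab, so every component would
+ # otherwise register twice under the same elem_id. The patched constructors
+ # below rename duplicates on the fly ("foo", "foo_1", "foo_2", ...).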
+ def fix_elem_id(component, **kwargs):
+     if "elem_id" not in kwargs:
+         return None
+     elem_id = kwargs["elem_id"]
+     if not elem_id:
+         return None
+     if elem_id not in elem_ids:
+         elem_ids.append(elem_id)
+     else:
+         # Record the duplicate before counting, so the n-th duplicate becomes
+         # "<id>_n" instead of every duplicate colliding on "<id>_1"
+         elem_ids.append(elem_id)
+         elem_id = elem_id + "_" + str(elem_ids.count(elem_id) - 1)
+
+     return elem_id
+
+
+ def IOComponent_init(self, *args, **kwargs):
+     elem_id = fix_elem_id(self, **kwargs)
+     if elem_id:
+         kwargs.pop("elem_id")
+         res = original_IOComponent_init(self, elem_id=elem_id, *args, **kwargs)
+     else:
+         res = original_IOComponent_init(self, *args, **kwargs)
+     return res
+
+
+ def InputAccordion_init(self, *args, **kwargs):
+     elem_id = fix_elem_id(self, **kwargs)
+     if elem_id:
+         kwargs.pop("elem_id")
+         res = original_InputAccordion_init(self, elem_id=elem_id, *args, **kwargs)
+     else:
+         res = original_InputAccordion_init(self, *args, **kwargs)
+     return res
+
+
+ original_IOComponent_init = patches.patch(
+     __name__,
+     obj=gradio.components.IOComponent,
+     field="__init__",
+     replacement=IOComponent_init,
+ )
+
+ original_InputAccordion_init = patches.patch(
+     __name__,
+     obj=ui_components.InputAccordion,
+     field="__init__",
+     replacement=InputAccordion_init,
+ )
scripts/mov2mov.py ADDED
@@ -0,0 +1,302 @@
+ import os.path
+ import platform
+ import time
+
+ import PIL.Image
+ from tqdm import tqdm
+
+ import modules
+
+ import cv2
+ import numpy as np
+ import pandas
+ from PIL import Image
+ from modules import shared, processing
+ from modules.generation_parameters_copypaste import create_override_settings_dict
+ from modules.processing import StableDiffusionProcessingImg2Img, process_images, Processed
+ from modules.shared import opts, state
+ from modules.ui import plaintext_to_html
+ import modules.scripts as scripts
+
+ from scripts.m2m_util import get_mov_all_images, images_to_video
+ from scripts.m2m_config import mov2mov_outpath_samples, mov2mov_output_dir
+ from ebsynth import EbsynthGenerate, Keyframe
+
+ scripts_mov2mov = scripts.ScriptRunner()
+
+
+ def check_data_frame(df: pandas.DataFrame):
+     # Drop rows whose frame number is 0
+     df = df[df['frame'] > 0]
+
+     # The table must contain at least one keyframe
+     if len(df) <= 0:
+         return False
+
+     return True
+
+
+ def save_video(images, fps, extension='.mp4'):
+     if not os.path.exists(shared.opts.data.get("mov2mov_output_dir", mov2mov_output_dir)):
+         os.makedirs(shared.opts.data.get("mov2mov_output_dir", mov2mov_output_dir), exist_ok=True)
+
+     r_f = extension
+
+     print(f'Start generating {r_f} file')
+
+     video = images_to_video(images, fps,
+                             os.path.join(shared.opts.data.get("mov2mov_output_dir", mov2mov_output_dir),
+                                          str(int(time.time())) + r_f))
+     print(f'Generation complete, saved to: {video}')
+
+     return video
+
+
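+ # Plain mode: every sampled frame is run through img2img independently and the
+ # results are stitched back into a video at the requested frame rate.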
+ def process_mov2mov(p, mov_file, movie_frames, max_frames, resize_mode, w, h, args):
+     processing.fix_seed(p)
+     images = get_mov_all_images(mov_file, movie_frames)
+     if not images:
+         print('Failed to parse the video, please check the file')
+         return
+
+     print(f'Video parsed, frames extracted: {len(images)}')
+     if max_frames == -1 or max_frames > len(images):
+         max_frames = len(images)
+
+     max_frames = int(max_frames)
+
+     p.do_not_save_grid = True
+     state.job_count = max_frames  # * p.n_iter
+     generate_images = []
+     for i, image in enumerate(images):
+         if i >= max_frames:
+             break
+
+         state.job = f"{i + 1} out of {max_frames}"
+         if state.skipped:
+             state.skipped = False
+
+         if state.interrupted:
+             break
+
+         # Use the source frame as the init image
+         img = Image.fromarray(cv2.cvtColor(image, cv2.COLOR_BGR2RGB), 'RGB')
+
+         p.init_images = [img] * p.batch_size
+         proc = scripts_mov2mov.run(p, *args)
+         if proc is None:
+             print(f'current progress: {i + 1}/{max_frames}')
+             processed = process_images(p)
+         # Only keep the first generated image
+         gen_image = processed.images[0]
+         generate_images.append(gen_image)
+
+     video = save_video(generate_images, movie_frames)
+
+     return video
+
+
+ def process_keyframes(p, mov_file, fps, df, args):
+     processing.fix_seed(p)
+     images = get_mov_all_images(mov_file, fps)
+     if not images:
+         print('Failed to parse the video, please check the file')
+         return
+
+     # Pre-process the frames with the target size and resize mode
+     images = [PIL.Image.fromarray(image) for image in images]
+     images = [modules.images.resize_image(p.resize_mode, image, p.width, p.height) for image in images]
+     images = [np.asarray(image) for image in images]
+
+     default_prompt = p.prompt
+     max_frames = len(df)
+
+     p.do_not_save_grid = True
+     state.job_count = max_frames  # * p.n_iter
+     generate_images = []
+
+     for i, row in df.iterrows():
+         p.prompt = default_prompt + row['prompt']
+         frame = images[row['frame'] - 1]
+
+         state.job = f"{i + 1} out of {max_frames}"
+         if state.skipped:
+             state.skipped = False
+
+         if state.interrupted:
+             break
+
+         img = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB), 'RGB')
+         p.init_images = [img]
+         proc = scripts_mov2mov.run(p, *args)
+         if proc is None:
+             print(f'current progress: {i + 1}/{max_frames}')
+             processed = process_images(p)
+         gen_image = processed.images[0]
+
+         if gen_image.height != p.height or gen_image.width != p.width:
+             print(f'Warning: the generated image size does not match the requested size, '
+                   f'please check the configuration parameters')
+             gen_image = gen_image.resize((p.width, p.height))
+
+         keyframe = Keyframe(row['frame'], np.asarray(gen_image), row['prompt'])
+         generate_images.append(keyframe)
+
+     # Generated images can drift by a few pixels, so align width/height once more
+     images = [PIL.Image.fromarray(image) for image in images]
+     images = [image.resize((p.width, p.height)) if image.width != p.width or image.height != p.height else image
+               for image in images]
+     images = [np.asarray(image) for image in images]
+
+     return generate_images, images
+
+
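+ # Editor mode: only the keyframes go through img2img; the Ebsynth tasks below
+ # propagate each keyframe's style across its neighbouring frames, and the
+ # overlapping sequences are merged back together afterwards.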
+ def process_mov2mov_ebsynth(p, eb_generate, weight=4.0):
+     from ebsynth._ebsynth import task as EbsyncthRun
+     tasks = eb_generate.get_tasks(weight)
+     tasks_len = len(tasks)
+     state.job_count = tasks_len  # * p.n_iter
+
+     for i, task in enumerate(tqdm(tasks)):
+
+         state.job = f"{i + 1} out of {tasks_len}"
+         if state.skipped:
+             state.skipped = False
+
+         if state.interrupted:
+             break
+
+         result = EbsyncthRun(task.style, [(task.source, task.target, task.weight)])
+         eb_generate.append_generate_frames(task.key_frame_num, task.frame_num, result)
+         state.nextjob()
+
+     print('Start merging frames')
+     result = eb_generate.merge_sequences()
+     video = save_video(result, eb_generate.fps)
+     return video
+
+
+ def mov2mov(id_task: str,
+             prompt,
+             negative_prompt,
+             prompt_styles,
+             mov_file,
+             steps,
+             sampler_name,
+             cfg_scale,
+             image_cfg_scale,
+             denoising_strength,
+             height,
+             width,
+             resize_mode,
+             override_settings_texts,
+
+             # refiner
+             # enable_refiner, refiner_checkpoint, refiner_switch_at,
+             # mov2mov params
+
+             noise_multiplier,
+             movie_frames,
+             max_frames,
+             # editor
+             enable_movie_editor,
+             df: pandas.DataFrame,
+             eb_weight,
+
+             *args):
+     if not mov_file:
+         raise Exception('Error! Please add a video file!')
+
+     override_settings = create_override_settings_dict(override_settings_texts)
+     assert 0. <= denoising_strength <= 1., 'can only work with strength in [0.0, 1.0]'
+     mask_blur = 4
+     inpainting_fill = 1
+     inpaint_full_res = False
+     inpaint_full_res_padding = 32
+     inpainting_mask_invert = 0
+
+     p = StableDiffusionProcessingImg2Img(
+         sd_model=shared.sd_model,
+         outpath_samples=shared.opts.data.get("mov2mov_outpath_samples", mov2mov_outpath_samples),
+         outpath_grids=opts.outdir_grids or opts.outdir_img2img_grids,
+         prompt=prompt,
+         negative_prompt=negative_prompt,
+         styles=prompt_styles,
+         sampler_name=sampler_name,
+         batch_size=1,
+         n_iter=1,
+         steps=steps,
+         cfg_scale=cfg_scale,
+         width=width,
+         height=height,
+         init_images=[None],
+         mask=None,
+
+         mask_blur=mask_blur,
+         inpainting_fill=inpainting_fill,
+         resize_mode=resize_mode,
+         denoising_strength=denoising_strength,
+         image_cfg_scale=image_cfg_scale,
+         inpaint_full_res=inpaint_full_res,
+         inpaint_full_res_padding=inpaint_full_res_padding,
+         inpainting_mask_invert=inpainting_mask_invert,
+         override_settings=override_settings,
+         initial_noise_multiplier=noise_multiplier
+
+     )
+
+     p.scripts = scripts_mov2mov
+     p.script_args = args
+     # print('script_args', args)
+     #
+     # if not enable_refiner or refiner_checkpoint in (None, "", "None"):
+     #     p.refiner_checkpoint = None
+     #     p.refiner_switch_at = None
+     # else:
+     #     p.refiner_checkpoint = refiner_checkpoint
+     #     p.refiner_switch_at = refiner_switch_at
+
+     if shared.cmd_opts.enable_console_prompts:
+         print(f"\nmov2mov: {prompt}", file=shared.progress_print_out)
+
+     p.extra_generation_params["Mask blur"] = mask_blur
+
+     if not enable_movie_editor:
+         print('\nStart parsing mov frames')
+         generate_video = process_mov2mov(p, mov_file, movie_frames, max_frames, resize_mode, width, height, args)
+         processed = Processed(p, [], p.seed, "")
+     else:
+         # editor
+         if platform.system() != 'Windows':
+             raise Exception('The Movie Editor is currently only supported on Windows')
+
+         # The editor table must contain at least one keyframe
+         if not check_data_frame(df):
+             raise Exception('Please add a frame in the Movie Editor or disable it')
+
+         # Sort the table by frame number and rebuild the index
+         df = df.sort_values(by='frame').reset_index(drop=True)
+
+         # Generate the keyframes
+         print('Start generating keyframes')
+         keyframes, frames = process_keyframes(p, mov_file, movie_frames, df, args)
+         eb_generate = EbsynthGenerate(keyframes, frames, movie_frames)
+         print('\nStart generating frames')
+
+         generate_video = process_mov2mov_ebsynth(p, eb_generate, weight=eb_weight)
+
+         processed = Processed(p, [], p.seed, "")
+     p.close()
+
+     shared.total_tqdm.clear()
+
+     generation_info_js = processed.js()
+     if opts.samples_log_stdout:
+         print(generation_info_js)
+
+     if opts.do_not_show_images:
+         processed.images = []
+
+     return generate_video, generation_info_js, plaintext_to_html(processed.info), plaintext_to_html(
+         processed.comments, classname="comments")
scripts/movie_editor.py ADDED
@@ -0,0 +1,345 @@
+ import platform
+
+ import gradio as gr
+ import pandas
+ from PIL import Image
+ from tqdm import tqdm
+
+ from modules import shared, deepbooru
+ from modules.ui_components import InputAccordion, ToolButton
+ from scripts import m2m_util
+
+
+ class MovieEditor:
+     def __init__(self, id_part, gr_movie: gr.Video, gr_fps: gr.Slider):
+         self.gr_eb_weight = None
+         self.gr_df = None
+         self.gr_keyframe = None
+         self.gr_frame_number = None
+         self.gr_frame_image = None
+         self.gr_movie = gr_movie
+         self.gr_fps = gr_fps
+         self.gr_enable_movie_editor = None
+
+         self.is_windows = platform.system() == "Windows"
+         self.id_part = id_part
+         self.frames = []
+         self.frame_count = 0
+         self.selected_data_frame = -1
+
+     def render(self):
+         id_part = self.id_part
+         with InputAccordion(
+             True, label="Movie Editor", elem_id=f"{id_part}_editor_accordion"
+         ):
+             gr.HTML(
+                 "<div style='color:red;font-weight: bold;border: 2px solid yellow;padding: 10px;font-size: 20px;'>"
+                 "This feature is in beta version!!! <br>"
+                 "It only supports Windows!!! <br>"
+                 "Make sure you have installed the ControlNet and IP-Adapter models."
+                 "</div>"
+             )
+
+             self.gr_enable_movie_editor = gr.Checkbox(
+                 label="Enable Movie Editor",
+                 elem_id=f"{id_part}_editor_enable",
+             )
+
+             self.gr_frame_image = gr.Image(
+                 label="Frame",
+                 elem_id=f"{id_part}_video_frame",
+                 source="upload",
+                 visible=False,
+                 height=480,
+             )
+
+             # key frame tabs
+             with gr.Tabs(elem_id=f"{id_part}_keyframe_tabs"):
+                 with gr.TabItem("Custom", id=f"{id_part}_keyframe_tab_custom"):
+                     with gr.Row():
+                         self.gr_frame_number = gr.Slider(
+                             label="Frame number",
+                             elem_id=f"{id_part}_video_frame_number",
+                             step=1,
+                             maximum=0,
+                             minimum=0,
+                         )
+
+                 with gr.TabItem("Auto", elem_id=f"{id_part}_keyframe_tab_auto"):
+                     with gr.Row():
+                         key_frame_interval = gr.Slider(
+                             label="Key frame interval",
+                             elem_id=f"{id_part}_video_keyframe_interval",
+                             step=1,
+                             maximum=100,
+                             minimum=0,
+                             value=2,
+                         )
+                         key_frame_interval_generate = ToolButton(
+                             "♺",
+                             elem_id=f"{id_part}_video_editor_auto_keyframe",
+                             visible=True,
+                             tooltip="generate",
+                         )
+
+             with gr.Row(elem_id=f"{id_part}_keyframe_custom_container"):
+                 add_keyframe = ToolButton(
+                     "✚",
+                     elem_id=f"{id_part}_video_editor_add_keyframe",
+                     visible=True,
+                     tooltip="Add keyframe",
+                 )
+                 remove_keyframe = ToolButton(
+                     "✖",
+                     elem_id=f"{id_part}_video_editor_remove_keyframe",
+                     visible=True,
+                     tooltip="Remove selected keyframe",
+                 )
+
+                 clear_keyframe = ToolButton(
+                     "🗑",
+                     elem_id=f"{id_part}_video_editor_clear_keyframe",
+                     visible=True,
+                     tooltip="Clear keyframe",
+                 )
+
+             with gr.Row():
+                 data_frame = gr.Dataframe(
+                     headers=["id", "frame", "prompt"],
+                     datatype=["number", "number", "str"],
+                     row_count=1,
+                     col_count=(3, "fixed"),
+                     max_rows=None,
+                     height=480,
+                     elem_id=f"{id_part}_video_editor_custom_data_frame",
+                 )
+                 self.gr_df = data_frame
+
+             with gr.Row():
+                 interrogate = gr.Button(
+                     value="Clip Interrogate Keyframe",
+                     size="sm",
+                     elem_id=f"{id_part}_video_editor_interrogate",
+                 )
+                 deepbooru_btn = gr.Button(
+                     value="Deepbooru Keyframe",
+                     size="sm",
+                     elem_id=f"{id_part}_video_editor_deepbooru",
+                 )
+
+             with gr.Row():
+                 self.gr_eb_weight = gr.Slider(
+                     label="EbSynth weight",
+                     elem_id=f"{id_part}_video_eb_weight",
+                     step=0.1,
+                     maximum=10,
+                     minimum=0,
+                     value=4.0,
+                 )
+
+         self.gr_movie.change(
+             fn=self.movie_change,
+             inputs=[self.gr_movie],
+             outputs=[self.gr_frame_image, self.gr_frame_number, self.gr_fps],
+             show_progress=True,
+         )
+
+         self.gr_frame_number.change(
+             fn=self.movie_frame_change,
+             inputs=[self.gr_movie, self.gr_frame_number],
+             outputs=[self.gr_frame_image],
+             show_progress=True,
+         )
+
+         self.gr_fps.change(
+             fn=self.fps_change,
+             inputs=[self.gr_movie, self.gr_fps],
+             outputs=[self.gr_frame_image, self.gr_frame_number],
+             show_progress=True,
+         )
+
+         data_frame.select(self.data_frame_select, data_frame, self.gr_frame_number)
+
+         add_keyframe.click(
+             fn=self.add_keyframe_click,
+             inputs=[data_frame, self.gr_frame_number],
+             outputs=[data_frame],
+             show_progress=False,
+         )
+         remove_keyframe.click(
+             fn=self.remove_keyframe_click,
+             inputs=[data_frame],
+             outputs=[data_frame],
+             show_progress=False,
+         )
+
+         clear_keyframe.click(
+             # drop all rows and return the emptied DataFrame
+             fn=lambda df: df.drop(df.index),
+             inputs=[data_frame],
+             outputs=[data_frame],
+             show_progress=False,
+         )
+
+         key_frame_interval_generate.click(
+             fn=self.key_frame_interval_generate_click,
+             inputs=[data_frame, key_frame_interval],
+             outputs=[data_frame],
+             show_progress=True,
+         )
+
+         interrogate.click(
+             fn=self.interrogate_keyframe,
+             inputs=[data_frame],
+             outputs=[data_frame],
+             show_progress=True,
+         )
+
+         deepbooru_btn.click(
+             fn=self.deepbooru_keyframe,
+             inputs=[data_frame],
+             outputs=[data_frame],
+             show_progress=True,
+         )
+
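+     # --- keyframe prompt helpers -------------------------------------------
+     # The two taggers below walk the keyframe table and fill in the "prompt"
+     # column from the selected frame, via CLIP interrogate or Deepbooru.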
+     def interrogate_keyframe(self, data_frame: pandas.DataFrame):
+         """
+         Interrogate key frames
+         """
+         bar = tqdm(total=len(data_frame))
+         for index, row in data_frame.iterrows():
+             if row["frame"] <= 0:
+                 continue
+             bar.set_description(f'Interrogate key frame {row["frame"]}')
+             frame = row["frame"] - 1
+             image = self.frames[frame]
+             image = Image.fromarray(image)
+             prompt = shared.interrogator.interrogate(image.convert("RGB"))
+             data_frame.at[index, "prompt"] = prompt
+             bar.update(1)
+
+         return data_frame
+
+     def deepbooru_keyframe(self, data_frame: pandas.DataFrame):
+         """
+         Deepbooru key frames
+         """
+         bar = tqdm(total=len(data_frame))
+         for index, row in data_frame.iterrows():
+             if row["frame"] <= 0:
+                 continue
+             bar.set_description(f'Deepbooru key frame {row["frame"]}')
+             frame = row["frame"] - 1
+             image = self.frames[frame]
+             image = Image.fromarray(image)
+             prompt = deepbooru.model.tag(image)
+             data_frame.at[index, "prompt"] = prompt
+             bar.update(1)
+
+         return data_frame
+
+     def data_frame_select(self, event: gr.SelectData, data_frame: pandas.DataFrame):
+         row, col = event.index
+         self.selected_data_frame = row
+         row = data_frame.iloc[row]
+         frame = row["frame"]
+         if 0 < frame <= self.frame_count:
+             return int(frame)
+         else:
+             return 0
+
+     def add_keyframe_click(self, data_frame: pandas.DataFrame, gr_frame_number: int):
+         """
+         Add a key frame to the data frame
+         """
+         if gr_frame_number < 1:
+             return data_frame
+
+         data_frame = data_frame[data_frame["frame"] > 0]
+
+         if gr_frame_number in data_frame["frame"].values:
+             return data_frame
+
+         row = {"id": len(data_frame), "frame": gr_frame_number, "prompt": ""}
+         data_frame.loc[len(data_frame)] = row
+
+         data_frame = data_frame.sort_values(by="frame").reset_index(drop=True)
+
+         data_frame["id"] = range(len(data_frame))
+
+         return data_frame
+
+     def remove_keyframe_click(self, data_frame: pandas.DataFrame):
+         """
+         Remove the selected key frame
+         """
+         if self.selected_data_frame < 0:
+             return data_frame
+
+         data_frame = data_frame.drop(self.selected_data_frame)
+
+         data_frame = data_frame.sort_values(by="frame").reset_index(drop=True)
+
+         data_frame["id"] = range(len(data_frame))
+
+         return data_frame
+
+     def key_frame_interval_generate_click(
+         self, data_frame: pandas.DataFrame, key_frame_interval: int
+     ):
+         if key_frame_interval < 1:
+             return data_frame
+
+         # Add a keyframe every key_frame_interval frames
+         for i in range(0, self.frame_count, key_frame_interval):
+             data_frame = self.add_keyframe_click(data_frame, i + 1)
+
+         # Always include the last frame
+         data_frame = self.add_keyframe_click(data_frame, self.frame_count)
+
+         return data_frame
+
+     def movie_change(self, movie_path):
+         if not movie_path:
+             return (
+                 gr.Image.update(visible=False),
+                 gr.Slider.update(maximum=0, minimum=0),
+                 gr.Slider.update(),
+             )
+         fps = m2m_util.get_mov_fps(movie_path)
+         self.frames = m2m_util.get_mov_all_images(movie_path, fps, True)
+
+         self.frame_count = len(self.frames)
+         return (
+             gr.Image.update(visible=True),
+             gr.Slider.update(maximum=self.frame_count, minimum=0, value=0),
+             gr.Slider.update(maximum=fps, minimum=0, value=fps),
+         )
+
+     def movie_frame_change(self, movie_path, frame_number):
+         if not movie_path:
+             return gr.Image.update(visible=False)
+
+         if frame_number <= 0:
+             return gr.Image.update(
+                 visible=True, label=f"Frame: {frame_number}", value=None
+             )
+
+         return gr.Image.update(
+             visible=True,
+             label=f"Frame: {frame_number}",
+             value=self.frames[frame_number - 1],
+         )
+
+     def fps_change(self, movie_path, fps):
+         if not movie_path:
+             return gr.Image.update(visible=False), gr.Slider.update(
+                 maximum=0, minimum=0
+             )
+
+         self.frames = m2m_util.get_mov_all_images(movie_path, fps, True)
+         self.frame_count = len(self.frames)
+         return (
+             gr.Image.update(visible=True),
+             gr.Slider.update(maximum=self.frame_count, minimum=0, value=0),
+         )
style.css ADDED
@@ -0,0 +1,172 @@
+ #mov2mov_interrupt, #mov2mov_skip {
+     position: absolute;
+     width: 50%;
+     height: 100%;
+     background: #b4c0cc;
+     display: none;
+ }
+
+ /*#mov2mov_modnet_model, #mov2mov_merge_background {*/
+ /*    display: none;*/
+ /*}*/
+
+ #modnet_background_movie {
+     display: none;
+ }
+
+
+ #mov2mov_video {
+     display: none;
+ }
+
+ #mov2mov_generate {
+     min-height: 7.5em;
+ }
+
+ @media screen and (min-width: 2500px) {
+     #mov2mov_gallery {
+         min-height: 768px;
+     }
+ }
+
+ #mov2mov_gallery img {
+     object-fit: scale-down;
+ }
+
+ #mov2mov_tools {
+     gap: 0.4em;
+ }
+
+ #mov2mov_actions_column {
+     margin: 0.35rem 0.75rem 0.35rem 0;
+ }
+
+
+ #mov2mov_actions_column {
+     gap: 0;
+     margin-right: .75rem;
+ }
+
+
+ #mov2mov_styles_row {
+     gap: 0.25em;
+     margin-top: 0.3em;
+ }
+
+ #mov2mov_styles_row > button {
+     margin: 0;
+ }
+
+ #mov2mov_styles {
+     padding: 0;
+ }
+
+ #mov2mov_styles > label > div {
+     min-height: 3.2em;
+ }
+
+ #mov2mov_extra_networks .search {
+     display: inline-block;
+     max-width: 16em;
+     margin: 0.3em;
+     align-self: center;
+ }
+
+ #mov2mov_extra_view {
+     width: auto;
+ }
+
+ #mov2mov_preview {
+     position: absolute;
+     width: 320px;
+     left: 0;
+     right: 0;
+     margin-left: auto;
+     margin-right: auto;
+     margin-top: 34px;
+     z-index: 100;
+     border: none;
+     border-top-left-radius: 0;
+     border-top-right-radius: 0;
+ }
+
+ @media screen and (min-width: 768px) {
+     #mov2mov_preview {
+         position: absolute;
+     }
+ }
+
+ @media screen and (max-width: 767px) {
+     #mov2mov_preview {
+         position: relative;
+     }
+ }
+
+ #mov2mov_preview div.left-0.top-0 {
+     display: none;
+ }
+
+
+ #mov2mov_interrupt, #mov2mov_skip {
+     position: absolute;
+     width: 50%;
+     height: 100%;
+     background: #b4c0cc;
+     display: none;
+ }
+
+ #mov2mov_interrupt {
+     left: 0;
+     border-radius: 0.5rem 0 0 0.5rem;
+ }
+
+ #mov2mov_skip {
+     right: 0;
+     border-radius: 0 0.5rem 0.5rem 0;
+ }
+
+ #mov2mov_mov video {
+     height: 480px;
+     max-height: 480px;
+     min-height: 480px;
+ }
+
+ #mov2mov_video video {
+     height: 480px;
+     max-height: 480px;
+     min-height: 480px;
+ }
+
+ #mov2mov_video_frame img {
+     height: 480px;
+     max-height: 480px;
+     min-height: 480px;
+ }
+
+
+ #mov2mov_checkboxes {
+     margin-bottom: 0.5em;
+     margin-left: 0em;
+ }
+
+ #mov2mov_checkboxes > div {
+     flex: 0;
+     white-space: nowrap;
+     min-width: auto;
+ }
+
+ #mov2mov_extra_view {
+     width: auto;
+ }
+
+ /* dataframe */
+ #mov2mov_video_editor_custom_data_frame button {
+     display: none;
+ }
+
+ #mov2mov_keyframe_custom_container > div {
+     display: flex;
+     justify-content: center;
+     /* add spacing between the buttons */
+     gap: 1.5em;
+ }
tests/ebsynth/__init__.py ADDED
File without changes
tests/ebsynth/ebsynth_generate_test.py ADDED
@@ -0,0 +1,114 @@
+ import os
+
+ import numpy as np
+ import unittest
+ import cv2
+
+ import importlib
+
+ utils = importlib.import_module("extensions.sd-webui-mov2mov.tests.utils", "utils")
+ utils.setup_test_env()
+
+ from ebsynth.ebsynth_generate import EbsynthGenerate, Keyframe, Sequence
+
+
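+ # A Sequence is expected to span from the previous keyframe to the next one, so
+ # neighbouring sequences overlap and can be blended when they are merged back.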
+ class EbsynthGenerateTestCase(unittest.TestCase):
+     def get_image(self, folder, name):
+         return cv2.imread(os.path.join(os.path.dirname(__file__), 'images', folder, name))
+
+     def test_sequences(self):
+         # Simulate a 40-frame video with keyframes at 1, 10, 20 and 30
+         keyframes = [
+             Keyframe(1, None, None),
+             Keyframe(10, None, None),
+             Keyframe(20, None, None),
+             Keyframe(30, None, None),
+
+         ]
+         frames = [np.zeros((100, 100, 3))] * 40
+
+         eb_generate = EbsynthGenerate(keyframes, frames, 24)
+         eb_generate.setup_sequences()
+         self.assertEqual(len(eb_generate.sequences), 4)
+         self.assertEqual(eb_generate.sequences[0].start, 1)
+         self.assertEqual(eb_generate.sequences[0].keyframe.num, 1)
+         self.assertEqual(eb_generate.sequences[0].end, 10)
+
+         self.assertEqual(eb_generate.sequences[1].start, 1)
+         self.assertEqual(eb_generate.sequences[1].keyframe.num, 10)
+         self.assertEqual(eb_generate.sequences[1].end, 20)
+
+         self.assertEqual(eb_generate.sequences[2].start, 10)
+         self.assertEqual(eb_generate.sequences[2].keyframe.num, 20)
+         self.assertEqual(eb_generate.sequences[2].end, 30)
+
+         self.assertEqual(eb_generate.sequences[3].start, 20)
+         self.assertEqual(eb_generate.sequences[3].keyframe.num, 30)
+         self.assertEqual(eb_generate.sequences[3].end, 40)
+
+         self.assertEqual(list(eb_generate.sequences[0].frames.keys()), [1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
+         self.assertEqual(list(eb_generate.sequences[1].frames.keys()), [i for i in range(1, 21)])
+         self.assertEqual(list(eb_generate.sequences[2].frames.keys()), [i for i in range(10, 31)])
+         self.assertEqual(list(eb_generate.sequences[3].frames.keys()), [i for i in range(20, 41)])
+
+         keyframes = [
+             Keyframe(1, None, None),
+             Keyframe(3, None, None),
+             Keyframe(5, None, None),
+         ]
+         frames = [np.zeros((100, 100, 3))] * 10
+
+         eb_generate = EbsynthGenerate(keyframes, frames, 24)
+         eb_generate.setup_sequences()
+
+         self.assertEqual(len(eb_generate.sequences), 3)
+         self.assertEqual(eb_generate.sequences[0].start, 1)
+         self.assertEqual(eb_generate.sequences[0].keyframe.num, 1)
+         self.assertEqual(eb_generate.sequences[0].end, 3)
+
+         self.assertEqual(eb_generate.sequences[1].start, 1)
+         self.assertEqual(eb_generate.sequences[1].keyframe.num, 3)
+         self.assertEqual(eb_generate.sequences[1].end, 5)
+
+         self.assertEqual(eb_generate.sequences[2].start, 3)
+         self.assertEqual(eb_generate.sequences[2].keyframe.num, 5)
+         self.assertEqual(eb_generate.sequences[2].end, 10)
+
+         keyframes = [
+             Keyframe(1, None, None),
+             Keyframe(3, None, None),
+             Keyframe(5, None, None),
+         ]
+         frames = [np.zeros((100, 100, 3))] * 5
+
+         eb_generate = EbsynthGenerate(keyframes, frames, 24)
+         eb_generate.setup_sequences()
+
+         self.assertEqual(len(eb_generate.sequences), 3)
+         self.assertEqual(eb_generate.sequences[0].start, 1)
+         self.assertEqual(eb_generate.sequences[0].keyframe.num, 1)
+         self.assertEqual(eb_generate.sequences[0].end, 3)
+
+         self.assertEqual(eb_generate.sequences[1].start, 1)
+         self.assertEqual(eb_generate.sequences[1].keyframe.num, 3)
+         self.assertEqual(eb_generate.sequences[1].end, 5)
+
+         self.assertEqual(eb_generate.sequences[2].start, 3)
+         self.assertEqual(eb_generate.sequences[2].keyframe.num, 5)
+         self.assertEqual(eb_generate.sequences[2].end, 5)
+
+     def test_get_guides(self):
+         keyframes = [Keyframe(i, self.get_image('keys', f'{i:04d}.png'), '') for i in range(1, 72, 10)]
+         frames = [self.get_image('video', f'{i:04d}.png') for i in range(0, 73)]
+
+         eb_generate = EbsynthGenerate(keyframes, frames, 24)
+         tasks = eb_generate.get_tasks(4.0)
+         num = 0
+         for sequence in eb_generate.sequences:
+             for i in range(sequence.start, sequence.end + 1):
+                 self.assertEqual(tasks[num].frame_num, i)
+                 num += 1
+
+
+ if __name__ == '__main__':
+     unittest.main()
tests/ebsynth/ebsynth_test.py ADDED
@@ -0,0 +1,87 @@
+ import importlib
+ import os
+ import unittest
+
+ import cv2
+
+ utils = importlib.import_module("extensions.sd-webui-mov2mov.tests.utils", "utils")
+ utils.setup_test_env()
+
+ from ebsynth.ebsynth_generate import EbsynthGenerate, Keyframe, Sequence
+ from ebsynth._ebsynth import task as EbsyncthRun
+ from scripts import m2m_util
+
+
+ class MyTestCase(unittest.TestCase):
+     def get_image(self, folder, name):
+         return cv2.imread(os.path.join(os.path.dirname(__file__), 'images', folder, name))
+
+     def setUp(self) -> None:
+         self.keyframes = [Keyframe(i, self.get_image('keys', f'{i:04d}.png'), '') for i in range(1, 72, 10)]
+         frames = [self.get_image('video', f'{i:04d}.png') for i in range(0, 72)]
+
+         self.eb_generate = EbsynthGenerate(self.keyframes, frames, 24)
+
+     def test_keyframes(self):
+         for i, sequence in enumerate(self.eb_generate.sequences):
+             self.assertEqual(sequence.keyframe.num, self.keyframes[i].num)
+             self.assertTrue((sequence.keyframe.image == self.keyframes[i].image).all())
+
+     def test_task(self):
+         """
+         Check that the generated Ebsynth tasks are correct.
+         """
+
+         tasks = self.eb_generate.get_tasks(4.0)
+         for task in tasks:
+             result = EbsyncthRun(task.style, [(task.source, task.target, task.weight)])
+             dir_name = os.path.join(os.path.dirname(__file__), 'images', 'test', f'out_{task.key_frame_num}')
+             if not os.path.exists(dir_name):
+                 os.mkdir(dir_name)
+             cv2.imwrite(os.path.join(dir_name, f'{task.frame_num:04d}.png'), result)
+
+     def test_merge(self):
+         """
+         Check that merging the generated sequences is correct.
+         """
+
+         def get_sequence(keyframe_num):
+             for sequence in self.eb_generate.sequences:
+                 if sequence.keyframe.num == keyframe_num:
+                     return sequence
+             else:
+                 raise ValueError(f'not found key frame num {keyframe_num}')
+
+         # Simulated results
+         test_dir = os.path.join(os.path.dirname(__file__), 'images', 'test')
+
+         # Walk each out_{keyframe} folder
+         for keyframe in self.keyframes:
+             out_dir = os.path.join(test_dir, f'out_{keyframe.num:04d}')
+             # Read every {i:04d}.png in the out_{keyframe} folder and append it
+             # to eb_generate.generate_frames in order
+             sequence = get_sequence(keyframe.num)
+             for i in range(sequence.start, sequence.end + 1):
+                 self.eb_generate.append_generate_frames(keyframe.num, i,
+                                                         cv2.imread(os.path.join(out_dir, f'{i:04d}.png')))
+         # Merge the sequences
+         result = self.eb_generate.merge_sequences(0.4)
+
+         if not os.path.exists(os.path.join(test_dir, 'merge_1')):
+             os.mkdir(os.path.join(test_dir, 'merge_1'))
+
+         frames = []
+
+         for i, frame in enumerate(result):
+             if frame is not None:
+                 cv2.imwrite(os.path.join(test_dir, 'merge_1', f'{i:04d}.png'), frame)
+                 frames.append(frame)
+         m2m_util.images_to_video(frames, self.eb_generate.fps,
+                                  os.path.join(test_dir, 'merge_1', 'm.mp4'))
+
+
+ if __name__ == '__main__':
+     unittest.main()
tests/utils.py ADDED
@@ -0,0 +1,9 @@
+ import sys
+
+ import os
+
+
+ def setup_test_env():
+     ext_root = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
+     if ext_root not in sys.path:
+         sys.path.append(ext_root)