777Andy777 and hysts (HF staff) committed on
Commit 40922a3
0 Parent(s)

Duplicate from hysts/ControlNet-v1-1


Co-authored-by: hysts <hysts@users.noreply.huggingface.co>

.gitattributes ADDED
@@ -0,0 +1,34 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1,162 @@
+ gradio_cached_examples/
+
+ # Byte-compiled / optimized / DLL files
+ __pycache__/
+ *.py[cod]
+ *$py.class
+
+ # C extensions
+ *.so
+
+ # Distribution / packaging
+ .Python
+ build/
+ develop-eggs/
+ dist/
+ downloads/
+ eggs/
+ .eggs/
+ lib/
+ lib64/
+ parts/
+ sdist/
+ var/
+ wheels/
+ share/python-wheels/
+ *.egg-info/
+ .installed.cfg
+ *.egg
+ MANIFEST
+
+ # PyInstaller
+ # Usually these files are written by a python script from a template
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
+ *.manifest
+ *.spec
+
+ # Installer logs
+ pip-log.txt
+ pip-delete-this-directory.txt
+
+ # Unit test / coverage reports
+ htmlcov/
+ .tox/
+ .nox/
+ .coverage
+ .coverage.*
+ .cache
+ nosetests.xml
+ coverage.xml
+ *.cover
+ *.py,cover
+ .hypothesis/
+ .pytest_cache/
+ cover/
+
+ # Translations
+ *.mo
+ *.pot
+
+ # Django stuff:
+ *.log
+ local_settings.py
+ db.sqlite3
+ db.sqlite3-journal
+
+ # Flask stuff:
+ instance/
+ .webassets-cache
+
+ # Scrapy stuff:
+ .scrapy
+
+ # Sphinx documentation
+ docs/_build/
+
+ # PyBuilder
+ .pybuilder/
+ target/
+
+ # Jupyter Notebook
+ .ipynb_checkpoints
+
+ # IPython
+ profile_default/
+ ipython_config.py
+
+ # pyenv
+ # For a library or package, you might want to ignore these files since the code is
+ # intended to run in multiple environments; otherwise, check them in:
+ # .python-version
+
+ # pipenv
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
+ # install all needed dependencies.
+ #Pipfile.lock
+
+ # poetry
+ # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
+ # commonly ignored for libraries.
+ # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
+ #poetry.lock
+
+ # pdm
+ # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
+ #pdm.lock
+ # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
+ # in version control.
+ # https://pdm.fming.dev/#use-with-ide
+ .pdm.toml
+
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
+ __pypackages__/
+
+ # Celery stuff
+ celerybeat-schedule
+ celerybeat.pid
+
+ # SageMath parsed files
+ *.sage.py
+
+ # Environments
+ .env
+ .venv
+ env/
+ venv/
+ ENV/
+ env.bak/
+ venv.bak/
+
+ # Spyder project settings
+ .spyderproject
+ .spyproject
+
+ # Rope project settings
+ .ropeproject
+
+ # mkdocs documentation
+ /site
+
+ # mypy
+ .mypy_cache/
+ .dmypy.json
+ dmypy.json
+
+ # Pyre type checker
+ .pyre/
+
+ # pytype static type analyzer
+ .pytype/
+
+ # Cython debug symbols
+ cython_debug/
+
+ # PyCharm
+ # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
+ # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
+ # and can be added to the global gitignore or merged into this file. For a more nuclear
+ # option (not recommended) you can uncomment the following to ignore the entire idea folder.
+ #.idea/
.pre-commit-config.yaml ADDED
@@ -0,0 +1,36 @@
+ repos:
+   - repo: https://github.com/pre-commit/pre-commit-hooks
+     rev: v4.2.0
+     hooks:
+       - id: check-executables-have-shebangs
+       - id: check-json
+       - id: check-merge-conflict
+       - id: check-shebang-scripts-are-executable
+       - id: check-toml
+       - id: check-yaml
+       - id: double-quote-string-fixer
+       - id: end-of-file-fixer
+       - id: mixed-line-ending
+         args: ['--fix=lf']
+       - id: requirements-txt-fixer
+       - id: trailing-whitespace
+   - repo: https://github.com/myint/docformatter
+     rev: v1.4
+     hooks:
+       - id: docformatter
+         args: ['--in-place']
+   - repo: https://github.com/pycqa/isort
+     rev: 5.12.0
+     hooks:
+       - id: isort
+   - repo: https://github.com/pre-commit/mirrors-mypy
+     rev: v0.991
+     hooks:
+       - id: mypy
+         args: ['--ignore-missing-imports']
+         additional_dependencies: ['types-python-slugify']
+   - repo: https://github.com/google/yapf
+     rev: v0.32.0
+     hooks:
+       - id: yapf
+         args: ['--parallel', '--in-place']
.style.yapf ADDED
@@ -0,0 +1,5 @@
+ [style]
+ based_on_style = pep8
+ blank_line_before_nested_class_or_def = false
+ spaces_before_comment = 2
+ split_before_logical_operator = true
.vscode/settings.json ADDED
@@ -0,0 +1,18 @@
+ {
+     "python.linting.enabled": true,
+     "python.linting.flake8Enabled": true,
+     "python.linting.pylintEnabled": false,
+     "python.linting.lintOnSave": true,
+     "python.formatting.provider": "yapf",
+     "python.formatting.yapfArgs": [
+         "--style={based_on_style: pep8, indent_width: 4, blank_line_before_nested_class_or_def: false, spaces_before_comment: 2, split_before_logical_operator: true}"
+     ],
+     "[python]": {
+         "editor.formatOnType": true,
+         "editor.codeActionsOnSave": {
+             "source.organizeImports": true
+         }
+     },
+     "editor.formatOnSave": true,
+     "files.insertFinalNewline": true
+ }
LICENSE ADDED
@@ -0,0 +1,21 @@
+ MIT License
+
+ Copyright (c) 2023 hysts
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
LICENSE.ControlNet ADDED
@@ -0,0 +1,201 @@
+                                  Apache License
+                            Version 2.0, January 2004
+                         http://www.apache.org/licenses/
+
+    TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+    1. Definitions.
+
+       "License" shall mean the terms and conditions for use, reproduction,
+       and distribution as defined by Sections 1 through 9 of this document.
+
+       "Licensor" shall mean the copyright owner or entity authorized by
+       the copyright owner that is granting the License.
+
+       "Legal Entity" shall mean the union of the acting entity and all
+       other entities that control, are controlled by, or are under common
+       control with that entity. For the purposes of this definition,
+       "control" means (i) the power, direct or indirect, to cause the
+       direction or management of such entity, whether by contract or
+       otherwise, or (ii) ownership of fifty percent (50%) or more of the
+       outstanding shares, or (iii) beneficial ownership of such entity.
+
+       "You" (or "Your") shall mean an individual or Legal Entity
+       exercising permissions granted by this License.
+
+       "Source" form shall mean the preferred form for making modifications,
+       including but not limited to software source code, documentation
+       source, and configuration files.
+
+       "Object" form shall mean any form resulting from mechanical
+       transformation or translation of a Source form, including but
+       not limited to compiled object code, generated documentation,
+       and conversions to other media types.
+
+       "Work" shall mean the work of authorship, whether in Source or
+       Object form, made available under the License, as indicated by a
+       copyright notice that is included in or attached to the work
+       (an example is provided in the Appendix below).
+
+       "Derivative Works" shall mean any work, whether in Source or Object
+       form, that is based on (or derived from) the Work and for which the
+       editorial revisions, annotations, elaborations, or other modifications
+       represent, as a whole, an original work of authorship. For the purposes
+       of this License, Derivative Works shall not include works that remain
+       separable from, or merely link (or bind by name) to the interfaces of,
+       the Work and Derivative Works thereof.
+
+       "Contribution" shall mean any work of authorship, including
+       the original version of the Work and any modifications or additions
+       to that Work or Derivative Works thereof, that is intentionally
+       submitted to Licensor for inclusion in the Work by the copyright owner
+       or by an individual or Legal Entity authorized to submit on behalf of
+       the copyright owner. For the purposes of this definition, "submitted"
+       means any form of electronic, verbal, or written communication sent
+       to the Licensor or its representatives, including but not limited to
+       communication on electronic mailing lists, source code control systems,
+       and issue tracking systems that are managed by, or on behalf of, the
+       Licensor for the purpose of discussing and improving the Work, but
+       excluding communication that is conspicuously marked or otherwise
+       designated in writing by the copyright owner as "Not a Contribution."
+
+       "Contributor" shall mean Licensor and any individual or Legal Entity
+       on behalf of whom a Contribution has been received by Licensor and
+       subsequently incorporated within the Work.
+
+    2. Grant of Copyright License. Subject to the terms and conditions of
+       this License, each Contributor hereby grants to You a perpetual,
+       worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+       copyright license to reproduce, prepare Derivative Works of,
+       publicly display, publicly perform, sublicense, and distribute the
+       Work and such Derivative Works in Source or Object form.
+
+    3. Grant of Patent License. Subject to the terms and conditions of
+       this License, each Contributor hereby grants to You a perpetual,
+       worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+       (except as stated in this section) patent license to make, have made,
+       use, offer to sell, sell, import, and otherwise transfer the Work,
+       where such license applies only to those patent claims licensable
+       by such Contributor that are necessarily infringed by their
+       Contribution(s) alone or by combination of their Contribution(s)
+       with the Work to which such Contribution(s) was submitted. If You
+       institute patent litigation against any entity (including a
+       cross-claim or counterclaim in a lawsuit) alleging that the Work
+       or a Contribution incorporated within the Work constitutes direct
+       or contributory patent infringement, then any patent licenses
+       granted to You under this License for that Work shall terminate
+       as of the date such litigation is filed.
+
+    4. Redistribution. You may reproduce and distribute copies of the
+       Work or Derivative Works thereof in any medium, with or without
+       modifications, and in Source or Object form, provided that You
+       meet the following conditions:
+
+       (a) You must give any other recipients of the Work or
+           Derivative Works a copy of this License; and
+
+       (b) You must cause any modified files to carry prominent notices
+           stating that You changed the files; and
+
+       (c) You must retain, in the Source form of any Derivative Works
+           that You distribute, all copyright, patent, trademark, and
+           attribution notices from the Source form of the Work,
+           excluding those notices that do not pertain to any part of
+           the Derivative Works; and
+
+       (d) If the Work includes a "NOTICE" text file as part of its
+           distribution, then any Derivative Works that You distribute must
+           include a readable copy of the attribution notices contained
+           within such NOTICE file, excluding those notices that do not
+           pertain to any part of the Derivative Works, in at least one
+           of the following places: within a NOTICE text file distributed
+           as part of the Derivative Works; within the Source form or
+           documentation, if provided along with the Derivative Works; or,
+           within a display generated by the Derivative Works, if and
+           wherever such third-party notices normally appear. The contents
+           of the NOTICE file are for informational purposes only and
+           do not modify the License. You may add Your own attribution
+           notices within Derivative Works that You distribute, alongside
+           or as an addendum to the NOTICE text from the Work, provided
+           that such additional attribution notices cannot be construed
+           as modifying the License.
+
+       You may add Your own copyright statement to Your modifications and
+       may provide additional or different license terms and conditions
+       for use, reproduction, or distribution of Your modifications, or
+       for any such Derivative Works as a whole, provided Your use,
+       reproduction, and distribution of the Work otherwise complies with
+       the conditions stated in this License.
+
+    5. Submission of Contributions. Unless You explicitly state otherwise,
+       any Contribution intentionally submitted for inclusion in the Work
+       by You to the Licensor shall be under the terms and conditions of
+       this License, without any additional terms or conditions.
+       Notwithstanding the above, nothing herein shall supersede or modify
+       the terms of any separate license agreement you may have executed
+       with Licensor regarding such Contributions.
+
+    6. Trademarks. This License does not grant permission to use the trade
+       names, trademarks, service marks, or product names of the Licensor,
+       except as required for reasonable and customary use in describing the
+       origin of the Work and reproducing the content of the NOTICE file.
+
+    7. Disclaimer of Warranty. Unless required by applicable law or
+       agreed to in writing, Licensor provides the Work (and each
+       Contributor provides its Contributions) on an "AS IS" BASIS,
+       WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+       implied, including, without limitation, any warranties or conditions
+       of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+       PARTICULAR PURPOSE. You are solely responsible for determining the
+       appropriateness of using or redistributing the Work and assume any
+       risks associated with Your exercise of permissions under this License.
+
+    8. Limitation of Liability. In no event and under no legal theory,
+       whether in tort (including negligence), contract, or otherwise,
+       unless required by applicable law (such as deliberate and grossly
+       negligent acts) or agreed to in writing, shall any Contributor be
+       liable to You for damages, including any direct, indirect, special,
+       incidental, or consequential damages of any character arising as a
+       result of this License or out of the use or inability to use the
+       Work (including but not limited to damages for loss of goodwill,
+       work stoppage, computer failure or malfunction, or any and all
+       other commercial damages or losses), even if such Contributor
+       has been advised of the possibility of such damages.
+
+    9. Accepting Warranty or Additional Liability. While redistributing
+       the Work or Derivative Works thereof, You may choose to offer,
+       and charge a fee for, acceptance of support, warranty, indemnity,
+       or other liability obligations and/or rights consistent with this
+       License. However, in accepting such obligations, You may act only
+       on Your own behalf and on Your sole responsibility, not on behalf
+       of any other Contributor, and only if You agree to indemnify,
+       defend, and hold each Contributor harmless for any liability
+       incurred by, or claims asserted against, such Contributor by reason
+       of your accepting any such warranty or additional liability.
+
+    END OF TERMS AND CONDITIONS
+
+    APPENDIX: How to apply the Apache License to your work.
+
+       To apply the Apache License to your work, attach the following
+       boilerplate notice, with the fields enclosed by brackets "[]"
+       replaced with your own identifying information. (Don't include
+       the brackets!) The text should be enclosed in the appropriate
+       comment syntax for the file format. We also recommend that a
+       file or class name and description of purpose be included on the
+       same "printed page" as the copyright notice for easier
+       identification within third-party archives.
+
+    Copyright [yyyy] [name of copyright owner]
+
+    Licensed under the Apache License, Version 2.0 (the "License");
+    you may not use this file except in compliance with the License.
+    You may obtain a copy of the License at
+
+        http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
README.md ADDED
@@ -0,0 +1,16 @@
+ ---
+ title: ControlNet V1.1
+ emoji: 📉
+ colorFrom: yellow
+ colorTo: green
+ sdk: gradio
+ sdk_version: 3.36.1
+ python_version: 3.10.11
+ app_file: app.py
+ pinned: false
+ license: mit
+ suggested_hardware: t4-medium
+ duplicated_from: hysts/ControlNet-v1-1
+ ---
+
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,107 @@
+ #!/usr/bin/env python
+
+ from __future__ import annotations
+
+ import gradio as gr
+ import torch
+
+ from app_canny import create_demo as create_demo_canny
+ from app_depth import create_demo as create_demo_depth
+ from app_ip2p import create_demo as create_demo_ip2p
+ from app_lineart import create_demo as create_demo_lineart
+ from app_mlsd import create_demo as create_demo_mlsd
+ from app_normal import create_demo as create_demo_normal
+ from app_openpose import create_demo as create_demo_openpose
+ from app_scribble import create_demo as create_demo_scribble
+ from app_scribble_interactive import \
+     create_demo as create_demo_scribble_interactive
+ from app_segmentation import create_demo as create_demo_segmentation
+ from app_shuffle import create_demo as create_demo_shuffle
+ from app_softedge import create_demo as create_demo_softedge
+ from model import Model
+ from settings import (ALLOW_CHANGING_BASE_MODEL, DEFAULT_MODEL_ID,
+                       SHOW_DUPLICATE_BUTTON)
+
+ DESCRIPTION = '# ControlNet v1.1'
+
+ if not torch.cuda.is_available():
+     DESCRIPTION += '\n<p>Running on CPU 🥶 This demo does not work on CPU.</p>'
+
+ model = Model(base_model_id=DEFAULT_MODEL_ID, task_name='Canny')
+
+ with gr.Blocks(css='style.css') as demo:
+     gr.Markdown(DESCRIPTION)
+     gr.DuplicateButton(value='Duplicate Space for private use',
+                        elem_id='duplicate-button',
+                        visible=SHOW_DUPLICATE_BUTTON)
+
+     with gr.Tabs():
+         with gr.TabItem('Canny'):
+             create_demo_canny(model.process_canny)
+         with gr.TabItem('MLSD'):
+             create_demo_mlsd(model.process_mlsd)
+         with gr.TabItem('Scribble'):
+             create_demo_scribble(model.process_scribble)
+         with gr.TabItem('Scribble Interactive'):
+             create_demo_scribble_interactive(
+                 model.process_scribble_interactive)
+         with gr.TabItem('SoftEdge'):
+             create_demo_softedge(model.process_softedge)
+         with gr.TabItem('OpenPose'):
+             create_demo_openpose(model.process_openpose)
+         with gr.TabItem('Segmentation'):
+             create_demo_segmentation(model.process_segmentation)
+         with gr.TabItem('Depth'):
+             create_demo_depth(model.process_depth)
+         with gr.TabItem('Normal map'):
+             create_demo_normal(model.process_normal)
+         with gr.TabItem('Lineart'):
+             create_demo_lineart(model.process_lineart)
+         with gr.TabItem('Content Shuffle'):
+             create_demo_shuffle(model.process_shuffle)
+         with gr.TabItem('Instruct Pix2Pix'):
+             create_demo_ip2p(model.process_ip2p)
+
+     with gr.Accordion(label='Base model', open=False):
+         with gr.Row():
+             with gr.Column(scale=5):
+                 current_base_model = gr.Text(label='Current base model')
+             with gr.Column(scale=1):
+                 check_base_model_button = gr.Button('Check current base model')
+         with gr.Row():
+             with gr.Column(scale=5):
+                 new_base_model_id = gr.Text(
+                     label='New base model',
+                     max_lines=1,
+                     placeholder='runwayml/stable-diffusion-v1-5',
+                     info=
+                     'The base model must be compatible with Stable Diffusion v1.5.',
+                     interactive=ALLOW_CHANGING_BASE_MODEL)
+             with gr.Column(scale=1):
+                 change_base_model_button = gr.Button(
+                     'Change base model', interactive=ALLOW_CHANGING_BASE_MODEL)
+         if not ALLOW_CHANGING_BASE_MODEL:
+             gr.Markdown(
+                 '''The base model is not allowed to be changed in this Space so as not to slow down the demo, but it can be changed if you duplicate the Space.'''
+             )
+
+     check_base_model_button.click(
+         fn=lambda: model.base_model_id,
+         outputs=current_base_model,
+         queue=False,
+         api_name='check_base_model',
+     )
+     new_base_model_id.submit(
+         fn=model.set_base_model,
+         inputs=new_base_model_id,
+         outputs=current_base_model,
+         api_name=False,
+     )
+     change_base_model_button.click(
+         fn=model.set_base_model,
+         inputs=new_base_model_id,
+         outputs=current_base_model,
+         api_name=False,
+     )
+
+ demo.queue(max_size=20).launch()
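Note (not part of the commit): the `api_name` values registered above become named endpoints on the Space's HTTP API. A minimal sketch of querying the zero-input `check_base_model` endpoint from Python with `gradio_client`, assuming the package is installed and the app is running locally on the default port:

# Illustrative sketch only; assumes `pip install gradio_client` and a local run.
from gradio_client import Client

client = Client('http://127.0.0.1:7860/')
# 'check_base_model' is wired in app.py with no inputs and one text output.
print(client.predict(api_name='/check_base_model'))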
app_canny.py ADDED
@@ -0,0 +1,115 @@
+ #!/usr/bin/env python
+
+ import gradio as gr
+
+ from settings import (DEFAULT_IMAGE_RESOLUTION, DEFAULT_NUM_IMAGES,
+                       MAX_IMAGE_RESOLUTION, MAX_NUM_IMAGES, MAX_SEED)
+ from utils import randomize_seed_fn
+
+
+ def create_demo(process):
+     with gr.Blocks() as demo:
+         with gr.Row():
+             with gr.Column():
+                 image = gr.Image()
+                 prompt = gr.Textbox(label='Prompt')
+                 run_button = gr.Button('Run')
+                 with gr.Accordion('Advanced options', open=False):
+                     num_samples = gr.Slider(label='Number of images',
+                                             minimum=1,
+                                             maximum=MAX_NUM_IMAGES,
+                                             value=DEFAULT_NUM_IMAGES,
+                                             step=1)
+                     image_resolution = gr.Slider(
+                         label='Image resolution',
+                         minimum=256,
+                         maximum=MAX_IMAGE_RESOLUTION,
+                         value=DEFAULT_IMAGE_RESOLUTION,
+                         step=256)
+                     canny_low_threshold = gr.Slider(
+                         label='Canny low threshold',
+                         minimum=1,
+                         maximum=255,
+                         value=100,
+                         step=1)
+                     canny_high_threshold = gr.Slider(
+                         label='Canny high threshold',
+                         minimum=1,
+                         maximum=255,
+                         value=200,
+                         step=1)
+                     num_steps = gr.Slider(label='Number of steps',
+                                           minimum=1,
+                                           maximum=100,
+                                           value=20,
+                                           step=1)
+                     guidance_scale = gr.Slider(label='Guidance scale',
+                                                minimum=0.1,
+                                                maximum=30.0,
+                                                value=9.0,
+                                                step=0.1)
+                     seed = gr.Slider(label='Seed',
+                                      minimum=0,
+                                      maximum=MAX_SEED,
+                                      step=1,
+                                      value=0)
+                     randomize_seed = gr.Checkbox(label='Randomize seed',
+                                                  value=True)
+                     a_prompt = gr.Textbox(
+                         label='Additional prompt',
+                         value='best quality, extremely detailed')
+                     n_prompt = gr.Textbox(
+                         label='Negative prompt',
+                         value=
+                         'longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality'
+                     )
+             with gr.Column():
+                 result = gr.Gallery(label='Output',
+                                     show_label=False,
+                                     columns=2,
+                                     object_fit='scale-down')
+         inputs = [
+             image,
+             prompt,
+             a_prompt,
+             n_prompt,
+             num_samples,
+             image_resolution,
+             num_steps,
+             guidance_scale,
+             seed,
+             canny_low_threshold,
+             canny_high_threshold,
+         ]
+         prompt.submit(
+             fn=randomize_seed_fn,
+             inputs=[seed, randomize_seed],
+             outputs=seed,
+             queue=False,
+             api_name=False,
+         ).then(
+             fn=process,
+             inputs=inputs,
+             outputs=result,
+             api_name=False,
+         )
+         run_button.click(
+             fn=randomize_seed_fn,
+             inputs=[seed, randomize_seed],
+             outputs=seed,
+             queue=False,
+             api_name=False,
+         ).then(
+             fn=process,
+             inputs=inputs,
+             outputs=result,
+             api_name='canny',
+         )
+     return demo
+
+
+ if __name__ == '__main__':
+     from model import Model
+     model = Model(task_name='Canny')
+     demo = create_demo(model.process_canny)
+     demo.queue().launch()
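Note (not part of the commit): the two Canny sliders above are the hysteresis thresholds of a Canny edge detector. model.py is not included in this excerpt, so the following is only a plausible sketch of the preprocessing step those sliders feed, using OpenCV:

# Illustrative sketch only; model.py is not shown in this commit view.
import cv2
import numpy as np

def canny_control_image(image: np.ndarray, low: int = 100, high: int = 200) -> np.ndarray:
    # Gradients above `high` become edges; those between `low` and `high`
    # are kept only if connected to a strong edge (hysteresis).
    edges = cv2.Canny(image, low, high)
    # Replicate to 3 channels, the shape ControlNet conditioning images use.
    return cv2.cvtColor(edges, cv2.COLOR_GRAY2RGB)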
app_depth.py ADDED
@@ -0,0 +1,114 @@
+ #!/usr/bin/env python
+
+ import gradio as gr
+
+ from settings import (DEFAULT_IMAGE_RESOLUTION, DEFAULT_NUM_IMAGES,
+                       MAX_IMAGE_RESOLUTION, MAX_NUM_IMAGES, MAX_SEED)
+ from utils import randomize_seed_fn
+
+
+ def create_demo(process):
+     with gr.Blocks() as demo:
+         with gr.Row():
+             with gr.Column():
+                 image = gr.Image()
+                 prompt = gr.Textbox(label='Prompt')
+                 run_button = gr.Button('Run')
+                 with gr.Accordion('Advanced options', open=False):
+                     preprocessor_name = gr.Radio(
+                         label='Preprocessor',
+                         choices=['Midas', 'DPT', 'None'],
+                         type='value',
+                         value='DPT')
+                     num_samples = gr.Slider(label='Number of images',
+                                             minimum=1,
+                                             maximum=MAX_NUM_IMAGES,
+                                             value=DEFAULT_NUM_IMAGES,
+                                             step=1)
+                     image_resolution = gr.Slider(
+                         label='Image resolution',
+                         minimum=256,
+                         maximum=MAX_IMAGE_RESOLUTION,
+                         value=DEFAULT_IMAGE_RESOLUTION,
+                         step=256)
+                     preprocess_resolution = gr.Slider(
+                         label='Preprocess resolution',
+                         minimum=128,
+                         maximum=512,
+                         value=384,
+                         step=1)
+                     num_steps = gr.Slider(label='Number of steps',
+                                           minimum=1,
+                                           maximum=100,
+                                           value=20,
+                                           step=1)
+                     guidance_scale = gr.Slider(label='Guidance scale',
+                                                minimum=0.1,
+                                                maximum=30.0,
+                                                value=9.0,
+                                                step=0.1)
+                     seed = gr.Slider(label='Seed',
+                                      minimum=0,
+                                      maximum=MAX_SEED,
+                                      step=1,
+                                      value=0)
+                     randomize_seed = gr.Checkbox(label='Randomize seed',
+                                                  value=True)
+                     a_prompt = gr.Textbox(
+                         label='Additional prompt',
+                         value='best quality, extremely detailed')
+                     n_prompt = gr.Textbox(
+                         label='Negative prompt',
+                         value=
+                         'longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality'
+                     )
+             with gr.Column():
+                 result = gr.Gallery(label='Output',
+                                     show_label=False,
+                                     columns=2,
+                                     object_fit='scale-down')
+         inputs = [
+             image,
+             prompt,
+             a_prompt,
+             n_prompt,
+             num_samples,
+             image_resolution,
+             preprocess_resolution,
+             num_steps,
+             guidance_scale,
+             seed,
+             preprocessor_name,
+         ]
+         prompt.submit(
+             fn=randomize_seed_fn,
+             inputs=[seed, randomize_seed],
+             outputs=seed,
+             queue=False,
+             api_name=False,
+         ).then(
+             fn=process,
+             inputs=inputs,
+             outputs=result,
+             api_name=False,
+         )
+         run_button.click(
+             fn=randomize_seed_fn,
+             inputs=[seed, randomize_seed],
+             outputs=seed,
+             queue=False,
+             api_name=False,
+         ).then(
+             fn=process,
+             inputs=inputs,
+             outputs=result,
+             api_name='depth',
+         )
+     return demo
+
+
+ if __name__ == '__main__':
+     from model import Model
+     model = Model(task_name='depth')
+     demo = create_demo(model.process_depth)
+     demo.queue().launch()
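Note (not part of the commit): every app_*.py module imports `randomize_seed_fn` from utils, which is not included in this commit view. From how it is wired (inputs `[seed, randomize_seed]`, output `seed`), a plausible sketch:

# Plausible sketch of utils.randomize_seed_fn; the real utils.py is not shown here.
import random

MAX_SEED = 2**32 - 1  # assumption; the real bound lives in settings.py, also not shown

def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
    # Keep the user's seed when the checkbox is off; otherwise draw a fresh one.
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    return seed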
app_ip2p.py ADDED
@@ -0,0 +1,101 @@
+ #!/usr/bin/env python
+
+ import gradio as gr
+
+ from settings import (DEFAULT_IMAGE_RESOLUTION, DEFAULT_NUM_IMAGES,
+                       MAX_IMAGE_RESOLUTION, MAX_NUM_IMAGES, MAX_SEED)
+ from utils import randomize_seed_fn
+
+
+ def create_demo(process):
+     with gr.Blocks() as demo:
+         with gr.Row():
+             with gr.Column():
+                 image = gr.Image()
+                 prompt = gr.Textbox(label='Prompt')
+                 run_button = gr.Button('Run')
+                 with gr.Accordion('Advanced options', open=False):
+                     num_samples = gr.Slider(label='Number of images',
+                                             minimum=1,
+                                             maximum=MAX_NUM_IMAGES,
+                                             value=DEFAULT_NUM_IMAGES,
+                                             step=1)
+                     image_resolution = gr.Slider(
+                         label='Image resolution',
+                         minimum=256,
+                         maximum=MAX_IMAGE_RESOLUTION,
+                         value=DEFAULT_IMAGE_RESOLUTION,
+                         step=256)
+                     num_steps = gr.Slider(label='Number of steps',
+                                           minimum=1,
+                                           maximum=100,
+                                           value=20,
+                                           step=1)
+                     guidance_scale = gr.Slider(label='Guidance scale',
+                                                minimum=0.1,
+                                                maximum=30.0,
+                                                value=9.0,
+                                                step=0.1)
+                     seed = gr.Slider(label='Seed',
+                                      minimum=0,
+                                      maximum=MAX_SEED,
+                                      step=1,
+                                      value=0)
+                     randomize_seed = gr.Checkbox(label='Randomize seed',
+                                                  value=True)
+                     a_prompt = gr.Textbox(
+                         label='Additional prompt',
+                         value='best quality, extremely detailed')
+                     n_prompt = gr.Textbox(
+                         label='Negative prompt',
+                         value=
+                         'longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality'
+                     )
+             with gr.Column():
+                 result = gr.Gallery(label='Output',
+                                     show_label=False,
+                                     columns=2,
+                                     object_fit='scale-down')
+         inputs = [
+             image,
+             prompt,
+             a_prompt,
+             n_prompt,
+             num_samples,
+             image_resolution,
+             num_steps,
+             guidance_scale,
+             seed,
+         ]
+         prompt.submit(
+             fn=randomize_seed_fn,
+             inputs=[seed, randomize_seed],
+             outputs=seed,
+             queue=False,
+             api_name=False,
+         ).then(
+             fn=process,
+             inputs=inputs,
+             outputs=result,
+             api_name=False,
+         )
+         run_button.click(
+             fn=randomize_seed_fn,
+             inputs=[seed, randomize_seed],
+             outputs=seed,
+             queue=False,
+             api_name=False,
+         ).then(
+             fn=process,
+             inputs=inputs,
+             outputs=result,
+             api_name='ip2p',
+         )
+     return demo
+
+
+ if __name__ == '__main__':
+     from model import Model
+     model = Model(task_name='ip2p')
+     demo = create_demo(model.process_ip2p)
+     demo.queue().launch()
app_lineart.py ADDED
@@ -0,0 +1,123 @@
+ #!/usr/bin/env python
+
+ import gradio as gr
+
+ from settings import (DEFAULT_IMAGE_RESOLUTION, DEFAULT_NUM_IMAGES,
+                       MAX_IMAGE_RESOLUTION, MAX_NUM_IMAGES, MAX_SEED)
+ from utils import randomize_seed_fn
+
+
+ def create_demo(process):
+     with gr.Blocks() as demo:
+         with gr.Row():
+             with gr.Column():
+                 image = gr.Image()
+                 prompt = gr.Textbox(label='Prompt')
+                 run_button = gr.Button('Run')
+                 with gr.Accordion('Advanced options', open=False):
+                     preprocessor_name = gr.Radio(
+                         label='Preprocessor',
+                         choices=[
+                             'Lineart',
+                             'Lineart coarse',
+                             'None',
+                             'Lineart (anime)',
+                             'None (anime)',
+                         ],
+                         type='value',
+                         value='Lineart',
+                         info=
+                         'Note that "Lineart (anime)" and "None (anime)" are for anime base models like Anything-v3.'
+                     )
+                     num_samples = gr.Slider(label='Number of images',
+                                             minimum=1,
+                                             maximum=MAX_NUM_IMAGES,
+                                             value=DEFAULT_NUM_IMAGES,
+                                             step=1)
+                     image_resolution = gr.Slider(
+                         label='Image resolution',
+                         minimum=256,
+                         maximum=MAX_IMAGE_RESOLUTION,
+                         value=DEFAULT_IMAGE_RESOLUTION,
+                         step=256)
+                     preprocess_resolution = gr.Slider(
+                         label='Preprocess resolution',
+                         minimum=128,
+                         maximum=512,
+                         value=512,
+                         step=1)
+                     num_steps = gr.Slider(label='Number of steps',
+                                           minimum=1,
+                                           maximum=100,
+                                           value=20,
+                                           step=1)
+                     guidance_scale = gr.Slider(label='Guidance scale',
+                                                minimum=0.1,
+                                                maximum=30.0,
+                                                value=9.0,
+                                                step=0.1)
+                     seed = gr.Slider(label='Seed',
+                                      minimum=0,
+                                      maximum=MAX_SEED,
+                                      step=1,
+                                      value=0)
+                     randomize_seed = gr.Checkbox(label='Randomize seed',
+                                                  value=True)
+                     a_prompt = gr.Textbox(
+                         label='Additional prompt',
+                         value='best quality, extremely detailed')
+                     n_prompt = gr.Textbox(
+                         label='Negative prompt',
+                         value=
+                         'longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality'
+                     )
+             with gr.Column():
+                 result = gr.Gallery(label='Output',
+                                     show_label=False,
+                                     columns=2,
+                                     object_fit='scale-down')
+         inputs = [
+             image,
+             prompt,
+             a_prompt,
+             n_prompt,
+             num_samples,
+             image_resolution,
+             preprocess_resolution,
+             num_steps,
+             guidance_scale,
+             seed,
+             preprocessor_name,
+         ]
+         prompt.submit(
+             fn=randomize_seed_fn,
+             inputs=[seed, randomize_seed],
+             outputs=seed,
+             queue=False,
+             api_name=False,
+         ).then(
+             fn=process,
+             inputs=inputs,
+             outputs=result,
+             api_name=False,
+         )
+         run_button.click(
+             fn=randomize_seed_fn,
+             inputs=[seed, randomize_seed],
+             outputs=seed,
+             queue=False,
+             api_name=False,
+         ).then(
+             fn=process,
+             inputs=inputs,
+             outputs=result,
+             api_name='lineart',
+         )
+     return demo
+
+
+ if __name__ == '__main__':
+     from model import Model
+     model = Model(task_name='lineart')
+     demo = create_demo(model.process_lineart)
+     demo.queue().launch()
app_mlsd.py ADDED
@@ -0,0 +1,122 @@
+ #!/usr/bin/env python
+
+ import gradio as gr
+
+ from settings import (DEFAULT_IMAGE_RESOLUTION, DEFAULT_NUM_IMAGES,
+                       MAX_IMAGE_RESOLUTION, MAX_NUM_IMAGES, MAX_SEED)
+ from utils import randomize_seed_fn
+
+
+ def create_demo(process):
+     with gr.Blocks() as demo:
+         with gr.Row():
+             with gr.Column():
+                 image = gr.Image()
+                 prompt = gr.Textbox(label='Prompt')
+                 run_button = gr.Button('Run')
+                 with gr.Accordion('Advanced options', open=False):
+                     num_samples = gr.Slider(label='Number of images',
+                                             minimum=1,
+                                             maximum=MAX_NUM_IMAGES,
+                                             value=DEFAULT_NUM_IMAGES,
+                                             step=1)
+                     image_resolution = gr.Slider(
+                         label='Image resolution',
+                         minimum=256,
+                         maximum=MAX_IMAGE_RESOLUTION,
+                         value=DEFAULT_IMAGE_RESOLUTION,
+                         step=256)
+                     preprocess_resolution = gr.Slider(
+                         label='Preprocess resolution',
+                         minimum=128,
+                         maximum=512,
+                         value=512,
+                         step=1)
+                     mlsd_value_threshold = gr.Slider(
+                         label='Hough value threshold (MLSD)',
+                         minimum=0.01,
+                         maximum=2.0,
+                         value=0.1,
+                         step=0.01)
+                     mlsd_distance_threshold = gr.Slider(
+                         label='Hough distance threshold (MLSD)',
+                         minimum=0.01,
+                         maximum=20.0,
+                         value=0.1,
+                         step=0.01)
+                     num_steps = gr.Slider(label='Number of steps',
+                                           minimum=1,
+                                           maximum=100,
+                                           value=20,
+                                           step=1)
+                     guidance_scale = gr.Slider(label='Guidance scale',
+                                                minimum=0.1,
+                                                maximum=30.0,
+                                                value=9.0,
+                                                step=0.1)
+                     seed = gr.Slider(label='Seed',
+                                      minimum=0,
+                                      maximum=MAX_SEED,
+                                      step=1,
+                                      value=0)
+                     randomize_seed = gr.Checkbox(label='Randomize seed',
+                                                  value=True)
+                     a_prompt = gr.Textbox(
+                         label='Additional prompt',
+                         value='best quality, extremely detailed')
+                     n_prompt = gr.Textbox(
+                         label='Negative prompt',
+                         value=
+                         'longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality'
+                     )
+             with gr.Column():
+                 result = gr.Gallery(label='Output',
+                                     show_label=False,
+                                     columns=2,
+                                     object_fit='scale-down')
+         inputs = [
+             image,
+             prompt,
+             a_prompt,
+             n_prompt,
+             num_samples,
+             image_resolution,
+             preprocess_resolution,
+             num_steps,
+             guidance_scale,
+             seed,
+             mlsd_value_threshold,
+             mlsd_distance_threshold,
+         ]
+         prompt.submit(
+             fn=randomize_seed_fn,
+             inputs=[seed, randomize_seed],
+             outputs=seed,
+             queue=False,
+             api_name=False,
+         ).then(
+             fn=process,
+             inputs=inputs,
+             outputs=result,
+             api_name=False,
+         )
+         run_button.click(
+             fn=randomize_seed_fn,
+             inputs=[seed, randomize_seed],
+             outputs=seed,
+             queue=False,
+             api_name=False,
+         ).then(
+             fn=process,
+             inputs=inputs,
+             outputs=result,
+             api_name='mlsd',
+         )
+     return demo
+
+
+ if __name__ == '__main__':
+     from model import Model
+     model = Model(task_name='MLSD')
+     demo = create_demo(model.process_mlsd)
+     demo.queue().launch()
app_normal.py ADDED
@@ -0,0 +1,113 @@
+ #!/usr/bin/env python
+
+ import gradio as gr
+
+ from settings import (DEFAULT_IMAGE_RESOLUTION, DEFAULT_NUM_IMAGES,
+                       MAX_IMAGE_RESOLUTION, MAX_NUM_IMAGES, MAX_SEED)
+ from utils import randomize_seed_fn
+
+
+ def create_demo(process):
+     with gr.Blocks() as demo:
+         with gr.Row():
+             with gr.Column():
+                 image = gr.Image()
+                 prompt = gr.Textbox(label='Prompt')
+                 run_button = gr.Button('Run')
+                 with gr.Accordion('Advanced options', open=False):
+                     preprocessor_name = gr.Radio(label='Preprocessor',
+                                                  choices=['NormalBae', 'None'],
+                                                  type='value',
+                                                  value='NormalBae')
+                     num_samples = gr.Slider(label='Images',
+                                             minimum=1,
+                                             maximum=MAX_NUM_IMAGES,
+                                             value=DEFAULT_NUM_IMAGES,
+                                             step=1)
+                     image_resolution = gr.Slider(
+                         label='Image resolution',
+                         minimum=256,
+                         maximum=MAX_IMAGE_RESOLUTION,
+                         value=DEFAULT_IMAGE_RESOLUTION,
+                         step=256)
+                     preprocess_resolution = gr.Slider(
+                         label='Preprocess resolution',
+                         minimum=128,
+                         maximum=512,
+                         value=384,
+                         step=1)
+                     num_steps = gr.Slider(label='Number of steps',
+                                           minimum=1,
+                                           maximum=100,
+                                           value=20,
+                                           step=1)
+                     guidance_scale = gr.Slider(label='Guidance scale',
+                                                minimum=0.1,
+                                                maximum=30.0,
+                                                value=9.0,
+                                                step=0.1)
+                     seed = gr.Slider(label='Seed',
+                                      minimum=0,
+                                      maximum=MAX_SEED,
+                                      step=1,
+                                      value=0)
+                     randomize_seed = gr.Checkbox(label='Randomize seed',
+                                                  value=True)
+                     a_prompt = gr.Textbox(
+                         label='Additional prompt',
+                         value='best quality, extremely detailed')
+                     n_prompt = gr.Textbox(
+                         label='Negative prompt',
+                         value=
+                         'longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality'
+                     )
+             with gr.Column():
+                 result = gr.Gallery(label='Output',
+                                     show_label=False,
+                                     columns=2,
+                                     object_fit='scale-down')
+         inputs = [
+             image,
+             prompt,
+             a_prompt,
+             n_prompt,
+             num_samples,
+             image_resolution,
+             preprocess_resolution,
+             num_steps,
+             guidance_scale,
+             seed,
+             preprocessor_name,
+         ]
+         prompt.submit(
+             fn=randomize_seed_fn,
+             inputs=[seed, randomize_seed],
+             outputs=seed,
+             queue=False,
+             api_name=False,
+         ).then(
+             fn=process,
+             inputs=inputs,
+             outputs=result,
+             api_name=False,
+         )
+         run_button.click(
+             fn=randomize_seed_fn,
+             inputs=[seed, randomize_seed],
+             outputs=seed,
+             queue=False,
+             api_name=False,
+         ).then(
+             fn=process,
+             inputs=inputs,
+             outputs=result,
+             api_name='normal',
+         )
+     return demo
+
+
+ if __name__ == '__main__':
+     from model import Model
+     model = Model(task_name='NormalBae')
+     demo = create_demo(model.process_normal)
+     demo.queue().launch()
app_openpose.py ADDED
@@ -0,0 +1,113 @@
+ #!/usr/bin/env python
+
+ import gradio as gr
+
+ from settings import (DEFAULT_IMAGE_RESOLUTION, DEFAULT_NUM_IMAGES,
+                       MAX_IMAGE_RESOLUTION, MAX_NUM_IMAGES, MAX_SEED)
+ from utils import randomize_seed_fn
+
+
+ def create_demo(process):
+     with gr.Blocks() as demo:
+         with gr.Row():
+             with gr.Column():
+                 image = gr.Image()
+                 prompt = gr.Textbox(label='Prompt')
+                 run_button = gr.Button('Run')
+                 with gr.Accordion('Advanced options', open=False):
+                     preprocessor_name = gr.Radio(label='Preprocessor',
+                                                  choices=['Openpose', 'None'],
+                                                  type='value',
+                                                  value='Openpose')
+                     num_samples = gr.Slider(label='Number of images',
+                                             minimum=1,
+                                             maximum=MAX_NUM_IMAGES,
+                                             value=DEFAULT_NUM_IMAGES,
+                                             step=1)
+                     image_resolution = gr.Slider(
+                         label='Image resolution',
+                         minimum=256,
+                         maximum=MAX_IMAGE_RESOLUTION,
+                         value=DEFAULT_IMAGE_RESOLUTION,
+                         step=256)
+                     preprocess_resolution = gr.Slider(
+                         label='Preprocess resolution',
+                         minimum=128,
+                         maximum=512,
+                         value=512,
+                         step=1)
+                     num_steps = gr.Slider(label='Number of steps',
+                                           minimum=1,
+                                           maximum=100,
+                                           value=20,
+                                           step=1)
+                     guidance_scale = gr.Slider(label='Guidance scale',
+                                                minimum=0.1,
+                                                maximum=30.0,
+                                                value=9.0,
+                                                step=0.1)
+                     seed = gr.Slider(label='Seed',
+                                      minimum=0,
+                                      maximum=MAX_SEED,
+                                      step=1,
+                                      value=0)
+                     randomize_seed = gr.Checkbox(label='Randomize seed',
+                                                  value=True)
+                     a_prompt = gr.Textbox(
+                         label='Additional prompt',
+                         value='best quality, extremely detailed')
+                     n_prompt = gr.Textbox(
+                         label='Negative prompt',
+                         value=
+                         'longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality'
+                     )
+             with gr.Column():
+                 result = gr.Gallery(label='Output',
+                                     show_label=False,
+                                     columns=2,
+                                     object_fit='scale-down')
+         inputs = [
+             image,
+             prompt,
+             a_prompt,
+             n_prompt,
+             num_samples,
+             image_resolution,
+             preprocess_resolution,
+             num_steps,
+             guidance_scale,
+             seed,
+             preprocessor_name,
+         ]
+         prompt.submit(
+             fn=randomize_seed_fn,
+             inputs=[seed, randomize_seed],
+             outputs=seed,
+             queue=False,
+             api_name=False,
+         ).then(
+             fn=process,
+             inputs=inputs,
+             outputs=result,
+             api_name=False,
+         )
+         run_button.click(
+             fn=randomize_seed_fn,
+             inputs=[seed, randomize_seed],
+             outputs=seed,
+             queue=False,
+             api_name=False,
+         ).then(
+             fn=process,
+             inputs=inputs,
+             outputs=result,
+             api_name='openpose',
+         )
+     return demo
+
+
+ if __name__ == '__main__':
+     from model import Model
+     model = Model(task_name='Openpose')
+     demo = create_demo(model.process_openpose)
+     demo.queue().launch()
app_scribble.py ADDED
@@ -0,0 +1,114 @@
+ #!/usr/bin/env python
+
+ import gradio as gr
+
+ from settings import (DEFAULT_IMAGE_RESOLUTION, DEFAULT_NUM_IMAGES,
+                       MAX_IMAGE_RESOLUTION, MAX_NUM_IMAGES, MAX_SEED)
+ from utils import randomize_seed_fn
+
+
+ def create_demo(process):
+     with gr.Blocks() as demo:
+         with gr.Row():
+             with gr.Column():
+                 image = gr.Image()
+                 prompt = gr.Textbox(label='Prompt')
+                 run_button = gr.Button('Run')
+                 with gr.Accordion('Advanced options', open=False):
+                     preprocessor_name = gr.Radio(
+                         label='Preprocessor',
+                         choices=['HED', 'PidiNet', 'None'],
+                         type='value',
+                         value='HED')
+                     num_samples = gr.Slider(label='Number of images',
+                                             minimum=1,
+                                             maximum=MAX_NUM_IMAGES,
+                                             value=DEFAULT_NUM_IMAGES,
+                                             step=1)
+                     image_resolution = gr.Slider(
+                         label='Image resolution',
+                         minimum=256,
+                         maximum=MAX_IMAGE_RESOLUTION,
+                         value=DEFAULT_IMAGE_RESOLUTION,
+                         step=256)
+                     preprocess_resolution = gr.Slider(
+                         label='Preprocess resolution',
+                         minimum=128,
+                         maximum=512,
+                         value=512,
+                         step=1)
+                     num_steps = gr.Slider(label='Number of steps',
+                                           minimum=1,
+                                           maximum=100,
+                                           value=20,
+                                           step=1)
+                     guidance_scale = gr.Slider(label='Guidance scale',
+                                                minimum=0.1,
+                                                maximum=30.0,
+                                                value=9.0,
+                                                step=0.1)
+                     seed = gr.Slider(label='Seed',
+                                      minimum=0,
+                                      maximum=MAX_SEED,
+                                      step=1,
+                                      value=0)
+                     randomize_seed = gr.Checkbox(label='Randomize seed',
+                                                  value=True)
+                     a_prompt = gr.Textbox(
+                         label='Additional prompt',
+                         value='best quality, extremely detailed')
+                     n_prompt = gr.Textbox(
+                         label='Negative prompt',
+                         value=
+                         'longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality'
+                     )
+             with gr.Column():
+                 result = gr.Gallery(label='Output',
+                                     show_label=False,
+                                     columns=2,
+                                     object_fit='scale-down')
+         inputs = [
+             image,
+             prompt,
+             a_prompt,
+             n_prompt,
+             num_samples,
+             image_resolution,
+             preprocess_resolution,
+             num_steps,
+             guidance_scale,
+             seed,
+             preprocessor_name,
+         ]
+         prompt.submit(
+             fn=randomize_seed_fn,
+             inputs=[seed, randomize_seed],
+             outputs=seed,
+             queue=False,
+             api_name=False,
+         ).then(
+             fn=process,
+             inputs=inputs,
+             outputs=result,
+             api_name=False,
+         )
+         run_button.click(
+             fn=randomize_seed_fn,
+             inputs=[seed, randomize_seed],
+             outputs=seed,
+             queue=False,
+             api_name=False,
+         ).then(
+             fn=process,
+             inputs=inputs,
+             outputs=result,
+             api_name='scribble',
+         )
+     return demo
+
+
+ if __name__ == '__main__':
+     from model import Model
+     model = Model(task_name='scribble')
+     demo = create_demo(model.process_scribble)
+     demo.queue().launch()
app_scribble_interactive.py ADDED
@@ -0,0 +1,125 @@
+ #!/usr/bin/env python
+
+ import gradio as gr
+ import numpy as np
+
+ from settings import (DEFAULT_IMAGE_RESOLUTION, DEFAULT_NUM_IMAGES,
+                       MAX_IMAGE_RESOLUTION, MAX_NUM_IMAGES, MAX_SEED)
+ from utils import randomize_seed_fn
+
+
+ def create_canvas(w, h):
+     return np.zeros(shape=(h, w, 3), dtype=np.uint8) + 255
+
+
+ def create_demo(process):
+     with gr.Blocks() as demo:
+         with gr.Row():
+             with gr.Column():
+                 canvas_width = gr.Slider(label='Canvas width',
+                                          minimum=256,
+                                          maximum=MAX_IMAGE_RESOLUTION,
+                                          value=DEFAULT_IMAGE_RESOLUTION,
+                                          step=1)
+                 canvas_height = gr.Slider(label='Canvas height',
+                                           minimum=256,
+                                           maximum=MAX_IMAGE_RESOLUTION,
+                                           value=DEFAULT_IMAGE_RESOLUTION,
+                                           step=1)
+                 create_button = gr.Button('Open drawing canvas!')
+                 image = gr.Image(tool='sketch', brush_radius=10)
+                 prompt = gr.Textbox(label='Prompt')
+                 run_button = gr.Button('Run')
+                 with gr.Accordion('Advanced options', open=False):
+                     num_samples = gr.Slider(label='Number of images',
+                                             minimum=1,
+                                             maximum=MAX_NUM_IMAGES,
+                                             value=DEFAULT_NUM_IMAGES,
+                                             step=1)
+                     image_resolution = gr.Slider(
+                         label='Image resolution',
+                         minimum=256,
+                         maximum=MAX_IMAGE_RESOLUTION,
+                         value=DEFAULT_IMAGE_RESOLUTION,
+                         step=256)
+                     num_steps = gr.Slider(label='Number of steps',
+                                           minimum=1,
+                                           maximum=100,
+                                           value=20,
+                                           step=1)
+                     guidance_scale = gr.Slider(label='Guidance scale',
+                                                minimum=0.1,
+                                                maximum=30.0,
+                                                value=9.0,
+                                                step=0.1)
+                     seed = gr.Slider(label='Seed',
+                                      minimum=0,
+                                      maximum=MAX_SEED,
+                                      step=1,
+                                      value=0)
+                     randomize_seed = gr.Checkbox(label='Randomize seed',
+                                                  value=True)
+                     a_prompt = gr.Textbox(
+                         label='Additional prompt',
+                         value='best quality, extremely detailed')
+                     n_prompt = gr.Textbox(
+                         label='Negative prompt',
+                         value=
+                         'longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality'
+                     )
+             with gr.Column():
+                 result = gr.Gallery(label='Output',
+                                     show_label=False,
+                                     columns=2,
+                                     object_fit='scale-down')
+
+         create_button.click(
+             fn=create_canvas,
+             inputs=[canvas_width, canvas_height],
+             outputs=image,
+             queue=False,
+             api_name=False,
+         )
+
+         inputs = [
+             image,
+             prompt,
+             a_prompt,
+             n_prompt,
+             num_samples,
+             image_resolution,
+             num_steps,
+             guidance_scale,
+             seed,
+         ]
+         prompt.submit(
+             fn=randomize_seed_fn,
+             inputs=[seed, randomize_seed],
+             outputs=seed,
+             queue=False,
+             api_name=False,
+         ).then(
+             fn=process,
+             inputs=inputs,
+             outputs=result,
+             api_name=False,
+         )
+         run_button.click(
+             fn=randomize_seed_fn,
+             inputs=[seed, randomize_seed],
+             outputs=seed,
+             queue=False,
+             api_name=False,
+         ).then(
+             fn=process,
+             inputs=inputs,
+             outputs=result,
+         )
+     return demo
+
+
+ if __name__ == '__main__':
+     from model import Model
+     model = Model(task_name='scribble')
+     demo = create_demo(model.process_scribble_interactive)
+     demo.queue().launch()
app_segmentation.py ADDED
@@ -0,0 +1,113 @@
1
+ #!/usr/bin/env python
2
+
3
+ import gradio as gr
4
+
5
+ from settings import (DEFAULT_IMAGE_RESOLUTION, DEFAULT_NUM_IMAGES,
6
+ MAX_IMAGE_RESOLUTION, MAX_NUM_IMAGES, MAX_SEED)
7
+ from utils import randomize_seed_fn
8
+
9
+
10
+ def create_demo(process):
11
+ with gr.Blocks() as demo:
12
+ with gr.Row():
13
+ with gr.Column():
14
+ image = gr.Image()
15
+ prompt = gr.Textbox(label='Prompt')
16
+ run_button = gr.Button('Run')
17
+ with gr.Accordion('Advanced options', open=False):
18
+ preprocessor_name = gr.Radio(label='Preprocessor',
19
+ choices=['UPerNet', 'None'],
20
+ type='value',
21
+ value='UPerNet')
22
+ num_samples = gr.Slider(label='Number of images',
23
+ minimum=1,
24
+ maximum=MAX_NUM_IMAGES,
25
+ value=DEFAULT_NUM_IMAGES,
26
+ step=1)
27
+ image_resolution = gr.Slider(
28
+ label='Image resolution',
29
+ minimum=256,
30
+ maximum=MAX_IMAGE_RESOLUTION,
31
+ value=DEFAULT_IMAGE_RESOLUTION,
32
+ step=256)
33
+ preprocess_resolution = gr.Slider(
34
+ label='Preprocess resolution',
35
+ minimum=128,
36
+ maximum=512,
37
+ value=512,
38
+ step=1)
39
+ num_steps = gr.Slider(label='Number of steps',
40
+ minimum=1,
41
+ maximum=100,
42
+ value=20,
43
+ step=1)
44
+ guidance_scale = gr.Slider(label='Guidance scale',
45
+ minimum=0.1,
46
+ maximum=30.0,
47
+ value=9.0,
48
+ step=0.1)
49
+ seed = gr.Slider(label='Seed',
50
+ minimum=0,
51
+ maximum=MAX_SEED,
52
+ step=1,
53
+ value=0)
54
+ randomize_seed = gr.Checkbox(label='Randomize seed',
55
+ value=True)
56
+ a_prompt = gr.Textbox(
57
+ label='Additional prompt',
58
+ value='best quality, extremely detailed')
59
+ n_prompt = gr.Textbox(
60
+ label='Negative prompt',
61
+ value=
62
+ 'longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality'
63
+ )
64
+ with gr.Column():
65
+ result = gr.Gallery(label='Output',
66
+ show_label=False,
67
+ columns=2,
68
+ object_fit='scale-down')
69
+ inputs = [
70
+ image,
71
+ prompt,
72
+ a_prompt,
73
+ n_prompt,
74
+ num_samples,
75
+ image_resolution,
76
+ preprocess_resolution,
77
+ num_steps,
78
+ guidance_scale,
79
+ seed,
80
+ preprocessor_name,
81
+ ]
82
+ prompt.submit(
83
+ fn=randomize_seed_fn,
84
+ inputs=[seed, randomize_seed],
85
+ outputs=seed,
86
+ queue=False,
87
+ api_name=False,
88
+ ).then(
89
+ fn=process,
90
+ inputs=inputs,
91
+ outputs=result,
92
+ api_name=False,
93
+ )
94
+ run_button.click(
95
+ fn=randomize_seed_fn,
96
+ inputs=[seed, randomize_seed],
97
+ outputs=seed,
98
+ queue=False,
99
+ api_name=False,
100
+ ).then(
101
+ fn=process,
102
+ inputs=inputs,
103
+ outputs=result,
104
+ api_name='segmentation',
105
+ )
106
+ return demo
107
+
108
+
109
+ if __name__ == '__main__':
110
+ from model import Model
111
+ model = Model(task_name='segmentation')
112
+ demo = create_demo(model.process_segmentation)
113
+ demo.queue().launch()
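Note: because the final run_button.click step registers api_name='segmentation' (and the shuffle and softedge apps below likewise register 'content-shuffle' and 'softedge'), this endpoint is callable programmatically once the Space is running. A minimal sketch with gradio_client, assuming a hypothetical local launch at the default port and a local 'input.png'; the positional arguments mirror the inputs list above, and the exact image payload format depends on the client version:

from gradio_client import Client

# hypothetical local deployment; replace with the Space URL if hosted
client = Client('http://127.0.0.1:7860/')
result = client.predict(
    'input.png',                          # image
    'a modern house',                     # prompt
    'best quality, extremely detailed',   # a_prompt
    'lowres, bad anatomy',                # n_prompt
    1,                                    # num_samples
    512,                                  # image_resolution
    512,                                  # preprocess_resolution
    20,                                   # num_steps
    9.0,                                  # guidance_scale
    0,                                    # seed
    'UPerNet',                            # preprocessor_name
    api_name='/segmentation',
)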
app_shuffle.py ADDED
@@ -0,0 +1,107 @@
1
+ #!/usr/bin/env python
2
+
3
+ import gradio as gr
4
+
5
+ from settings import (DEFAULT_IMAGE_RESOLUTION, DEFAULT_NUM_IMAGES,
6
+ MAX_IMAGE_RESOLUTION, MAX_NUM_IMAGES, MAX_SEED)
7
+ from utils import randomize_seed_fn
8
+
9
+
10
+ def create_demo(process):
11
+ with gr.Blocks() as demo:
12
+ with gr.Row():
13
+ with gr.Column():
14
+ image = gr.Image()
15
+ prompt = gr.Textbox(label='Prompt')
16
+ run_button = gr.Button('Run')
17
+ with gr.Accordion('Advanced options', open=False):
18
+ preprocessor_name = gr.Radio(
19
+ label='Preprocessor',
20
+ choices=['ContentShuffle', 'None'],
21
+ type='value',
22
+ value='ContentShuffle')
23
+ num_samples = gr.Slider(label='Number of images',
24
+ minimum=1,
25
+ maximum=MAX_NUM_IMAGES,
26
+ value=DEFAULT_NUM_IMAGES,
27
+ step=1)
28
+ image_resolution = gr.Slider(
29
+ label='Image resolution',
30
+ minimum=256,
31
+ maximum=MAX_IMAGE_RESOLUTION,
32
+ value=DEFAULT_IMAGE_RESOLUTION,
33
+ step=256)
34
+ num_steps = gr.Slider(label='Number of steps',
35
+ minimum=1,
36
+ maximum=100,
37
+ value=20,
38
+ step=1)
39
+ guidance_scale = gr.Slider(label='Guidance scale',
40
+ minimum=0.1,
41
+ maximum=30.0,
42
+ value=9.0,
43
+ step=0.1)
44
+ seed = gr.Slider(label='Seed',
45
+ minimum=0,
46
+ maximum=MAX_SEED,
47
+ step=1,
48
+ value=0)
49
+ randomize_seed = gr.Checkbox(label='Randomize seed',
50
+ value=True)
51
+ a_prompt = gr.Textbox(
52
+ label='Additional prompt',
53
+ value='best quality, extremely detailed')
54
+ n_prompt = gr.Textbox(
55
+ label='Negative prompt',
56
+ value=
57
+ 'longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality'
58
+ )
59
+ with gr.Column():
60
+ result = gr.Gallery(label='Output',
61
+ show_label=False,
62
+ columns=2,
63
+ object_fit='scale-down')
64
+ inputs = [
65
+ image,
66
+ prompt,
67
+ a_prompt,
68
+ n_prompt,
69
+ num_samples,
70
+ image_resolution,
71
+ num_steps,
72
+ guidance_scale,
73
+ seed,
74
+ preprocessor_name,
75
+ ]
76
+ prompt.submit(
77
+ fn=randomize_seed_fn,
78
+ inputs=[seed, randomize_seed],
79
+ outputs=seed,
80
+ queue=False,
81
+ api_name=False,
82
+ ).then(
83
+ fn=process,
84
+ inputs=inputs,
85
+ outputs=result,
86
+ api_name=False,
87
+ )
88
+ run_button.click(
89
+ fn=randomize_seed_fn,
90
+ inputs=[seed, randomize_seed],
91
+ outputs=seed,
92
+ queue=False,
93
+ api_name=False,
94
+ ).then(
95
+ fn=process,
96
+ inputs=inputs,
97
+ outputs=result,
98
+ api_name='content-shuffle',
99
+ )
100
+ return demo
101
+
102
+
103
+ if __name__ == '__main__':
104
+ from model import Model
105
+ model = Model(task_name='shuffle')
106
+ demo = create_demo(model.process_shuffle)
107
+ demo.queue().launch()
app_softedge.py ADDED
@@ -0,0 +1,119 @@
1
+ #!/usr/bin/env python
2
+
3
+ import gradio as gr
4
+
5
+ from settings import (DEFAULT_IMAGE_RESOLUTION, DEFAULT_NUM_IMAGES,
6
+ MAX_IMAGE_RESOLUTION, MAX_NUM_IMAGES, MAX_SEED)
7
+ from utils import randomize_seed_fn
8
+
9
+
10
+ def create_demo(process):
11
+ with gr.Blocks() as demo:
12
+ with gr.Row():
13
+ with gr.Column():
14
+ image = gr.Image()
15
+ prompt = gr.Textbox(label='Prompt')
16
+ run_button = gr.Button('Run')
17
+ with gr.Accordion('Advanced options', open=False):
18
+ preprocessor_name = gr.Radio(label='Preprocessor',
19
+ choices=[
20
+ 'HED',
21
+ 'PidiNet',
22
+ 'HED safe',
23
+ 'PidiNet safe',
24
+ 'None',
25
+ ],
26
+ type='value',
27
+ value='PidiNet')
28
+ num_samples = gr.Slider(label='Number of images',
29
+ minimum=1,
30
+ maximum=MAX_NUM_IMAGES,
31
+ value=DEFAULT_NUM_IMAGES,
32
+ step=1)
33
+ image_resolution = gr.Slider(
34
+ label='Image resolution',
35
+ minimum=256,
36
+ maximum=MAX_IMAGE_RESOLUTION,
37
+ value=DEFAULT_IMAGE_RESOLUTION,
38
+ step=256)
39
+ preprocess_resolution = gr.Slider(
40
+ label='Preprocess resolution',
41
+ minimum=128,
42
+ maximum=512,
43
+ value=512,
44
+ step=1)
45
+ num_steps = gr.Slider(label='Number of steps',
46
+ minimum=1,
47
+ maximum=100,
48
+ value=20,
49
+ step=1)
50
+ guidance_scale = gr.Slider(label='Guidance scale',
51
+ minimum=0.1,
52
+ maximum=30.0,
53
+ value=9.0,
54
+ step=0.1)
55
+ seed = gr.Slider(label='Seed',
56
+ minimum=0,
57
+ maximum=MAX_SEED,
58
+ step=1,
59
+ value=0)
60
+ randomize_seed = gr.Checkbox(label='Randomize seed',
61
+ value=True)
62
+ a_prompt = gr.Textbox(
63
+ label='Additional prompt',
64
+ value='best quality, extremely detailed')
65
+ n_prompt = gr.Textbox(
66
+ label='Negative prompt',
67
+ value=
68
+ 'longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality'
69
+ )
70
+ with gr.Column():
71
+ result = gr.Gallery(label='Output',
72
+ show_label=False,
73
+ columns=2,
74
+ object_fit='scale-down')
75
+ inputs = [
76
+ image,
77
+ prompt,
78
+ a_prompt,
79
+ n_prompt,
80
+ num_samples,
81
+ image_resolution,
82
+ preprocess_resolution,
83
+ num_steps,
84
+ guidance_scale,
85
+ seed,
86
+ preprocessor_name,
87
+ ]
88
+ prompt.submit(
89
+ fn=randomize_seed_fn,
90
+ inputs=[seed, randomize_seed],
91
+ outputs=seed,
92
+ queue=False,
93
+ api_name=False,
94
+ ).then(
95
+ fn=process,
96
+ inputs=inputs,
97
+ outputs=result,
98
+ api_name=False,
99
+ )
100
+ run_button.click(
101
+ fn=randomize_seed_fn,
102
+ inputs=[seed, randomize_seed],
103
+ outputs=seed,
104
+ queue=False,
105
+ api_name=False,
106
+ ).then(
107
+ fn=process,
108
+ inputs=inputs,
109
+ outputs=result,
110
+ api_name='softedge',
111
+ )
112
+ return demo
113
+
114
+
115
+ if __name__ == '__main__':
116
+ from model import Model
117
+ model = Model(task_name='softedge')
118
+ demo = create_demo(model.process_softedge)
119
+ demo.queue().launch()
cv_utils.py ADDED
@@ -0,0 +1,17 @@
1
+ import cv2
2
+ import numpy as np
3
+
4
+
5
+ def resize_image(input_image, resolution, interpolation=None):
6
+ H, W, C = input_image.shape
7
+ H = float(H)
8
+ W = float(W)
9
+ k = float(resolution) / max(H, W)
10
+ H *= k
11
+ W *= k
12
+ H = int(np.round(H / 64.0)) * 64
13
+ W = int(np.round(W / 64.0)) * 64
14
+ if interpolation is None:
15
+ interpolation = cv2.INTER_LANCZOS4 if k > 1 else cv2.INTER_AREA
16
+ img = cv2.resize(input_image, (W, H), interpolation=interpolation)
17
+ return img
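Note: resize_image scales the longer side to the requested resolution and then snaps both dimensions to multiples of 64 (the granularity Stable Diffusion's latents expect), picking Lanczos interpolation for upscaling and area interpolation for downscaling. A quick worked example with a hypothetical 600x800 input:

import numpy as np
from cv_utils import resize_image

img = np.zeros((600, 800, 3), dtype=np.uint8)  # H=600, W=800
out = resize_image(img, resolution=512)
# k = 512/800 = 0.64, so H -> 384 and W -> 512, both already
# multiples of 64; since k < 1, cv2.INTER_AREA is used
print(out.shape)  # (384, 512, 3)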
depth_estimator.py ADDED
@@ -0,0 +1,25 @@
1
+ import numpy as np
2
+ import PIL.Image
3
+ from controlnet_aux.util import HWC3
4
+ from transformers import pipeline
5
+
6
+ from cv_utils import resize_image
7
+
8
+
9
+ class DepthEstimator:
10
+ def __init__(self):
11
+ self.model = pipeline('depth-estimation')
12
+
13
+ def __call__(self, image: np.ndarray, **kwargs) -> PIL.Image.Image:
14
+ detect_resolution = kwargs.pop('detect_resolution', 512)
15
+ image_resolution = kwargs.pop('image_resolution', 512)
16
+ image = np.array(image)
17
+ image = HWC3(image)
18
+ image = resize_image(image, resolution=detect_resolution)
19
+ image = PIL.Image.fromarray(image)
20
+ image = self.model(image)
21
+ image = image['depth']
22
+ image = np.array(image)
23
+ image = HWC3(image)
24
+ image = resize_image(image, resolution=image_resolution)
25
+ return PIL.Image.fromarray(image)
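Note: DepthEstimator wraps the transformers depth-estimation pipeline in the same resize-predict-resize pattern as the other preprocessors. A minimal usage sketch, assuming the pipeline's default depth checkpoint downloads on first use:

import numpy as np
from depth_estimator import DepthEstimator

estimator = DepthEstimator()
rgb = np.zeros((600, 800, 3), dtype=np.uint8)  # placeholder input
depth = estimator(rgb, detect_resolution=384, image_resolution=512)
depth.save('depth.png')  # grayscale PIL image sized for ControlNet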
image_segmentor.py ADDED
@@ -0,0 +1,39 @@
1
+ import cv2
2
+ import numpy as np
3
+ import PIL.Image
4
+ import torch
5
+ from controlnet_aux.util import HWC3, ade_palette
6
+ from transformers import AutoImageProcessor, UperNetForSemanticSegmentation
7
+
8
+ from cv_utils import resize_image
9
+
10
+
11
+ class ImageSegmentor:
12
+ def __init__(self):
13
+ self.image_processor = AutoImageProcessor.from_pretrained(
14
+ 'openmmlab/upernet-convnext-small')
15
+ self.image_segmentor = UperNetForSemanticSegmentation.from_pretrained(
16
+ 'openmmlab/upernet-convnext-small')
17
+
18
+ @torch.inference_mode()
19
+ def __call__(self, image: np.ndarray, **kwargs) -> PIL.Image.Image:
20
+ detect_resolution = kwargs.pop('detect_resolution', 512)
21
+ image_resolution = kwargs.pop('image_resolution', 512)
22
+ image = HWC3(image)
23
+ image = resize_image(image, resolution=detect_resolution)
24
+ image = PIL.Image.fromarray(image)
25
+
26
+ pixel_values = self.image_processor(image,
27
+ return_tensors='pt').pixel_values
28
+ outputs = self.image_segmentor(pixel_values)
29
+ seg = self.image_processor.post_process_semantic_segmentation(
30
+ outputs, target_sizes=[image.size[::-1]])[0]
31
+ color_seg = np.zeros((seg.shape[0], seg.shape[1], 3), dtype=np.uint8)
32
+ for label, color in enumerate(ade_palette()):
33
+ color_seg[seg == label, :] = color
34
+ color_seg = color_seg.astype(np.uint8)
35
+
36
+ color_seg = resize_image(color_seg,
37
+ resolution=image_resolution,
38
+ interpolation=cv2.INTER_NEAREST)
39
+ return PIL.Image.fromarray(color_seg)
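Note: ImageSegmentor produces the ADE20K-style color map the seg ControlNet expects: each predicted class index is painted with its ade_palette() color, and the final resize uses nearest-neighbor interpolation so label colors are never blended. Usage mirrors DepthEstimator; a sketch with a placeholder input:

import numpy as np
from image_segmentor import ImageSegmentor

segmentor = ImageSegmentor()
rgb = np.zeros((600, 800, 3), dtype=np.uint8)  # placeholder input
seg_map = segmentor(rgb, detect_resolution=512, image_resolution=512)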
model.py ADDED
@@ -0,0 +1,591 @@
1
+ from __future__ import annotations
2
+
3
+ import gc
4
+
5
+ import numpy as np
6
+ import PIL.Image
7
+ import torch
8
+ from controlnet_aux.util import HWC3
9
+ from diffusers import (ControlNetModel, DiffusionPipeline,
10
+ StableDiffusionControlNetPipeline,
11
+ UniPCMultistepScheduler)
12
+
13
+ from cv_utils import resize_image
14
+ from preprocessor import Preprocessor
15
+
16
+ CONTROLNET_MODEL_IDS = {
17
+ 'Openpose': 'lllyasviel/control_v11p_sd15_openpose',
18
+ 'Canny': 'lllyasviel/control_v11p_sd15_canny',
19
+ 'MLSD': 'lllyasviel/control_v11p_sd15_mlsd',
20
+ 'scribble': 'lllyasviel/control_v11p_sd15_scribble',
21
+ 'softedge': 'lllyasviel/control_v11p_sd15_softedge',
22
+ 'segmentation': 'lllyasviel/control_v11p_sd15_seg',
23
+ 'depth': 'lllyasviel/control_v11f1p_sd15_depth',
24
+ 'NormalBae': 'lllyasviel/control_v11p_sd15_normalbae',
25
+ 'lineart': 'lllyasviel/control_v11p_sd15_lineart',
26
+ 'lineart_anime': 'lllyasviel/control_v11p_sd15s2_lineart_anime',
27
+ 'shuffle': 'lllyasviel/control_v11e_sd15_shuffle',
28
+ 'ip2p': 'lllyasviel/control_v11e_sd15_ip2p',
29
+ 'inpaint': 'lllyasviel/control_v11e_sd15_inpaint',
30
+ }
31
+
32
+
33
+ def download_all_controlnet_weights() -> None:
34
+ for model_id in CONTROLNET_MODEL_IDS.values():
35
+ ControlNetModel.from_pretrained(model_id)
36
+
37
+
38
+ class Model:
39
+ def __init__(self,
40
+ base_model_id: str = 'runwayml/stable-diffusion-v1-5',
41
+ task_name: str = 'Canny'):
42
+ self.device = torch.device(
43
+ 'cuda:0' if torch.cuda.is_available() else 'cpu')
44
+ self.base_model_id = ''
45
+ self.task_name = ''
46
+ self.pipe = self.load_pipe(base_model_id, task_name)
47
+ self.preprocessor = Preprocessor()
48
+
49
+ def load_pipe(self, base_model_id: str, task_name) -> DiffusionPipeline:
50
+ if base_model_id == self.base_model_id and task_name == self.task_name and hasattr(
51
+ self, 'pipe') and self.pipe is not None:
52
+ return self.pipe
53
+ model_id = CONTROLNET_MODEL_IDS[task_name]
54
+ controlnet = ControlNetModel.from_pretrained(model_id,
55
+ torch_dtype=torch.float16)
56
+ pipe = StableDiffusionControlNetPipeline.from_pretrained(
57
+ base_model_id,
58
+ safety_checker=None,
59
+ controlnet=controlnet,
60
+ torch_dtype=torch.float16)
61
+ pipe.scheduler = UniPCMultistepScheduler.from_config(
62
+ pipe.scheduler.config)
63
+ if self.device.type == 'cuda':
64
+ pipe.enable_xformers_memory_efficient_attention()
65
+ pipe.to(self.device)
66
+ torch.cuda.empty_cache()
67
+ gc.collect()
68
+ self.base_model_id = base_model_id
69
+ self.task_name = task_name
70
+ return pipe
71
+
72
+ def set_base_model(self, base_model_id: str) -> str:
73
+ if not base_model_id or base_model_id == self.base_model_id:
74
+ return self.base_model_id
75
+ del self.pipe
76
+ torch.cuda.empty_cache()
77
+ gc.collect()
78
+ try:
79
+ self.pipe = self.load_pipe(base_model_id, self.task_name)
80
+ except Exception:
81
+ self.pipe = self.load_pipe(self.base_model_id, self.task_name)
82
+ return self.base_model_id
83
+
84
+ def load_controlnet_weight(self, task_name: str) -> None:
85
+ if task_name == self.task_name:
86
+ return
87
+ if self.pipe is not None and hasattr(self.pipe, 'controlnet'):
88
+ del self.pipe.controlnet
89
+ torch.cuda.empty_cache()
90
+ gc.collect()
91
+ model_id = CONTROLNET_MODEL_IDS[task_name]
92
+ controlnet = ControlNetModel.from_pretrained(model_id,
93
+ torch_dtype=torch.float16)
94
+ controlnet.to(self.device)
95
+ torch.cuda.empty_cache()
96
+ gc.collect()
97
+ self.pipe.controlnet = controlnet
98
+ self.task_name = task_name
99
+
100
+ def get_prompt(self, prompt: str, additional_prompt: str) -> str:
101
+ if not prompt:
102
+ prompt = additional_prompt
103
+ else:
104
+ prompt = f'{prompt}, {additional_prompt}'
105
+ return prompt
106
+
107
+ @torch.autocast('cuda')
108
+ def run_pipe(
109
+ self,
110
+ prompt: str,
111
+ negative_prompt: str,
112
+ control_image: PIL.Image.Image,
113
+ num_images: int,
114
+ num_steps: int,
115
+ guidance_scale: float,
116
+ seed: int,
117
+ ) -> list[PIL.Image.Image]:
118
+ if seed == -1:
119
+ seed = np.random.randint(0, np.iinfo(np.int64).max)
120
+ generator = torch.Generator().manual_seed(seed)
121
+ return self.pipe(prompt=prompt,
122
+ negative_prompt=negative_prompt,
123
+ guidance_scale=guidance_scale,
124
+ num_images_per_prompt=num_images,
125
+ num_inference_steps=num_steps,
126
+ generator=generator,
127
+ image=control_image).images
128
+
129
+ @torch.inference_mode()
130
+ def process_canny(
131
+ self,
132
+ image: np.ndarray,
133
+ prompt: str,
134
+ additional_prompt: str,
135
+ negative_prompt: str,
136
+ num_images: int,
137
+ image_resolution: int,
138
+ num_steps: int,
139
+ guidance_scale: float,
140
+ seed: int,
141
+ low_threshold: int,
142
+ high_threshold: int,
143
+ ) -> list[PIL.Image.Image]:
144
+ self.preprocessor.load('Canny')
145
+ control_image = self.preprocessor(image=image,
146
+ low_threshold=low_threshold,
147
+ high_threshold=high_threshold,
148
+ detect_resolution=image_resolution)
149
+
150
+ self.load_controlnet_weight('Canny')
151
+ results = self.run_pipe(
152
+ prompt=self.get_prompt(prompt, additional_prompt),
153
+ negative_prompt=negative_prompt,
154
+ control_image=control_image,
155
+ num_images=num_images,
156
+ num_steps=num_steps,
157
+ guidance_scale=guidance_scale,
158
+ seed=seed,
159
+ )
160
+ return [control_image] + results
161
+
162
+ @torch.inference_mode()
163
+ def process_mlsd(
164
+ self,
165
+ image: np.ndarray,
166
+ prompt: str,
167
+ additional_prompt: str,
168
+ negative_prompt: str,
169
+ num_images: int,
170
+ image_resolution: int,
171
+ preprocess_resolution: int,
172
+ num_steps: int,
173
+ guidance_scale: float,
174
+ seed: int,
175
+ value_threshold: float,
176
+ distance_threshold: float,
177
+ ) -> list[PIL.Image.Image]:
178
+ self.preprocessor.load('MLSD')
179
+ control_image = self.preprocessor(
180
+ image=image,
181
+ image_resolution=image_resolution,
182
+ detect_resolution=preprocess_resolution,
183
+ thr_v=value_threshold,
184
+ thr_d=distance_threshold,
185
+ )
186
+ self.load_controlnet_weight('MLSD')
187
+ results = self.run_pipe(
188
+ prompt=self.get_prompt(prompt, additional_prompt),
189
+ negative_prompt=negative_prompt,
190
+ control_image=control_image,
191
+ num_images=num_images,
192
+ num_steps=num_steps,
193
+ guidance_scale=guidance_scale,
194
+ seed=seed,
195
+ )
196
+ return [control_image] + results
197
+
198
+ @torch.inference_mode()
199
+ def process_scribble(
200
+ self,
201
+ image: np.ndarray,
202
+ prompt: str,
203
+ additional_prompt: str,
204
+ negative_prompt: str,
205
+ num_images: int,
206
+ image_resolution: int,
207
+ preprocess_resolution: int,
208
+ num_steps: int,
209
+ guidance_scale: float,
210
+ seed: int,
211
+ preprocessor_name: str,
212
+ ) -> list[PIL.Image.Image]:
213
+ if preprocessor_name == 'None':
214
+ image = HWC3(image)
215
+ image = resize_image(image, resolution=image_resolution)
216
+ control_image = PIL.Image.fromarray(image)
217
+ elif preprocessor_name == 'HED':
218
+ self.preprocessor.load(preprocessor_name)
219
+ control_image = self.preprocessor(
220
+ image=image,
221
+ image_resolution=image_resolution,
222
+ detect_resolution=preprocess_resolution,
223
+ scribble=False,
224
+ )
225
+ elif preprocessor_name == 'PidiNet':
226
+ self.preprocessor.load(preprocessor_name)
227
+ control_image = self.preprocessor(
228
+ image=image,
229
+ image_resolution=image_resolution,
230
+ detect_resolution=preprocess_resolution,
231
+ safe=False,
232
+ )
233
+ self.load_controlnet_weight('scribble')
234
+ results = self.run_pipe(
235
+ prompt=self.get_prompt(prompt, additional_prompt),
236
+ negative_prompt=negative_prompt,
237
+ control_image=control_image,
238
+ num_images=num_images,
239
+ num_steps=num_steps,
240
+ guidance_scale=guidance_scale,
241
+ seed=seed,
242
+ )
243
+ return [control_image] + results
244
+
245
+ @torch.inference_mode()
246
+ def process_scribble_interactive(
247
+ self,
248
+ image_and_mask: dict[str, np.ndarray],
249
+ prompt: str,
250
+ additional_prompt: str,
251
+ negative_prompt: str,
252
+ num_images: int,
253
+ image_resolution: int,
254
+ num_steps: int,
255
+ guidance_scale: float,
256
+ seed: int,
257
+ ) -> list[PIL.Image.Image]:
258
+ image = image_and_mask['mask']
259
+ image = HWC3(image)
260
+ image = resize_image(image, resolution=image_resolution)
261
+ control_image = PIL.Image.fromarray(image)
262
+
263
+ self.load_controlnet_weight('scribble')
264
+ results = self.run_pipe(
265
+ prompt=self.get_prompt(prompt, additional_prompt),
266
+ negative_prompt=negative_prompt,
267
+ control_image=control_image,
268
+ num_images=num_images,
269
+ num_steps=num_steps,
270
+ guidance_scale=guidance_scale,
271
+ seed=seed,
272
+ )
273
+ return [control_image] + results
274
+
275
+ @torch.inference_mode()
276
+ def process_softedge(
277
+ self,
278
+ image: np.ndarray,
279
+ prompt: str,
280
+ additional_prompt: str,
281
+ negative_prompt: str,
282
+ num_images: int,
283
+ image_resolution: int,
284
+ preprocess_resolution: int,
285
+ num_steps: int,
286
+ guidance_scale: float,
287
+ seed: int,
288
+ preprocessor_name: str,
289
+ ) -> list[PIL.Image.Image]:
290
+ if preprocessor_name == 'None':
291
+ image = HWC3(image)
292
+ image = resize_image(image, resolution=image_resolution)
293
+ control_image = PIL.Image.fromarray(image)
294
+ elif preprocessor_name in ['HED', 'HED safe']:
295
+ safe = 'safe' in preprocessor_name
296
+ self.preprocessor.load('HED')
297
+ control_image = self.preprocessor(
298
+ image=image,
299
+ image_resolution=image_resolution,
300
+ detect_resolution=preprocess_resolution,
301
+ scribble=safe,
302
+ )
303
+ elif preprocessor_name in ['PidiNet', 'PidiNet safe']:
304
+ safe = 'safe' in preprocessor_name
305
+ self.preprocessor.load('PidiNet')
306
+ control_image = self.preprocessor(
307
+ image=image,
308
+ image_resolution=image_resolution,
309
+ detect_resolution=preprocess_resolution,
310
+ safe=safe,
311
+ )
312
+ else:
313
+ raise ValueError
314
+ self.load_controlnet_weight('softedge')
315
+ results = self.run_pipe(
316
+ prompt=self.get_prompt(prompt, additional_prompt),
317
+ negative_prompt=negative_prompt,
318
+ control_image=control_image,
319
+ num_images=num_images,
320
+ num_steps=num_steps,
321
+ guidance_scale=guidance_scale,
322
+ seed=seed,
323
+ )
324
+ return [control_image] + results
325
+
326
+ @torch.inference_mode()
327
+ def process_openpose(
328
+ self,
329
+ image: np.ndarray,
330
+ prompt: str,
331
+ additional_prompt: str,
332
+ negative_prompt: str,
333
+ num_images: int,
334
+ image_resolution: int,
335
+ preprocess_resolution: int,
336
+ num_steps: int,
337
+ guidance_scale: float,
338
+ seed: int,
339
+ preprocessor_name: str,
340
+ ) -> list[PIL.Image.Image]:
341
+ if preprocessor_name == 'None':
342
+ image = HWC3(image)
343
+ image = resize_image(image, resolution=image_resolution)
344
+ control_image = PIL.Image.fromarray(image)
345
+ else:
346
+ self.preprocessor.load('Openpose')
347
+ control_image = self.preprocessor(
348
+ image=image,
349
+ image_resolution=image_resolution,
350
+ detect_resolution=preprocess_resolution,
351
+ hand_and_face=True,
352
+ )
353
+ self.load_controlnet_weight('Openpose')
354
+ results = self.run_pipe(
355
+ prompt=self.get_prompt(prompt, additional_prompt),
356
+ negative_prompt=negative_prompt,
357
+ control_image=control_image,
358
+ num_images=num_images,
359
+ num_steps=num_steps,
360
+ guidance_scale=guidance_scale,
361
+ seed=seed,
362
+ )
363
+ return [control_image] + results
364
+
365
+ @torch.inference_mode()
366
+ def process_segmentation(
367
+ self,
368
+ image: np.ndarray,
369
+ prompt: str,
370
+ additional_prompt: str,
371
+ negative_prompt: str,
372
+ num_images: int,
373
+ image_resolution: int,
374
+ preprocess_resolution: int,
375
+ num_steps: int,
376
+ guidance_scale: float,
377
+ seed: int,
378
+ preprocessor_name: str,
379
+ ) -> list[PIL.Image.Image]:
380
+ if preprocessor_name == 'None':
381
+ image = HWC3(image)
382
+ image = resize_image(image, resolution=image_resolution)
383
+ control_image = PIL.Image.fromarray(image)
384
+ else:
385
+ self.preprocessor.load(preprocessor_name)
386
+ control_image = self.preprocessor(
387
+ image=image,
388
+ image_resolution=image_resolution,
389
+ detect_resolution=preprocess_resolution,
390
+ )
391
+ self.load_controlnet_weight('segmentation')
392
+ results = self.run_pipe(
393
+ prompt=self.get_prompt(prompt, additional_prompt),
394
+ negative_prompt=negative_prompt,
395
+ control_image=control_image,
396
+ num_images=num_images,
397
+ num_steps=num_steps,
398
+ guidance_scale=guidance_scale,
399
+ seed=seed,
400
+ )
401
+ return [control_image] + results
402
+
403
+ @torch.inference_mode()
404
+ def process_depth(
405
+ self,
406
+ image: np.ndarray,
407
+ prompt: str,
408
+ additional_prompt: str,
409
+ negative_prompt: str,
410
+ num_images: int,
411
+ image_resolution: int,
412
+ preprocess_resolution: int,
413
+ num_steps: int,
414
+ guidance_scale: float,
415
+ seed: int,
416
+ preprocessor_name: str,
417
+ ) -> list[PIL.Image.Image]:
418
+ if preprocessor_name == 'None':
419
+ image = HWC3(image)
420
+ image = resize_image(image, resolution=image_resolution)
421
+ control_image = PIL.Image.fromarray(image)
422
+ else:
423
+ self.preprocessor.load(preprocessor_name)
424
+ control_image = self.preprocessor(
425
+ image=image,
426
+ image_resolution=image_resolution,
427
+ detect_resolution=preprocess_resolution,
428
+ )
429
+ self.load_controlnet_weight('depth')
430
+ results = self.run_pipe(
431
+ prompt=self.get_prompt(prompt, additional_prompt),
432
+ negative_prompt=negative_prompt,
433
+ control_image=control_image,
434
+ num_images=num_images,
435
+ num_steps=num_steps,
436
+ guidance_scale=guidance_scale,
437
+ seed=seed,
438
+ )
439
+ return [control_image] + results
440
+
441
+ @torch.inference_mode()
442
+ def process_normal(
443
+ self,
444
+ image: np.ndarray,
445
+ prompt: str,
446
+ additional_prompt: str,
447
+ negative_prompt: str,
448
+ num_images: int,
449
+ image_resolution: int,
450
+ preprocess_resolution: int,
451
+ num_steps: int,
452
+ guidance_scale: float,
453
+ seed: int,
454
+ preprocessor_name: str,
455
+ ) -> list[PIL.Image.Image]:
456
+ if preprocessor_name == 'None':
457
+ image = HWC3(image)
458
+ image = resize_image(image, resolution=image_resolution)
459
+ control_image = PIL.Image.fromarray(image)
460
+ else:
461
+ self.preprocessor.load('NormalBae')
462
+ control_image = self.preprocessor(
463
+ image=image,
464
+ image_resolution=image_resolution,
465
+ detect_resolution=preprocess_resolution,
466
+ )
467
+ self.load_controlnet_weight('NormalBae')
468
+ results = self.run_pipe(
469
+ prompt=self.get_prompt(prompt, additional_prompt),
470
+ negative_prompt=negative_prompt,
471
+ control_image=control_image,
472
+ num_images=num_images,
473
+ num_steps=num_steps,
474
+ guidance_scale=guidance_scale,
475
+ seed=seed,
476
+ )
477
+ return [control_image] + results
478
+
479
+ @torch.inference_mode()
480
+ def process_lineart(
481
+ self,
482
+ image: np.ndarray,
483
+ prompt: str,
484
+ additional_prompt: str,
485
+ negative_prompt: str,
486
+ num_images: int,
487
+ image_resolution: int,
488
+ preprocess_resolution: int,
489
+ num_steps: int,
490
+ guidance_scale: float,
491
+ seed: int,
492
+ preprocessor_name: str,
493
+ ) -> list[PIL.Image.Image]:
494
+ if preprocessor_name in ['None', 'None (anime)']:
495
+ image = HWC3(image)
496
+ image = resize_image(image, resolution=image_resolution)
497
+ control_image = PIL.Image.fromarray(image)
498
+ elif preprocessor_name in ['Lineart', 'Lineart coarse']:
499
+ coarse = 'coarse' in preprocessor_name
500
+ self.preprocessor.load('Lineart')
501
+ control_image = self.preprocessor(
502
+ image=image,
503
+ image_resolution=image_resolution,
504
+ detect_resolution=preprocess_resolution,
505
+ coarse=coarse,
506
+ )
507
+ elif preprocessor_name == 'Lineart (anime)':
508
+ self.preprocessor.load('LineartAnime')
509
+ control_image = self.preprocessor(
510
+ image=image,
511
+ image_resolution=image_resolution,
512
+ detect_resolution=preprocess_resolution,
513
+ )
514
+ if 'anime' in preprocessor_name:
515
+ self.load_controlnet_weight('lineart_anime')
516
+ else:
517
+ self.load_controlnet_weight('lineart')
518
+ results = self.run_pipe(
519
+ prompt=self.get_prompt(prompt, additional_prompt),
520
+ negative_prompt=negative_prompt,
521
+ control_image=control_image,
522
+ num_images=num_images,
523
+ num_steps=num_steps,
524
+ guidance_scale=guidance_scale,
525
+ seed=seed,
526
+ )
527
+ return [control_image] + results
528
+
529
+ @torch.inference_mode()
530
+ def process_shuffle(
531
+ self,
532
+ image: np.ndarray,
533
+ prompt: str,
534
+ additional_prompt: str,
535
+ negative_prompt: str,
536
+ num_images: int,
537
+ image_resolution: int,
538
+ num_steps: int,
539
+ guidance_scale: float,
540
+ seed: int,
541
+ preprocessor_name: str,
542
+ ) -> list[PIL.Image.Image]:
543
+ if preprocessor_name == 'None':
544
+ image = HWC3(image)
545
+ image = resize_image(image, resolution=image_resolution)
546
+ control_image = PIL.Image.fromarray(image)
547
+ else:
548
+ self.preprocessor.load(preprocessor_name)
549
+ control_image = self.preprocessor(
550
+ image=image,
551
+ image_resolution=image_resolution,
552
+ )
553
+ self.load_controlnet_weight('shuffle')
554
+ results = self.run_pipe(
555
+ prompt=self.get_prompt(prompt, additional_prompt),
556
+ negative_prompt=negative_prompt,
557
+ control_image=control_image,
558
+ num_images=num_images,
559
+ num_steps=num_steps,
560
+ guidance_scale=guidance_scale,
561
+ seed=seed,
562
+ )
563
+ return [control_image] + results
564
+
565
+ @torch.inference_mode()
566
+ def process_ip2p(
567
+ self,
568
+ image: np.ndarray,
569
+ prompt: str,
570
+ additional_prompt: str,
571
+ negative_prompt: str,
572
+ num_images: int,
573
+ image_resolution: int,
574
+ num_steps: int,
575
+ guidance_scale: float,
576
+ seed: int,
577
+ ) -> list[PIL.Image.Image]:
578
+ image = HWC3(image)
579
+ image = resize_image(image, resolution=image_resolution)
580
+ control_image = PIL.Image.fromarray(image)
581
+ self.load_controlnet_weight('ip2p')
582
+ results = self.run_pipe(
583
+ prompt=self.get_prompt(prompt, additional_prompt),
584
+ negative_prompt=negative_prompt,
585
+ control_image=control_image,
586
+ num_images=num_images,
587
+ num_steps=num_steps,
588
+ guidance_scale=guidance_scale,
589
+ seed=seed,
590
+ )
591
+ return [control_image] + results
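Note: every process_* method shares the same shape: run the matching preprocessor, hot-swap the right ControlNet weights via load_controlnet_weight, call run_pipe, and return the control image followed by the generated samples. A minimal end-to-end sketch, assuming a CUDA machine and a hypothetical input array:

import numpy as np
from model import Model

model = Model(task_name='Canny')  # loads SD 1.5 plus the Canny ControlNet
image = np.zeros((512, 512, 3), dtype=np.uint8)  # placeholder input
results = model.process_canny(
    image=image,
    prompt='a modern house',
    additional_prompt='best quality, extremely detailed',
    negative_prompt='lowres, bad anatomy',
    num_images=1,
    image_resolution=512,
    num_steps=20,
    guidance_scale=9.0,
    seed=0,
    low_threshold=100,
    high_threshold=200,
)
# results[0] is the Canny edge map; results[1:] are the generated images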
notebooks/notebook.ipynb ADDED
@@ -0,0 +1,69 @@
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "code",
5
+ "execution_count": null,
6
+ "metadata": {
7
+ "id": "8CnkIPtjn8Dc"
8
+ },
9
+ "outputs": [],
10
+ "source": [
11
+ "!git clone --recursive https://huggingface.co/spaces/hysts/ControlNet-v1-1"
12
+ ]
13
+ },
14
+ {
15
+ "cell_type": "code",
16
+ "execution_count": null,
17
+ "metadata": {
18
+ "id": "IZlaYNTWoFPK"
19
+ },
20
+ "outputs": [],
21
+ "source": [
22
+ "%cd ControlNet-v1-1"
23
+ ]
24
+ },
25
+ {
26
+ "cell_type": "code",
27
+ "execution_count": null,
28
+ "metadata": {
29
+ "id": "P_fzYrLvoIcI"
30
+ },
31
+ "outputs": [],
32
+ "source": [
33
+ "!pip install -q -r requirements.txt"
34
+ ]
35
+ },
36
+ {
37
+ "cell_type": "code",
38
+ "execution_count": null,
39
+ "metadata": {
40
+ "id": "GOfGng5Woktd"
41
+ },
42
+ "outputs": [],
43
+ "source": [
44
+ "import app"
45
+ ]
46
+ },
47
+ {
48
+ "cell_type": "code",
49
+ "execution_count": null,
50
+ "metadata": {
51
+ "id": "7Cued230ol7T"
52
+ },
53
+ "outputs": [],
54
+ "source": []
55
+ }
56
+ ],
57
+ "metadata": {
58
+ "accelerator": "GPU",
59
+ "colab": {
60
+ "provenance": []
61
+ },
62
+ "gpuClass": "standard",
63
+ "language_info": {
64
+ "name": "python"
65
+ }
66
+ },
67
+ "nbformat": 4,
68
+ "nbformat_minor": 0
69
+ }
preprocessor.py ADDED
@@ -0,0 +1,77 @@
1
+ import gc
2
+
3
+ import numpy as np
4
+ import PIL.Image
5
+ import torch
6
+ from controlnet_aux import (CannyDetector, ContentShuffleDetector, HEDdetector,
7
+ LineartAnimeDetector, LineartDetector,
8
+ MidasDetector, MLSDdetector, NormalBaeDetector,
9
+ OpenposeDetector, PidiNetDetector)
10
+ from controlnet_aux.util import HWC3
11
+
12
+ from cv_utils import resize_image
13
+ from depth_estimator import DepthEstimator
14
+ from image_segmentor import ImageSegmentor
15
+
16
+
17
+ class Preprocessor:
18
+ MODEL_ID = 'lllyasviel/Annotators'
19
+
20
+ def __init__(self):
21
+ self.model = None
22
+ self.name = ''
23
+
24
+ def load(self, name: str) -> None:
25
+ if name == self.name:
26
+ return
27
+ if name == 'HED':
28
+ self.model = HEDdetector.from_pretrained(self.MODEL_ID)
29
+ elif name == 'Midas':
30
+ self.model = MidasDetector.from_pretrained(self.MODEL_ID)
31
+ elif name == 'MLSD':
32
+ self.model = MLSDdetector.from_pretrained(self.MODEL_ID)
33
+ elif name == 'Openpose':
34
+ self.model = OpenposeDetector.from_pretrained(self.MODEL_ID)
35
+ elif name == 'PidiNet':
36
+ self.model = PidiNetDetector.from_pretrained(self.MODEL_ID)
37
+ elif name == 'NormalBae':
38
+ self.model = NormalBaeDetector.from_pretrained(self.MODEL_ID)
39
+ elif name == 'Lineart':
40
+ self.model = LineartDetector.from_pretrained(self.MODEL_ID)
41
+ elif name == 'LineartAnime':
42
+ self.model = LineartAnimeDetector.from_pretrained(self.MODEL_ID)
43
+ elif name == 'Canny':
44
+ self.model = CannyDetector()
45
+ elif name == 'ContentShuffle':
46
+ self.model = ContentShuffleDetector()
47
+ elif name == 'DPT':
48
+ self.model = DepthEstimator()
49
+ elif name == 'UPerNet':
50
+ self.model = ImageSegmentor()
51
+ else:
52
+ raise ValueError
53
+ torch.cuda.empty_cache()
54
+ gc.collect()
55
+ self.name = name
56
+
57
+ def __call__(self, image: PIL.Image.Image, **kwargs) -> PIL.Image.Image:
58
+ if self.name == 'Canny':
59
+ if 'detect_resolution' in kwargs:
60
+ detect_resolution = kwargs.pop('detect_resolution')
61
+ image = np.array(image)
62
+ image = HWC3(image)
63
+ image = resize_image(image, resolution=detect_resolution)
64
+ image = self.model(image, **kwargs)
65
+ return PIL.Image.fromarray(image)
66
+ elif self.name == 'Midas':
67
+ detect_resolution = kwargs.pop('detect_resolution', 512)
68
+ image_resolution = kwargs.pop('image_resolution', 512)
69
+ image = np.array(image)
70
+ image = HWC3(image)
71
+ image = resize_image(image, resolution=detect_resolution)
72
+ image = self.model(image, **kwargs)
73
+ image = HWC3(image)
74
+ image = resize_image(image, resolution=image_resolution)
75
+ return PIL.Image.fromarray(image)
76
+ else:
77
+ return self.model(image, **kwargs)
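Note: Preprocessor lazily instantiates one detector at a time and frees GPU memory when switching; load() is a no-op when the requested name is already active. A small sketch (note the Canny branch expects detect_resolution to be supplied by the caller):

import numpy as np
from preprocessor import Preprocessor

preprocessor = Preprocessor()
preprocessor.load('Canny')  # builds a CannyDetector
preprocessor.load('Canny')  # no-op: same name, the detector is reused
image = np.zeros((600, 800, 3), dtype=np.uint8)  # placeholder input
edges = preprocessor(image,
                     low_threshold=100,
                     high_threshold=200,
                     detect_resolution=512)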
requirements.txt ADDED
@@ -0,0 +1,13 @@
1
+ accelerate==0.20.3
2
+ controlnet_aux==0.0.6
3
+ diffusers==0.18.1
4
+ einops==0.6.1
5
+ gradio==3.36.1
6
+ huggingface-hub==0.16.4
7
+ mediapipe==0.10.1
8
+ opencv-python-headless==4.8.0.74
9
+ safetensors==0.3.1
10
+ torch==2.0.1
11
+ torchvision==0.15.2
12
+ transformers==4.30.2
13
+ xformers==0.0.20
settings.py ADDED
@@ -0,0 +1,18 @@
1
+ import os
2
+
3
+ import numpy as np
4
+
5
+ DEFAULT_MODEL_ID = os.getenv('DEFAULT_MODEL_ID',
6
+ 'runwayml/stable-diffusion-v1-5')
7
+
8
+ MAX_NUM_IMAGES = int(os.getenv('MAX_NUM_IMAGES', '3'))
9
+ DEFAULT_NUM_IMAGES = min(MAX_NUM_IMAGES,
10
+ int(os.getenv('DEFAULT_NUM_IMAGES', '3')))
11
+ MAX_IMAGE_RESOLUTION = int(os.getenv('MAX_IMAGE_RESOLUTION', '768'))
12
+ DEFAULT_IMAGE_RESOLUTION = min(
13
+ MAX_IMAGE_RESOLUTION, int(os.getenv('DEFAULT_IMAGE_RESOLUTION', '768')))
14
+
15
+ ALLOW_CHANGING_BASE_MODEL = os.getenv('SPACE_ID') != 'hysts/ControlNet-v1-1'
16
+ SHOW_DUPLICATE_BUTTON = os.getenv('SHOW_DUPLICATE_BUTTON') == '1'
17
+
18
+ MAX_SEED = np.iinfo(np.int32).max
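Note: all limits are read from environment variables at import time, with the defaults clamped so they never exceed the configured maxima. For example, overriding a limit before settings is first imported:

import os

os.environ['MAX_NUM_IMAGES'] = '1'  # must be set before the first import

import settings

print(settings.DEFAULT_NUM_IMAGES)  # 1: min(MAX_NUM_IMAGES, default of 3)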
style.css ADDED
@@ -0,0 +1,10 @@
1
+ h1 {
2
+ text-align: center;
3
+ }
4
+
5
+ #duplicate-button {
6
+ margin: auto;
7
+ color: #fff;
8
+ background: #1565c0;
9
+ border-radius: 100vh;
10
+ }
utils.py ADDED
@@ -0,0 +1,9 @@
1
+ import random
2
+
3
+ from settings import MAX_SEED
4
+
5
+
6
+ def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
7
+ if randomize_seed:
8
+ seed = random.randint(0, MAX_SEED)
9
+ return seed
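Note: randomize_seed_fn is the first step of every event chain in the app files above: with the checkbox on it draws a fresh seed, otherwise it passes the slider value through unchanged. For example:

from utils import randomize_seed_fn

randomize_seed_fn(42, randomize_seed=False)  # -> 42, unchanged
randomize_seed_fn(42, randomize_seed=True)   # -> random int in [0, MAX_SEED]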