yonghenglh6 and hysts (HF staff) committed
Commit fad8b24 · 0 parents

Duplicate from hysts/ControlNet-v1-1

Co-authored-by: hysts <hysts@users.noreply.huggingface.co>
.gitattributes ADDED
@@ -0,0 +1,34 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1,162 @@
1
+ gradio_cached_examples/
2
+
3
+ # Byte-compiled / optimized / DLL files
4
+ __pycache__/
5
+ *.py[cod]
6
+ *$py.class
7
+
8
+ # C extensions
9
+ *.so
10
+
11
+ # Distribution / packaging
12
+ .Python
13
+ build/
14
+ develop-eggs/
15
+ dist/
16
+ downloads/
17
+ eggs/
18
+ .eggs/
19
+ lib/
20
+ lib64/
21
+ parts/
22
+ sdist/
23
+ var/
24
+ wheels/
25
+ share/python-wheels/
26
+ *.egg-info/
27
+ .installed.cfg
28
+ *.egg
29
+ MANIFEST
30
+
31
+ # PyInstaller
32
+ # Usually these files are written by a python script from a template
33
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
34
+ *.manifest
35
+ *.spec
36
+
37
+ # Installer logs
38
+ pip-log.txt
39
+ pip-delete-this-directory.txt
40
+
41
+ # Unit test / coverage reports
42
+ htmlcov/
43
+ .tox/
44
+ .nox/
45
+ .coverage
46
+ .coverage.*
47
+ .cache
48
+ nosetests.xml
49
+ coverage.xml
50
+ *.cover
51
+ *.py,cover
52
+ .hypothesis/
53
+ .pytest_cache/
54
+ cover/
55
+
56
+ # Translations
57
+ *.mo
58
+ *.pot
59
+
60
+ # Django stuff:
61
+ *.log
62
+ local_settings.py
63
+ db.sqlite3
64
+ db.sqlite3-journal
65
+
66
+ # Flask stuff:
67
+ instance/
68
+ .webassets-cache
69
+
70
+ # Scrapy stuff:
71
+ .scrapy
72
+
73
+ # Sphinx documentation
74
+ docs/_build/
75
+
76
+ # PyBuilder
77
+ .pybuilder/
78
+ target/
79
+
80
+ # Jupyter Notebook
81
+ .ipynb_checkpoints
82
+
83
+ # IPython
84
+ profile_default/
85
+ ipython_config.py
86
+
87
+ # pyenv
88
+ # For a library or package, you might want to ignore these files since the code is
89
+ # intended to run in multiple environments; otherwise, check them in:
90
+ # .python-version
91
+
92
+ # pipenv
93
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
94
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
95
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
96
+ # install all needed dependencies.
97
+ #Pipfile.lock
98
+
99
+ # poetry
100
+ # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
101
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
102
+ # commonly ignored for libraries.
103
+ # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
104
+ #poetry.lock
105
+
106
+ # pdm
107
+ # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
108
+ #pdm.lock
109
+ # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
110
+ # in version control.
111
+ # https://pdm.fming.dev/#use-with-ide
112
+ .pdm.toml
113
+
114
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
115
+ __pypackages__/
116
+
117
+ # Celery stuff
118
+ celerybeat-schedule
119
+ celerybeat.pid
120
+
121
+ # SageMath parsed files
122
+ *.sage.py
123
+
124
+ # Environments
125
+ .env
126
+ .venv
127
+ env/
128
+ venv/
129
+ ENV/
130
+ env.bak/
131
+ venv.bak/
132
+
133
+ # Spyder project settings
134
+ .spyderproject
135
+ .spyproject
136
+
137
+ # Rope project settings
138
+ .ropeproject
139
+
140
+ # mkdocs documentation
141
+ /site
142
+
143
+ # mypy
144
+ .mypy_cache/
145
+ .dmypy.json
146
+ dmypy.json
147
+
148
+ # Pyre type checker
149
+ .pyre/
150
+
151
+ # pytype static type analyzer
152
+ .pytype/
153
+
154
+ # Cython debug symbols
155
+ cython_debug/
156
+
157
+ # PyCharm
158
+ # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
159
+ # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
160
+ # and can be added to the global gitignore or merged into this file. For a more nuclear
161
+ # option (not recommended) you can uncomment the following to ignore the entire idea folder.
162
+ #.idea/
.pre-commit-config.yaml ADDED
@@ -0,0 +1,36 @@
+ repos:
+ - repo: https://github.com/pre-commit/pre-commit-hooks
+   rev: v4.2.0
+   hooks:
+   - id: check-executables-have-shebangs
+   - id: check-json
+   - id: check-merge-conflict
+   - id: check-shebang-scripts-are-executable
+   - id: check-toml
+   - id: check-yaml
+   - id: double-quote-string-fixer
+   - id: end-of-file-fixer
+   - id: mixed-line-ending
+     args: ['--fix=lf']
+   - id: requirements-txt-fixer
+   - id: trailing-whitespace
+ - repo: https://github.com/myint/docformatter
+   rev: v1.4
+   hooks:
+   - id: docformatter
+     args: ['--in-place']
+ - repo: https://github.com/pycqa/isort
+   rev: 5.12.0
+   hooks:
+   - id: isort
+ - repo: https://github.com/pre-commit/mirrors-mypy
+   rev: v0.991
+   hooks:
+   - id: mypy
+     args: ['--ignore-missing-imports']
+     additional_dependencies: ['types-python-slugify']
+ - repo: https://github.com/google/yapf
+   rev: v0.32.0
+   hooks:
+   - id: yapf
+     args: ['--parallel', '--in-place']
.style.yapf ADDED
@@ -0,0 +1,5 @@
+ [style]
+ based_on_style = pep8
+ blank_line_before_nested_class_or_def = false
+ spaces_before_comment = 2
+ split_before_logical_operator = true
LICENSE ADDED
@@ -0,0 +1,21 @@
+ MIT License
+
+ Copyright (c) 2023 hysts
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
LICENSE.ControlNet ADDED
@@ -0,0 +1,201 @@
1
+ Apache License
2
+ Version 2.0, January 2004
3
+ http://www.apache.org/licenses/
4
+
5
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6
+
7
+ 1. Definitions.
8
+
9
+ "License" shall mean the terms and conditions for use, reproduction,
10
+ and distribution as defined by Sections 1 through 9 of this document.
11
+
12
+ "Licensor" shall mean the copyright owner or entity authorized by
13
+ the copyright owner that is granting the License.
14
+
15
+ "Legal Entity" shall mean the union of the acting entity and all
16
+ other entities that control, are controlled by, or are under common
17
+ control with that entity. For the purposes of this definition,
18
+ "control" means (i) the power, direct or indirect, to cause the
19
+ direction or management of such entity, whether by contract or
20
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
21
+ outstanding shares, or (iii) beneficial ownership of such entity.
22
+
23
+ "You" (or "Your") shall mean an individual or Legal Entity
24
+ exercising permissions granted by this License.
25
+
26
+ "Source" form shall mean the preferred form for making modifications,
27
+ including but not limited to software source code, documentation
28
+ source, and configuration files.
29
+
30
+ "Object" form shall mean any form resulting from mechanical
31
+ transformation or translation of a Source form, including but
32
+ not limited to compiled object code, generated documentation,
33
+ and conversions to other media types.
34
+
35
+ "Work" shall mean the work of authorship, whether in Source or
36
+ Object form, made available under the License, as indicated by a
37
+ copyright notice that is included in or attached to the work
38
+ (an example is provided in the Appendix below).
39
+
40
+ "Derivative Works" shall mean any work, whether in Source or Object
41
+ form, that is based on (or derived from) the Work and for which the
42
+ editorial revisions, annotations, elaborations, or other modifications
43
+ represent, as a whole, an original work of authorship. For the purposes
44
+ of this License, Derivative Works shall not include works that remain
45
+ separable from, or merely link (or bind by name) to the interfaces of,
46
+ the Work and Derivative Works thereof.
47
+
48
+ "Contribution" shall mean any work of authorship, including
49
+ the original version of the Work and any modifications or additions
50
+ to that Work or Derivative Works thereof, that is intentionally
51
+ submitted to Licensor for inclusion in the Work by the copyright owner
52
+ or by an individual or Legal Entity authorized to submit on behalf of
53
+ the copyright owner. For the purposes of this definition, "submitted"
54
+ means any form of electronic, verbal, or written communication sent
55
+ to the Licensor or its representatives, including but not limited to
56
+ communication on electronic mailing lists, source code control systems,
57
+ and issue tracking systems that are managed by, or on behalf of, the
58
+ Licensor for the purpose of discussing and improving the Work, but
59
+ excluding communication that is conspicuously marked or otherwise
60
+ designated in writing by the copyright owner as "Not a Contribution."
61
+
62
+ "Contributor" shall mean Licensor and any individual or Legal Entity
63
+ on behalf of whom a Contribution has been received by Licensor and
64
+ subsequently incorporated within the Work.
65
+
66
+ 2. Grant of Copyright License. Subject to the terms and conditions of
67
+ this License, each Contributor hereby grants to You a perpetual,
68
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69
+ copyright license to reproduce, prepare Derivative Works of,
70
+ publicly display, publicly perform, sublicense, and distribute the
71
+ Work and such Derivative Works in Source or Object form.
72
+
73
+ 3. Grant of Patent License. Subject to the terms and conditions of
74
+ this License, each Contributor hereby grants to You a perpetual,
75
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76
+ (except as stated in this section) patent license to make, have made,
77
+ use, offer to sell, sell, import, and otherwise transfer the Work,
78
+ where such license applies only to those patent claims licensable
79
+ by such Contributor that are necessarily infringed by their
80
+ Contribution(s) alone or by combination of their Contribution(s)
81
+ with the Work to which such Contribution(s) was submitted. If You
82
+ institute patent litigation against any entity (including a
83
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
84
+ or a Contribution incorporated within the Work constitutes direct
85
+ or contributory patent infringement, then any patent licenses
86
+ granted to You under this License for that Work shall terminate
87
+ as of the date such litigation is filed.
88
+
89
+ 4. Redistribution. You may reproduce and distribute copies of the
90
+ Work or Derivative Works thereof in any medium, with or without
91
+ modifications, and in Source or Object form, provided that You
92
+ meet the following conditions:
93
+
94
+ (a) You must give any other recipients of the Work or
95
+ Derivative Works a copy of this License; and
96
+
97
+ (b) You must cause any modified files to carry prominent notices
98
+ stating that You changed the files; and
99
+
100
+ (c) You must retain, in the Source form of any Derivative Works
101
+ that You distribute, all copyright, patent, trademark, and
102
+ attribution notices from the Source form of the Work,
103
+ excluding those notices that do not pertain to any part of
104
+ the Derivative Works; and
105
+
106
+ (d) If the Work includes a "NOTICE" text file as part of its
107
+ distribution, then any Derivative Works that You distribute must
108
+ include a readable copy of the attribution notices contained
109
+ within such NOTICE file, excluding those notices that do not
110
+ pertain to any part of the Derivative Works, in at least one
111
+ of the following places: within a NOTICE text file distributed
112
+ as part of the Derivative Works; within the Source form or
113
+ documentation, if provided along with the Derivative Works; or,
114
+ within a display generated by the Derivative Works, if and
115
+ wherever such third-party notices normally appear. The contents
116
+ of the NOTICE file are for informational purposes only and
117
+ do not modify the License. You may add Your own attribution
118
+ notices within Derivative Works that You distribute, alongside
119
+ or as an addendum to the NOTICE text from the Work, provided
120
+ that such additional attribution notices cannot be construed
121
+ as modifying the License.
122
+
123
+ You may add Your own copyright statement to Your modifications and
124
+ may provide additional or different license terms and conditions
125
+ for use, reproduction, or distribution of Your modifications, or
126
+ for any such Derivative Works as a whole, provided Your use,
127
+ reproduction, and distribution of the Work otherwise complies with
128
+ the conditions stated in this License.
129
+
130
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
131
+ any Contribution intentionally submitted for inclusion in the Work
132
+ by You to the Licensor shall be under the terms and conditions of
133
+ this License, without any additional terms or conditions.
134
+ Notwithstanding the above, nothing herein shall supersede or modify
135
+ the terms of any separate license agreement you may have executed
136
+ with Licensor regarding such Contributions.
137
+
138
+ 6. Trademarks. This License does not grant permission to use the trade
139
+ names, trademarks, service marks, or product names of the Licensor,
140
+ except as required for reasonable and customary use in describing the
141
+ origin of the Work and reproducing the content of the NOTICE file.
142
+
143
+ 7. Disclaimer of Warranty. Unless required by applicable law or
144
+ agreed to in writing, Licensor provides the Work (and each
145
+ Contributor provides its Contributions) on an "AS IS" BASIS,
146
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147
+ implied, including, without limitation, any warranties or conditions
148
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149
+ PARTICULAR PURPOSE. You are solely responsible for determining the
150
+ appropriateness of using or redistributing the Work and assume any
151
+ risks associated with Your exercise of permissions under this License.
152
+
153
+ 8. Limitation of Liability. In no event and under no legal theory,
154
+ whether in tort (including negligence), contract, or otherwise,
155
+ unless required by applicable law (such as deliberate and grossly
156
+ negligent acts) or agreed to in writing, shall any Contributor be
157
+ liable to You for damages, including any direct, indirect, special,
158
+ incidental, or consequential damages of any character arising as a
159
+ result of this License or out of the use or inability to use the
160
+ Work (including but not limited to damages for loss of goodwill,
161
+ work stoppage, computer failure or malfunction, or any and all
162
+ other commercial damages or losses), even if such Contributor
163
+ has been advised of the possibility of such damages.
164
+
165
+ 9. Accepting Warranty or Additional Liability. While redistributing
166
+ the Work or Derivative Works thereof, You may choose to offer,
167
+ and charge a fee for, acceptance of support, warranty, indemnity,
168
+ or other liability obligations and/or rights consistent with this
169
+ License. However, in accepting such obligations, You may act only
170
+ on Your own behalf and on Your sole responsibility, not on behalf
171
+ of any other Contributor, and only if You agree to indemnify,
172
+ defend, and hold each Contributor harmless for any liability
173
+ incurred by, or claims asserted against, such Contributor by reason
174
+ of your accepting any such warranty or additional liability.
175
+
176
+ END OF TERMS AND CONDITIONS
177
+
178
+ APPENDIX: How to apply the Apache License to your work.
179
+
180
+ To apply the Apache License to your work, attach the following
181
+ boilerplate notice, with the fields enclosed by brackets "[]"
182
+ replaced with your own identifying information. (Don't include
183
+ the brackets!) The text should be enclosed in the appropriate
184
+ comment syntax for the file format. We also recommend that a
185
+ file or class name and description of purpose be included on the
186
+ same "printed page" as the copyright notice for easier
187
+ identification within third-party archives.
188
+
189
+ Copyright [yyyy] [name of copyright owner]
190
+
191
+ Licensed under the Apache License, Version 2.0 (the "License");
192
+ you may not use this file except in compliance with the License.
193
+ You may obtain a copy of the License at
194
+
195
+ http://www.apache.org/licenses/LICENSE-2.0
196
+
197
+ Unless required by applicable law or agreed to in writing, software
198
+ distributed under the License is distributed on an "AS IS" BASIS,
199
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200
+ See the License for the specific language governing permissions and
201
+ limitations under the License.
README.md ADDED
@@ -0,0 +1,16 @@
+ ---
+ title: ControlNet V1.1
+ emoji: 📉
+ colorFrom: yellow
+ colorTo: green
+ sdk: gradio
+ sdk_version: 3.34.0
+ python_version: 3.10.11
+ app_file: app.py
+ pinned: false
+ license: mit
+ suggested_hardware: t4-medium
+ duplicated_from: hysts/ControlNet-v1-1
+ ---
+
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,130 @@
+ #!/usr/bin/env python
+
+ from __future__ import annotations
+
+ import os
+
+ import gradio as gr
+ import torch
+
+ from app_canny import create_demo as create_demo_canny
+ from app_depth import create_demo as create_demo_depth
+ from app_ip2p import create_demo as create_demo_ip2p
+ from app_lineart import create_demo as create_demo_lineart
+ from app_mlsd import create_demo as create_demo_mlsd
+ from app_normal import create_demo as create_demo_normal
+ from app_openpose import create_demo as create_demo_openpose
+ from app_scribble import create_demo as create_demo_scribble
+ from app_scribble_interactive import \
+     create_demo as create_demo_scribble_interactive
+ from app_segmentation import create_demo as create_demo_segmentation
+ from app_shuffle import create_demo as create_demo_shuffle
+ from app_softedge import create_demo as create_demo_softedge
+ from model import Model
+
+ DESCRIPTION = '# ControlNet v1.1'
+
+ SPACE_ID = os.getenv('SPACE_ID')
+ ALLOW_CHANGING_BASE_MODEL = SPACE_ID != 'hysts/ControlNet-v1-1'
+
+ if SPACE_ID is not None:
+     DESCRIPTION += f'\n<p>For faster inference without waiting in queue, you may duplicate the space and upgrade to GPU in settings. <a href="https://huggingface.co/spaces/{SPACE_ID}?duplicate=true"><img style="display: inline; margin-top: 0em; margin-bottom: 0em" src="https://bit.ly/3gLdBN6" alt="Duplicate Space" /></a></p>'
+
+ if not torch.cuda.is_available():
+     DESCRIPTION += '\n<p>Running on CPU 🥶 This demo does not work on CPU.</p>'
+
+ MAX_NUM_IMAGES = int(os.getenv('MAX_NUM_IMAGES', '3'))
+ DEFAULT_NUM_IMAGES = min(MAX_NUM_IMAGES,
+     int(os.getenv('DEFAULT_NUM_IMAGES', '1')))
+
+ DEFAULT_MODEL_ID = os.getenv('DEFAULT_MODEL_ID',
+     'runwayml/stable-diffusion-v1-5')
+ model = Model(base_model_id=DEFAULT_MODEL_ID, task_name='Canny')
+
+ with gr.Blocks(css='style.css') as demo:
+     gr.Markdown(DESCRIPTION)
+     with gr.Tabs():
+         with gr.TabItem('Canny'):
+             create_demo_canny(model.process_canny,
+                 max_images=MAX_NUM_IMAGES,
+                 default_num_images=DEFAULT_NUM_IMAGES)
+         with gr.TabItem('MLSD'):
+             create_demo_mlsd(model.process_mlsd,
+                 max_images=MAX_NUM_IMAGES,
+                 default_num_images=DEFAULT_NUM_IMAGES)
+         with gr.TabItem('Scribble'):
+             create_demo_scribble(model.process_scribble,
+                 max_images=MAX_NUM_IMAGES,
+                 default_num_images=DEFAULT_NUM_IMAGES)
+         with gr.TabItem('Scribble Interactive'):
+             create_demo_scribble_interactive(
+                 model.process_scribble_interactive,
+                 max_images=MAX_NUM_IMAGES,
+                 default_num_images=DEFAULT_NUM_IMAGES)
+         with gr.TabItem('SoftEdge'):
+             create_demo_softedge(model.process_softedge,
+                 max_images=MAX_NUM_IMAGES,
+                 default_num_images=DEFAULT_NUM_IMAGES)
+         with gr.TabItem('OpenPose'):
+             create_demo_openpose(model.process_openpose,
+                 max_images=MAX_NUM_IMAGES,
+                 default_num_images=DEFAULT_NUM_IMAGES)
+         with gr.TabItem('Segmentation'):
+             create_demo_segmentation(model.process_segmentation,
+                 max_images=MAX_NUM_IMAGES,
+                 default_num_images=DEFAULT_NUM_IMAGES)
+         with gr.TabItem('Depth'):
+             create_demo_depth(model.process_depth,
+                 max_images=MAX_NUM_IMAGES,
+                 default_num_images=DEFAULT_NUM_IMAGES)
+         with gr.TabItem('Normal map'):
+             create_demo_normal(model.process_normal,
+                 max_images=MAX_NUM_IMAGES,
+                 default_num_images=DEFAULT_NUM_IMAGES)
+         with gr.TabItem('Lineart'):
+             create_demo_lineart(model.process_lineart,
+                 max_images=MAX_NUM_IMAGES,
+                 default_num_images=DEFAULT_NUM_IMAGES)
+         with gr.TabItem('Content Shuffle'):
+             create_demo_shuffle(model.process_shuffle,
+                 max_images=MAX_NUM_IMAGES,
+                 default_num_images=DEFAULT_NUM_IMAGES)
+         with gr.TabItem('Instruct Pix2Pix'):
+             create_demo_ip2p(model.process_ip2p,
+                 max_images=MAX_NUM_IMAGES,
+                 default_num_images=DEFAULT_NUM_IMAGES)
+
+     with gr.Accordion(label='Base model', open=False):
+         with gr.Row():
+             with gr.Column():
+                 current_base_model = gr.Text(label='Current base model')
+             with gr.Column(scale=0.3):
+                 check_base_model_button = gr.Button('Check current base model')
+         with gr.Row():
+             with gr.Column():
+                 new_base_model_id = gr.Text(
+                     label='New base model',
+                     max_lines=1,
+                     placeholder='runwayml/stable-diffusion-v1-5',
+                     info=
+                     'The base model must be compatible with Stable Diffusion v1.5.',
+                     interactive=ALLOW_CHANGING_BASE_MODEL)
+             with gr.Column(scale=0.3):
+                 change_base_model_button = gr.Button(
+                     'Change base model', interactive=ALLOW_CHANGING_BASE_MODEL)
+         if not ALLOW_CHANGING_BASE_MODEL:
+             gr.Markdown(
+                 f'''The base model cannot be changed in this Space so as not to slow down the demo, but you can change it if you duplicate the Space. <a href="https://huggingface.co/spaces/{SPACE_ID}?duplicate=true"><img style="display: inline; margin-top: 0em; margin-bottom: 0em" src="https://bit.ly/3gLdBN6" alt="Duplicate Space" /></a>'''
+             )
+
+     check_base_model_button.click(fn=lambda: model.base_model_id,
+         outputs=current_base_model,
+         queue=False)
+     new_base_model_id.submit(fn=model.set_base_model,
+         inputs=new_base_model_id,
+         outputs=current_base_model)
+     change_base_model_button.click(fn=model.set_base_model,
+         inputs=new_base_model_id,
+         outputs=current_base_model)
+
+ demo.queue(max_size=20).launch()
app_canny.py ADDED
@@ -0,0 +1,108 @@
1
+ #!/usr/bin/env python
2
+
3
+ import gradio as gr
4
+
5
+ from utils import randomize_seed_fn
6
+
7
+
8
+ def create_demo(process, max_images=12, default_num_images=3):
9
+ with gr.Blocks() as demo:
10
+ with gr.Row():
11
+ with gr.Column():
12
+ image = gr.Image()
13
+ prompt = gr.Textbox(label='Prompt')
14
+ run_button = gr.Button('Run')
15
+ with gr.Accordion('Advanced options', open=False):
16
+ num_samples = gr.Slider(label='Number of images',
17
+ minimum=1,
18
+ maximum=max_images,
19
+ value=default_num_images,
20
+ step=1)
21
+ image_resolution = gr.Slider(label='Image resolution',
22
+ minimum=256,
23
+ maximum=512,
24
+ value=512,
25
+ step=256)
26
+ canny_low_threshold = gr.Slider(
27
+ label='Canny low threshold',
28
+ minimum=1,
29
+ maximum=255,
30
+ value=100,
31
+ step=1)
32
+ canny_high_threshold = gr.Slider(
33
+ label='Canny high threshold',
34
+ minimum=1,
35
+ maximum=255,
36
+ value=200,
37
+ step=1)
38
+ num_steps = gr.Slider(label='Number of steps',
39
+ minimum=1,
40
+ maximum=100,
41
+ value=20,
42
+ step=1)
43
+ guidance_scale = gr.Slider(label='Guidance scale',
44
+ minimum=0.1,
45
+ maximum=30.0,
46
+ value=9.0,
47
+ step=0.1)
48
+ seed = gr.Slider(label='Seed',
49
+ minimum=0,
50
+ maximum=1000000,
51
+ step=1,
52
+ value=0,
53
+ randomize=True)
54
+ randomize_seed = gr.Checkbox(label='Randomize seed',
55
+ value=True)
56
+ a_prompt = gr.Textbox(
57
+ label='Additional prompt',
58
+ value='best quality, extremely detailed')
59
+ n_prompt = gr.Textbox(
60
+ label='Negative prompt',
61
+ value=
62
+ 'longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality'
63
+ )
64
+ with gr.Column():
65
+ result = gr.Gallery(label='Output', show_label=False).style(
66
+ columns=2, object_fit='scale-down')
67
+ inputs = [
68
+ image,
69
+ prompt,
70
+ a_prompt,
71
+ n_prompt,
72
+ num_samples,
73
+ image_resolution,
74
+ num_steps,
75
+ guidance_scale,
76
+ seed,
77
+ canny_low_threshold,
78
+ canny_high_threshold,
79
+ ]
80
+ prompt.submit(
81
+ fn=randomize_seed_fn,
82
+ inputs=[seed, randomize_seed],
83
+ outputs=seed,
84
+ queue=False,
85
+ ).then(
86
+ fn=process,
87
+ inputs=inputs,
88
+ outputs=result,
89
+ )
90
+ run_button.click(
91
+ fn=randomize_seed_fn,
92
+ inputs=[seed, randomize_seed],
93
+ outputs=seed,
94
+ queue=False,
95
+ ).then(
96
+ fn=process,
97
+ inputs=inputs,
98
+ outputs=result,
99
+ api_name='canny',
100
+ )
101
+ return demo
102
+
103
+
104
+ if __name__ == '__main__':
105
+ from model import Model
106
+ model = Model(task_name='Canny')
107
+ demo = create_demo(model.process_canny)
108
+ demo.queue().launch()
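
Note: every app_*.py module in this commit imports randomize_seed_fn from utils, but utils.py is not included in this excerpt. Below is a minimal sketch of what that helper presumably does, inferred only from how it is wired above (it takes the seed slider and the "Randomize seed" checkbox as inputs and writes back to the seed slider); the actual implementation in the Space may differ.

import random

MAX_SEED = 1000000  # assumed to match the seed slider's maximum above


def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
    # If the checkbox is ticked, ignore the incoming slider value and draw a
    # fresh random seed; otherwise pass the current value through unchanged.
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    return seed
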
app_depth.py ADDED
@@ -0,0 +1,107 @@
1
+ #!/usr/bin/env python
2
+
3
+ import gradio as gr
4
+
5
+ from utils import randomize_seed_fn
6
+
7
+
8
+ def create_demo(process, max_images=12, default_num_images=3):
9
+ with gr.Blocks() as demo:
10
+ with gr.Row():
11
+ with gr.Column():
12
+ image = gr.Image()
13
+ prompt = gr.Textbox(label='Prompt')
14
+ run_button = gr.Button('Run')
15
+ with gr.Accordion('Advanced options', open=False):
16
+ preprocessor_name = gr.Radio(
17
+ label='Preprocessor',
18
+ choices=['Midas', 'DPT', 'None'],
19
+ type='value',
20
+ value='DPT')
21
+ num_samples = gr.Slider(label='Number of images',
22
+ minimum=1,
23
+ maximum=max_images,
24
+ value=default_num_images,
25
+ step=1)
26
+ image_resolution = gr.Slider(label='Image resolution',
27
+ minimum=256,
28
+ maximum=512,
29
+ value=512,
30
+ step=256)
31
+ preprocess_resolution = gr.Slider(
32
+ label='Preprocess resolution',
33
+ minimum=128,
34
+ maximum=512,
35
+ value=384,
36
+ step=1)
37
+ num_steps = gr.Slider(label='Number of steps',
38
+ minimum=1,
39
+ maximum=100,
40
+ value=20,
41
+ step=1)
42
+ guidance_scale = gr.Slider(label='Guidance scale',
43
+ minimum=0.1,
44
+ maximum=30.0,
45
+ value=9.0,
46
+ step=0.1)
47
+ seed = gr.Slider(label='Seed',
48
+ minimum=0,
49
+ maximum=1000000,
50
+ step=1,
51
+ value=0,
52
+ randomize=True)
53
+ randomize_seed = gr.Checkbox(label='Randomize seed',
54
+ value=True)
55
+ a_prompt = gr.Textbox(
56
+ label='Additional prompt',
57
+ value='best quality, extremely detailed')
58
+ n_prompt = gr.Textbox(
59
+ label='Negative prompt',
60
+ value=
61
+ 'longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality'
62
+ )
63
+ with gr.Column():
64
+ result = gr.Gallery(label='Output', show_label=False).style(
65
+ columns=2, object_fit='scale-down')
66
+ inputs = [
67
+ image,
68
+ prompt,
69
+ a_prompt,
70
+ n_prompt,
71
+ num_samples,
72
+ image_resolution,
73
+ preprocess_resolution,
74
+ num_steps,
75
+ guidance_scale,
76
+ seed,
77
+ preprocessor_name,
78
+ ]
79
+ prompt.submit(
80
+ fn=randomize_seed_fn,
81
+ inputs=[seed, randomize_seed],
82
+ outputs=seed,
83
+ queue=False,
84
+ ).then(
85
+ fn=process,
86
+ inputs=inputs,
87
+ outputs=result,
88
+ )
89
+ run_button.click(
90
+ fn=randomize_seed_fn,
91
+ inputs=[seed, randomize_seed],
92
+ outputs=seed,
93
+ queue=False,
94
+ ).then(
95
+ fn=process,
96
+ inputs=inputs,
97
+ outputs=result,
98
+ api_name='depth',
99
+ )
100
+ return demo
101
+
102
+
103
+ if __name__ == '__main__':
104
+ from model import Model
105
+ model = Model(task_name='depth')
106
+ demo = create_demo(model.process_depth)
107
+ demo.queue().launch()
app_ip2p.py ADDED
@@ -0,0 +1,94 @@
1
+ #!/usr/bin/env python
2
+
3
+ import gradio as gr
4
+
5
+ from utils import randomize_seed_fn
6
+
7
+
8
+ def create_demo(process, max_images=12, default_num_images=3):
9
+ with gr.Blocks() as demo:
10
+ with gr.Row():
11
+ with gr.Column():
12
+ image = gr.Image()
13
+ prompt = gr.Textbox(label='Prompt')
14
+ run_button = gr.Button('Run')
15
+ with gr.Accordion('Advanced options', open=False):
16
+ num_samples = gr.Slider(label='Number of images',
17
+ minimum=1,
18
+ maximum=max_images,
19
+ value=default_num_images,
20
+ step=1)
21
+ image_resolution = gr.Slider(label='Image resolution',
22
+ minimum=256,
23
+ maximum=512,
24
+ value=512,
25
+ step=256)
26
+ num_steps = gr.Slider(label='Number of steps',
27
+ minimum=1,
28
+ maximum=100,
29
+ value=20,
30
+ step=1)
31
+ guidance_scale = gr.Slider(label='Guidance scale',
32
+ minimum=0.1,
33
+ maximum=30.0,
34
+ value=9.0,
35
+ step=0.1)
36
+ seed = gr.Slider(label='Seed',
37
+ minimum=0,
38
+ maximum=1000000,
39
+ step=1,
40
+ value=0,
41
+ randomize=True)
42
+ randomize_seed = gr.Checkbox(label='Randomize seed',
43
+ value=True)
44
+ a_prompt = gr.Textbox(
45
+ label='Additional prompt',
46
+ value='best quality, extremely detailed')
47
+ n_prompt = gr.Textbox(
48
+ label='Negative prompt',
49
+ value=
50
+ 'longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality'
51
+ )
52
+ with gr.Column():
53
+ result = gr.Gallery(label='Output', show_label=False).style(
54
+ columns=2, object_fit='scale-down')
55
+ inputs = [
56
+ image,
57
+ prompt,
58
+ a_prompt,
59
+ n_prompt,
60
+ num_samples,
61
+ image_resolution,
62
+ num_steps,
63
+ guidance_scale,
64
+ seed,
65
+ ]
66
+ prompt.submit(
67
+ fn=randomize_seed_fn,
68
+ inputs=[seed, randomize_seed],
69
+ outputs=seed,
70
+ queue=False,
71
+ ).then(
72
+ fn=process,
73
+ inputs=inputs,
74
+ outputs=result,
75
+ )
76
+ run_button.click(
77
+ fn=randomize_seed_fn,
78
+ inputs=[seed, randomize_seed],
79
+ outputs=seed,
80
+ queue=False,
81
+ ).then(
82
+ fn=process,
83
+ inputs=inputs,
84
+ outputs=result,
85
+ api_name='ip2p',
86
+ )
87
+ return demo
88
+
89
+
90
+ if __name__ == '__main__':
91
+ from model import Model
92
+ model = Model(task_name='ip2p')
93
+ demo = create_demo(model.process_ip2p)
94
+ demo.queue().launch()
app_lineart.py ADDED
@@ -0,0 +1,116 @@
1
+ #!/usr/bin/env python
2
+
3
+ import gradio as gr
4
+
5
+ from utils import randomize_seed_fn
6
+
7
+
8
+ def create_demo(process, max_images=12, default_num_images=3):
9
+ with gr.Blocks() as demo:
10
+ with gr.Row():
11
+ with gr.Column():
12
+ image = gr.Image()
13
+ prompt = gr.Textbox(label='Prompt')
14
+ run_button = gr.Button('Run')
15
+ with gr.Accordion('Advanced options', open=False):
16
+ preprocessor_name = gr.Radio(
17
+ label='Preprocessor',
18
+ choices=[
19
+ 'Lineart',
20
+ 'Lineart coarse',
21
+ 'None',
22
+ 'Lineart (anime)',
23
+ 'None (anime)',
24
+ ],
25
+ type='value',
26
+ value='Lineart',
27
+ info=
28
+ 'Note that "Lineart (anime)" and "None (anime)" are for anime base models like Anything-v3.'
29
+ )
30
+ num_samples = gr.Slider(label='Number of images',
31
+ minimum=1,
32
+ maximum=max_images,
33
+ value=default_num_images,
34
+ step=1)
35
+ image_resolution = gr.Slider(label='Image resolution',
36
+ minimum=256,
37
+ maximum=512,
38
+ value=512,
39
+ step=256)
40
+ preprocess_resolution = gr.Slider(
41
+ label='Preprocess resolution',
42
+ minimum=128,
43
+ maximum=512,
44
+ value=512,
45
+ step=1)
46
+ num_steps = gr.Slider(label='Number of steps',
47
+ minimum=1,
48
+ maximum=100,
49
+ value=20,
50
+ step=1)
51
+ guidance_scale = gr.Slider(label='Guidance scale',
52
+ minimum=0.1,
53
+ maximum=30.0,
54
+ value=9.0,
55
+ step=0.1)
56
+ seed = gr.Slider(label='Seed',
57
+ minimum=0,
58
+ maximum=1000000,
59
+ step=1,
60
+ value=0,
61
+ randomize=True)
62
+ randomize_seed = gr.Checkbox(label='Randomize seed',
63
+ value=True)
64
+ a_prompt = gr.Textbox(
65
+ label='Additional prompt',
66
+ value='best quality, extremely detailed')
67
+ n_prompt = gr.Textbox(
68
+ label='Negative prompt',
69
+ value=
70
+ 'longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality'
71
+ )
72
+ with gr.Column():
73
+ result = gr.Gallery(label='Output', show_label=False).style(
74
+ columns=2, object_fit='scale-down')
75
+ inputs = [
76
+ image,
77
+ prompt,
78
+ a_prompt,
79
+ n_prompt,
80
+ num_samples,
81
+ image_resolution,
82
+ preprocess_resolution,
83
+ num_steps,
84
+ guidance_scale,
85
+ seed,
86
+ preprocessor_name,
87
+ ]
88
+ prompt.submit(
89
+ fn=randomize_seed_fn,
90
+ inputs=[seed, randomize_seed],
91
+ outputs=seed,
92
+ queue=False,
93
+ ).then(
94
+ fn=process,
95
+ inputs=inputs,
96
+ outputs=result,
97
+ )
98
+ run_button.click(
99
+ fn=randomize_seed_fn,
100
+ inputs=[seed, randomize_seed],
101
+ outputs=seed,
102
+ queue=False,
103
+ ).then(
104
+ fn=process,
105
+ inputs=inputs,
106
+ outputs=result,
107
+ api_name='lineart',
108
+ )
109
+ return demo
110
+
111
+
112
+ if __name__ == '__main__':
113
+ from model import Model
114
+ model = Model(task_name='lineart')
115
+ demo = create_demo(model.process_lineart)
116
+ demo.queue().launch()
app_mlsd.py ADDED
@@ -0,0 +1,115 @@
1
+ #!/usr/bin/env python
2
+
3
+ import gradio as gr
4
+
5
+ from utils import randomize_seed_fn
6
+
7
+
8
+ def create_demo(process, max_images=12, default_num_images=3):
9
+ with gr.Blocks() as demo:
10
+ with gr.Row():
11
+ with gr.Column():
12
+ image = gr.Image()
13
+ prompt = gr.Textbox(label='Prompt')
14
+ run_button = gr.Button('Run')
15
+ with gr.Accordion('Advanced options', open=False):
16
+ num_samples = gr.Slider(label='Number of images',
17
+ minimum=1,
18
+ maximum=max_images,
19
+ value=default_num_images,
20
+ step=1)
21
+ image_resolution = gr.Slider(label='Image resolution',
22
+ minimum=256,
23
+ maximum=512,
24
+ value=512,
25
+ step=256)
26
+ preprocess_resolution = gr.Slider(
27
+ label='Preprocess resolution',
28
+ minimum=128,
29
+ maximum=512,
30
+ value=512,
31
+ step=1)
32
+ mlsd_value_threshold = gr.Slider(
33
+ label='Hough value threshold (MLSD)',
34
+ minimum=0.01,
35
+ maximum=2.0,
36
+ value=0.1,
37
+ step=0.01)
38
+ mlsd_distance_threshold = gr.Slider(
39
+ label='Hough distance threshold (MLSD)',
40
+ minimum=0.01,
41
+ maximum=20.0,
42
+ value=0.1,
43
+ step=0.01)
44
+ num_steps = gr.Slider(label='Number of steps',
45
+ minimum=1,
46
+ maximum=100,
47
+ value=20,
48
+ step=1)
49
+ guidance_scale = gr.Slider(label='Guidance scale',
50
+ minimum=0.1,
51
+ maximum=30.0,
52
+ value=9.0,
53
+ step=0.1)
54
+ seed = gr.Slider(label='Seed',
55
+ minimum=0,
56
+ maximum=1000000,
57
+ step=1,
58
+ value=0,
59
+ randomize=True)
60
+ randomize_seed = gr.Checkbox(label='Randomize seed',
61
+ value=True)
62
+ a_prompt = gr.Textbox(
63
+ label='Additional prompt',
64
+ value='best quality, extremely detailed')
65
+ n_prompt = gr.Textbox(
66
+ label='Negative prompt',
67
+ value=
68
+ 'longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality'
69
+ )
70
+ with gr.Column():
71
+ result = gr.Gallery(label='Output', show_label=False).style(
72
+ columns=2, object_fit='scale-down')
73
+ inputs = [
74
+ image,
75
+ prompt,
76
+ a_prompt,
77
+ n_prompt,
78
+ num_samples,
79
+ image_resolution,
80
+ preprocess_resolution,
81
+ num_steps,
82
+ guidance_scale,
83
+ seed,
84
+ mlsd_value_threshold,
85
+ mlsd_distance_threshold,
86
+ ]
87
+ prompt.submit(
88
+ fn=randomize_seed_fn,
89
+ inputs=[seed, randomize_seed],
90
+ outputs=seed,
91
+ queue=False,
92
+ ).then(
93
+ fn=process,
94
+ inputs=inputs,
95
+ outputs=result,
96
+ )
97
+ run_button.click(
98
+ fn=randomize_seed_fn,
99
+ inputs=[seed, randomize_seed],
100
+ outputs=seed,
101
+ queue=False,
102
+ ).then(
103
+ fn=process,
104
+ inputs=inputs,
105
+ outputs=result,
106
+ api_name='mlsd',
107
+ )
108
+ return demo
109
+
110
+
111
+ if __name__ == '__main__':
112
+ from model import Model
113
+ model = Model(task_name='MLSD')
114
+ demo = create_demo(model.process_mlsd)
115
+ demo.queue().launch()
app_normal.py ADDED
@@ -0,0 +1,106 @@
1
+ #!/usr/bin/env python
2
+
3
+ import gradio as gr
4
+
5
+ from utils import randomize_seed_fn
6
+
7
+
8
+ def create_demo(process, max_images=12, default_num_images=3):
9
+ with gr.Blocks() as demo:
10
+ with gr.Row():
11
+ with gr.Column():
12
+ image = gr.Image()
13
+ prompt = gr.Textbox(label='Prompt')
14
+ run_button = gr.Button('Run')
15
+ with gr.Accordion('Advanced options', open=False):
16
+ preprocessor_name = gr.Radio(label='Preprocessor',
17
+ choices=['NormalBae', 'None'],
18
+ type='value',
19
+ value='NormalBae')
20
+ num_samples = gr.Slider(label='Number of images',
21
+ minimum=1,
22
+ maximum=max_images,
23
+ value=default_num_images,
24
+ step=1)
25
+ image_resolution = gr.Slider(label='Image resolution',
26
+ minimum=256,
27
+ maximum=512,
28
+ value=512,
29
+ step=256)
30
+ preprocess_resolution = gr.Slider(
31
+ label='Preprocess resolution',
32
+ minimum=128,
33
+ maximum=512,
34
+ value=384,
35
+ step=1)
36
+ num_steps = gr.Slider(label='Number of steps',
37
+ minimum=1,
38
+ maximum=100,
39
+ value=20,
40
+ step=1)
41
+ guidance_scale = gr.Slider(label='Guidance scale',
42
+ minimum=0.1,
43
+ maximum=30.0,
44
+ value=9.0,
45
+ step=0.1)
46
+ seed = gr.Slider(label='Seed',
47
+ minimum=0,
48
+ maximum=1000000,
49
+ step=1,
50
+ value=0,
51
+ randomize=True)
52
+ randomize_seed = gr.Checkbox(label='Randomize seed',
53
+ value=True)
54
+ a_prompt = gr.Textbox(
55
+ label='Additional prompt',
56
+ value='best quality, extremely detailed')
57
+ n_prompt = gr.Textbox(
58
+ label='Negative prompt',
59
+ value=
60
+ 'longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality'
61
+ )
62
+ with gr.Column():
63
+ result = gr.Gallery(label='Output', show_label=False).style(
64
+ columns=2, object_fit='scale-down')
65
+ inputs = [
66
+ image,
67
+ prompt,
68
+ a_prompt,
69
+ n_prompt,
70
+ num_samples,
71
+ image_resolution,
72
+ preprocess_resolution,
73
+ num_steps,
74
+ guidance_scale,
75
+ seed,
76
+ preprocessor_name,
77
+ ]
78
+ prompt.submit(
79
+ fn=randomize_seed_fn,
80
+ inputs=[seed, randomize_seed],
81
+ outputs=seed,
82
+ queue=False,
83
+ ).then(
84
+ fn=process,
85
+ inputs=inputs,
86
+ outputs=result,
87
+ )
88
+ run_button.click(
89
+ fn=randomize_seed_fn,
90
+ inputs=[seed, randomize_seed],
91
+ outputs=seed,
92
+ queue=False,
93
+ ).then(
94
+ fn=process,
95
+ inputs=inputs,
96
+ outputs=result,
97
+ api_name='normal',
98
+ )
99
+ return demo
100
+
101
+
102
+ if __name__ == '__main__':
103
+ from model import Model
104
+ model = Model(task_name='NormalBae')
105
+ demo = create_demo(model.process_normal)
106
+ demo.queue().launch()
app_openpose.py ADDED
@@ -0,0 +1,106 @@
1
+ #!/usr/bin/env python
2
+
3
+ import gradio as gr
4
+
5
+ from utils import randomize_seed_fn
6
+
7
+
8
+ def create_demo(process, max_images=12, default_num_images=3):
9
+ with gr.Blocks() as demo:
10
+ with gr.Row():
11
+ with gr.Column():
12
+ image = gr.Image()
13
+ prompt = gr.Textbox(label='Prompt')
14
+ run_button = gr.Button('Run')
15
+ with gr.Accordion('Advanced options', open=False):
16
+ preprocessor_name = gr.Radio(label='Preprocessor',
17
+ choices=['Openpose', 'None'],
18
+ type='value',
19
+ value='Openpose')
20
+ num_samples = gr.Slider(label='Number of images',
21
+ minimum=1,
22
+ maximum=max_images,
23
+ value=default_num_images,
24
+ step=1)
25
+ image_resolution = gr.Slider(label='Image resolution',
26
+ minimum=256,
27
+ maximum=512,
28
+ value=512,
29
+ step=256)
30
+ preprocess_resolution = gr.Slider(
31
+ label='Preprocess resolution',
32
+ minimum=128,
33
+ maximum=512,
34
+ value=512,
35
+ step=1)
36
+ num_steps = gr.Slider(label='Number of steps',
37
+ minimum=1,
38
+ maximum=100,
39
+ value=20,
40
+ step=1)
41
+ guidance_scale = gr.Slider(label='Guidance scale',
42
+ minimum=0.1,
43
+ maximum=30.0,
44
+ value=9.0,
45
+ step=0.1)
46
+ seed = gr.Slider(label='Seed',
47
+ minimum=0,
48
+ maximum=1000000,
49
+ step=1,
50
+ value=0,
51
+ randomize=True)
52
+ randomize_seed = gr.Checkbox(label='Randomize seed',
53
+ value=True)
54
+ a_prompt = gr.Textbox(
55
+ label='Additional prompt',
56
+ value='best quality, extremely detailed')
57
+ n_prompt = gr.Textbox(
58
+ label='Negative prompt',
59
+ value=
60
+ 'longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality'
61
+ )
62
+ with gr.Column():
63
+ result = gr.Gallery(label='Output', show_label=False).style(
64
+ columns=2, object_fit='scale-down')
65
+ inputs = [
66
+ image,
67
+ prompt,
68
+ a_prompt,
69
+ n_prompt,
70
+ num_samples,
71
+ image_resolution,
72
+ preprocess_resolution,
73
+ num_steps,
74
+ guidance_scale,
75
+ seed,
76
+ preprocessor_name,
77
+ ]
78
+ prompt.submit(
79
+ fn=randomize_seed_fn,
80
+ inputs=[seed, randomize_seed],
81
+ outputs=seed,
82
+ queue=False,
83
+ ).then(
84
+ fn=process,
85
+ inputs=inputs,
86
+ outputs=result,
87
+ )
88
+ run_button.click(
89
+ fn=randomize_seed_fn,
90
+ inputs=[seed, randomize_seed],
91
+ outputs=seed,
92
+ queue=False,
93
+ ).then(
94
+ fn=process,
95
+ inputs=inputs,
96
+ outputs=result,
97
+ api_name='openpose',
98
+ )
99
+ return demo
100
+
101
+
102
+ if __name__ == '__main__':
103
+ from model import Model
104
+ model = Model(task_name='Openpose')
105
+ demo = create_demo(model.process_openpose)
106
+ demo.queue().launch()
app_scribble.py ADDED
@@ -0,0 +1,107 @@
1
+ #!/usr/bin/env python
2
+
3
+ import gradio as gr
4
+
5
+ from utils import randomize_seed_fn
6
+
7
+
8
+ def create_demo(process, max_images=12, default_num_images=3):
9
+ with gr.Blocks() as demo:
10
+ with gr.Row():
11
+ with gr.Column():
12
+ image = gr.Image()
13
+ prompt = gr.Textbox(label='Prompt')
14
+ run_button = gr.Button('Run')
15
+ with gr.Accordion('Advanced options', open=False):
16
+ preprocessor_name = gr.Radio(
17
+ label='Preprocessor',
18
+ choices=['HED', 'PidiNet', 'None'],
19
+ type='value',
20
+ value='HED')
21
+ num_samples = gr.Slider(label='Number of images',
22
+ minimum=1,
23
+ maximum=max_images,
24
+ value=default_num_images,
25
+ step=1)
26
+ image_resolution = gr.Slider(label='Image resolution',
27
+ minimum=256,
28
+ maximum=512,
29
+ value=512,
30
+ step=256)
31
+ preprocess_resolution = gr.Slider(
32
+ label='Preprocess resolution',
33
+ minimum=128,
34
+ maximum=512,
35
+ value=512,
36
+ step=1)
37
+ num_steps = gr.Slider(label='Number of steps',
38
+ minimum=1,
39
+ maximum=100,
40
+ value=20,
41
+ step=1)
42
+ guidance_scale = gr.Slider(label='Guidance scale',
43
+ minimum=0.1,
44
+ maximum=30.0,
45
+ value=9.0,
46
+ step=0.1)
47
+ seed = gr.Slider(label='Seed',
48
+ minimum=0,
49
+ maximum=1000000,
50
+ step=1,
51
+ value=0,
52
+ randomize=True)
53
+ randomize_seed = gr.Checkbox(label='Randomize seed',
54
+ value=True)
55
+ a_prompt = gr.Textbox(
56
+ label='Additional prompt',
57
+ value='best quality, extremely detailed')
58
+ n_prompt = gr.Textbox(
59
+ label='Negative prompt',
60
+ value=
61
+ 'longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality'
62
+ )
63
+ with gr.Column():
64
+ result = gr.Gallery(label='Output', show_label=False).style(
65
+ columns=2, object_fit='scale-down')
66
+ inputs = [
67
+ image,
68
+ prompt,
69
+ a_prompt,
70
+ n_prompt,
71
+ num_samples,
72
+ image_resolution,
73
+ preprocess_resolution,
74
+ num_steps,
75
+ guidance_scale,
76
+ seed,
77
+ preprocessor_name,
78
+ ]
79
+ prompt.submit(
80
+ fn=randomize_seed_fn,
81
+ inputs=[seed, randomize_seed],
82
+ outputs=seed,
83
+ queue=False,
84
+ ).then(
85
+ fn=process,
86
+ inputs=inputs,
87
+ outputs=result,
88
+ )
89
+ run_button.click(
90
+ fn=randomize_seed_fn,
91
+ inputs=[seed, randomize_seed],
92
+ outputs=seed,
93
+ queue=False,
94
+ ).then(
95
+ fn=process,
96
+ inputs=inputs,
97
+ outputs=result,
98
+ api_name='scribble',
99
+ )
100
+ return demo
101
+
102
+
103
+ if __name__ == '__main__':
104
+ from model import Model
105
+ model = Model(task_name='scribble')
106
+ demo = create_demo(model.process_scribble)
107
+ demo.queue().launch()
app_scribble_interactive.py ADDED
@@ -0,0 +1,114 @@
1
+ #!/usr/bin/env python
2
+
3
+ import gradio as gr
4
+ import numpy as np
5
+
6
+ from utils import randomize_seed_fn
7
+
8
+
9
+ def create_canvas(w, h):
10
+ return np.zeros(shape=(h, w, 3), dtype=np.uint8) + 255
11
+
12
+
13
+ def create_demo(process, max_images=12, default_num_images=3):
14
+ with gr.Blocks() as demo:
15
+ with gr.Row():
16
+ with gr.Column():
17
+ canvas_width = gr.Slider(label='Canvas width',
18
+ minimum=256,
19
+ maximum=512,
20
+ value=512,
21
+ step=1)
22
+ canvas_height = gr.Slider(label='Canvas height',
23
+ minimum=256,
24
+ maximum=512,
25
+ value=512,
26
+ step=1)
27
+ create_button = gr.Button('Open drawing canvas!')
28
+ image = gr.Image(tool='sketch', brush_radius=10)
29
+ prompt = gr.Textbox(label='Prompt')
30
+ run_button = gr.Button('Run')
31
+ with gr.Accordion('Advanced options', open=False):
32
+ num_samples = gr.Slider(label='Number of images',
33
+ minimum=1,
34
+ maximum=max_images,
35
+ value=default_num_images,
36
+ step=1)
37
+ image_resolution = gr.Slider(label='Image resolution',
38
+ minimum=256,
39
+ maximum=512,
40
+ value=512,
41
+ step=256)
42
+ num_steps = gr.Slider(label='Number of steps',
43
+ minimum=1,
44
+ maximum=100,
45
+ value=20,
46
+ step=1)
47
+ guidance_scale = gr.Slider(label='Guidance scale',
48
+ minimum=0.1,
49
+ maximum=30.0,
50
+ value=9.0,
51
+ step=0.1)
52
+ seed = gr.Slider(label='Seed',
53
+ minimum=0,
54
+ maximum=1000000,
55
+ step=1,
56
+ value=0,
57
+ randomize=True)
58
+ randomize_seed = gr.Checkbox(label='Randomize seed',
59
+ value=True)
60
+ a_prompt = gr.Textbox(
61
+ label='Additional prompt',
62
+ value='best quality, extremely detailed')
63
+ n_prompt = gr.Textbox(
64
+ label='Negative prompt',
65
+ value=
66
+ 'longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality'
67
+ )
68
+ with gr.Column():
69
+ result = gr.Gallery(label='Output', show_label=False).style(
70
+ columns=2, object_fit='scale-down')
71
+
72
+ create_button.click(fn=create_canvas,
73
+ inputs=[canvas_width, canvas_height],
74
+ outputs=image,
75
+ queue=False)
76
+ inputs = [
77
+ image,
78
+ prompt,
79
+ a_prompt,
80
+ n_prompt,
81
+ num_samples,
82
+ image_resolution,
83
+ num_steps,
84
+ guidance_scale,
85
+ seed,
86
+ ]
87
+ prompt.submit(
88
+ fn=randomize_seed_fn,
89
+ inputs=[seed, randomize_seed],
90
+ outputs=seed,
91
+ queue=False,
92
+ ).then(
93
+ fn=process,
94
+ inputs=inputs,
95
+ outputs=result,
96
+ )
97
+ run_button.click(
98
+ fn=randomize_seed_fn,
99
+ inputs=[seed, randomize_seed],
100
+ outputs=seed,
101
+ queue=False,
102
+ ).then(
103
+ fn=process,
104
+ inputs=inputs,
105
+ outputs=result,
106
+ )
107
+ return demo
108
+
109
+
110
+ if __name__ == '__main__':
111
+ from model import Model
112
+ model = Model(task_name='scribble')
113
+ demo = create_demo(model.process_scribble_interactive)
114
+ demo.queue().launch()
app_segmentation.py ADDED
@@ -0,0 +1,106 @@
1
+ #!/usr/bin/env python
2
+
3
+ import gradio as gr
4
+
5
+ from utils import randomize_seed_fn
6
+
7
+
8
+ def create_demo(process, max_images=12, default_num_images=3):
9
+ with gr.Blocks() as demo:
10
+ with gr.Row():
11
+ with gr.Column():
12
+ image = gr.Image()
13
+ prompt = gr.Textbox(label='Prompt')
14
+ run_button = gr.Button('Run')
15
+ with gr.Accordion('Advanced options', open=False):
16
+ preprocessor_name = gr.Radio(label='Preprocessor',
17
+ choices=['UPerNet', 'None'],
18
+ type='value',
19
+ value='UPerNet')
20
+ num_samples = gr.Slider(label='Number of images',
21
+ minimum=1,
22
+ maximum=max_images,
23
+ value=default_num_images,
24
+ step=1)
25
+ image_resolution = gr.Slider(label='Image resolution',
26
+ minimum=256,
27
+ maximum=512,
28
+ value=512,
29
+ step=256)
30
+ preprocess_resolution = gr.Slider(
31
+ label='Preprocess resolution',
32
+ minimum=128,
33
+ maximum=512,
34
+ value=512,
35
+ step=1)
36
+ num_steps = gr.Slider(label='Number of steps',
37
+ minimum=1,
38
+ maximum=100,
39
+ value=20,
40
+ step=1)
41
+ guidance_scale = gr.Slider(label='Guidance scale',
42
+ minimum=0.1,
43
+ maximum=30.0,
44
+ value=9.0,
45
+ step=0.1)
46
+ seed = gr.Slider(label='Seed',
47
+ minimum=0,
48
+ maximum=1000000,
49
+ step=1,
50
+ value=0,
51
+ randomize=True)
52
+ randomize_seed = gr.Checkbox(label='Randomize seed',
53
+ value=True)
54
+ a_prompt = gr.Textbox(
55
+ label='Additional prompt',
56
+ value='best quality, extremely detailed')
57
+ n_prompt = gr.Textbox(
58
+ label='Negative prompt',
59
+ value=
60
+ 'longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality'
61
+ )
62
+ with gr.Column():
63
+ result = gr.Gallery(label='Output', show_label=False).style(
64
+ columns=2, object_fit='scale-down')
65
+ inputs = [
66
+ image,
67
+ prompt,
68
+ a_prompt,
69
+ n_prompt,
70
+ num_samples,
71
+ image_resolution,
72
+ preprocess_resolution,
73
+ num_steps,
74
+ guidance_scale,
75
+ seed,
76
+ preprocessor_name,
77
+ ]
78
+ prompt.submit(
79
+ fn=randomize_seed_fn,
80
+ inputs=[seed, randomize_seed],
81
+ outputs=seed,
82
+ queue=False,
83
+ ).then(
84
+ fn=process,
85
+ inputs=inputs,
86
+ outputs=result,
87
+ )
88
+ run_button.click(
89
+ fn=randomize_seed_fn,
90
+ inputs=[seed, randomize_seed],
91
+ outputs=seed,
92
+ queue=False,
93
+ ).then(
94
+ fn=process,
95
+ inputs=inputs,
96
+ outputs=result,
97
+ api_name='segmentation',
98
+ )
99
+ return demo
100
+
101
+
102
+ if __name__ == '__main__':
103
+ from model import Model
104
+ model = Model(task_name='segmentation')
105
+ demo = create_demo(model.process_segmentation)
106
+ demo.queue().launch()
app_shuffle.py ADDED
@@ -0,0 +1,100 @@
1
+ #!/usr/bin/env python
2
+
3
+ import gradio as gr
4
+
5
+ from utils import randomize_seed_fn
6
+
7
+
8
+ def create_demo(process, max_images=12, default_num_images=3):
9
+ with gr.Blocks() as demo:
10
+ with gr.Row():
11
+ with gr.Column():
12
+ image = gr.Image()
13
+ prompt = gr.Textbox(label='Prompt')
14
+ run_button = gr.Button('Run')
15
+ with gr.Accordion('Advanced options', open=False):
16
+ preprocessor_name = gr.Radio(
17
+ label='Preprocessor',
18
+ choices=['ContentShuffle', 'None'],
19
+ type='value',
20
+ value='ContentShuffle')
21
+ num_samples = gr.Slider(label='Number of images',
22
+ minimum=1,
23
+ maximum=max_images,
24
+ value=default_num_images,
25
+ step=1)
26
+ image_resolution = gr.Slider(label='Image resolution',
27
+ minimum=256,
28
+ maximum=512,
29
+ value=512,
30
+ step=256)
31
+ num_steps = gr.Slider(label='Number of steps',
32
+ minimum=1,
33
+ maximum=100,
34
+ value=20,
35
+ step=1)
36
+ guidance_scale = gr.Slider(label='Guidance scale',
37
+ minimum=0.1,
38
+ maximum=30.0,
39
+ value=9.0,
40
+ step=0.1)
41
+ seed = gr.Slider(label='Seed',
42
+ minimum=0,
43
+ maximum=1000000,
44
+ step=1,
45
+ value=0,
46
+ randomize=True)
47
+ randomize_seed = gr.Checkbox(label='Randomize seed',
48
+ value=True)
49
+ a_prompt = gr.Textbox(
50
+ label='Additional prompt',
51
+ value='best quality, extremely detailed')
52
+ n_prompt = gr.Textbox(
53
+ label='Negative prompt',
54
+ value=
55
+ 'longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality'
56
+ )
57
+ with gr.Column():
58
+ result = gr.Gallery(label='Output', show_label=False).style(
59
+ columns=2, object_fit='scale-down')
60
+ inputs = [
61
+ image,
62
+ prompt,
63
+ a_prompt,
64
+ n_prompt,
65
+ num_samples,
66
+ image_resolution,
67
+ num_steps,
68
+ guidance_scale,
69
+ seed,
70
+ preprocessor_name,
71
+ ]
72
+ prompt.submit(
73
+ fn=randomize_seed_fn,
74
+ inputs=[seed, randomize_seed],
75
+ outputs=seed,
76
+ queue=False,
77
+ ).then(
78
+ fn=process,
79
+ inputs=inputs,
80
+ outputs=result,
81
+ )
82
+ run_button.click(
83
+ fn=randomize_seed_fn,
84
+ inputs=[seed, randomize_seed],
85
+ outputs=seed,
86
+ queue=False,
87
+ ).then(
88
+ fn=process,
89
+ inputs=inputs,
90
+ outputs=result,
91
+ api_name='content-shuffle',
92
+ )
93
+ return demo
94
+
95
+
96
+ if __name__ == '__main__':
97
+ from model import Model
98
+ model = Model(task_name='shuffle')
99
+ demo = create_demo(model.process_shuffle)
100
+ demo.queue().launch()
app_softedge.py ADDED
@@ -0,0 +1,112 @@
1
+ #!/usr/bin/env python
2
+
3
+ import gradio as gr
4
+
5
+ from utils import randomize_seed_fn
6
+
7
+
8
+ def create_demo(process, max_images=12, default_num_images=3):
9
+ with gr.Blocks() as demo:
10
+ with gr.Row():
11
+ with gr.Column():
12
+ image = gr.Image()
13
+ prompt = gr.Textbox(label='Prompt')
14
+ run_button = gr.Button('Run')
15
+ with gr.Accordion('Advanced options', open=False):
16
+ preprocessor_name = gr.Radio(label='Preprocessor',
17
+ choices=[
18
+ 'HED',
19
+ 'PidiNet',
20
+ 'HED safe',
21
+ 'PidiNet safe',
22
+ 'None',
23
+ ],
24
+ type='value',
25
+ value='PidiNet')
26
+ num_samples = gr.Slider(label='Number of images',
27
+ minimum=1,
28
+ maximum=max_images,
29
+ value=default_num_images,
30
+ step=1)
31
+ image_resolution = gr.Slider(label='Image resolution',
32
+ minimum=256,
33
+ maximum=512,
34
+ value=512,
35
+ step=256)
36
+ preprocess_resolution = gr.Slider(
37
+ label='Preprocess resolution',
38
+ minimum=128,
39
+ maximum=512,
40
+ value=512,
41
+ step=1)
42
+ num_steps = gr.Slider(label='Number of steps',
43
+ minimum=1,
44
+ maximum=100,
45
+ value=20,
46
+ step=1)
47
+ guidance_scale = gr.Slider(label='Guidance scale',
48
+ minimum=0.1,
49
+ maximum=30.0,
50
+ value=9.0,
51
+ step=0.1)
52
+ seed = gr.Slider(label='Seed',
53
+ minimum=0,
54
+ maximum=1000000,
55
+ step=1,
56
+ value=0,
57
+ randomize=True)
58
+ randomize_seed = gr.Checkbox(label='Randomize seed',
59
+ value=True)
60
+ a_prompt = gr.Textbox(
61
+ label='Additional prompt',
62
+ value='best quality, extremely detailed')
63
+ n_prompt = gr.Textbox(
64
+ label='Negative prompt',
65
+ value=
66
+ 'longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality'
67
+ )
68
+ with gr.Column():
69
+ result = gr.Gallery(label='Output', show_label=False).style(
70
+ columns=2, object_fit='scale-down')
71
+ inputs = [
72
+ image,
73
+ prompt,
74
+ a_prompt,
75
+ n_prompt,
76
+ num_samples,
77
+ image_resolution,
78
+ preprocess_resolution,
79
+ num_steps,
80
+ guidance_scale,
81
+ seed,
82
+ preprocessor_name,
83
+ ]
84
+ prompt.submit(
85
+ fn=randomize_seed_fn,
86
+ inputs=[seed, randomize_seed],
87
+ outputs=seed,
88
+ queue=False,
89
+ ).then(
90
+ fn=process,
91
+ inputs=inputs,
92
+ outputs=result,
93
+ )
94
+ run_button.click(
95
+ fn=randomize_seed_fn,
96
+ inputs=[seed, randomize_seed],
97
+ outputs=seed,
98
+ queue=False,
99
+ ).then(
100
+ fn=process,
101
+ inputs=inputs,
102
+ outputs=result,
103
+ api_name='softedge',
104
+ )
105
+ return demo
106
+
107
+
108
+ if __name__ == '__main__':
109
+ from model import Model
110
+ model = Model(task_name='softedge')
111
+ demo = create_demo(model.process_softedge)
112
+ demo.queue().launch()
cv_utils.py ADDED
@@ -0,0 +1,17 @@
1
+ import cv2
2
+ import numpy as np
3
+
4
+
5
+ def resize_image(input_image, resolution, interpolation=None):
6
+ H, W, C = input_image.shape
7
+ H = float(H)
8
+ W = float(W)
9
+ k = float(resolution) / max(H, W)
10
+ H *= k
11
+ W *= k
12
+ H = int(np.round(H / 64.0)) * 64
13
+ W = int(np.round(W / 64.0)) * 64
14
+ if interpolation is None:
15
+ interpolation = cv2.INTER_LANCZOS4 if k > 1 else cv2.INTER_AREA
16
+ img = cv2.resize(input_image, (W, H), interpolation=interpolation)
17
+ return img
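resize_image scales the longer side toward the requested resolution and then snaps both dimensions to multiples of 64, the granularity the SD 1.5 UNet expects. A minimal sketch of the rounding behaviour on a dummy array:

import numpy as np
from cv_utils import resize_image

# 600x400 dummy RGB image, target resolution 512.
img = np.zeros((400, 600, 3), dtype=np.uint8)
out = resize_image(img, resolution=512)
# k = 512/600 ≈ 0.853 -> 341x512 before rounding -> snapped to multiples of 64.
print(out.shape)  # (320, 512, 3)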
depth_estimator.py ADDED
@@ -0,0 +1,25 @@
1
+ import numpy as np
2
+ import PIL.Image
3
+ from controlnet_aux.util import HWC3
4
+ from transformers import pipeline
5
+
6
+ from cv_utils import resize_image
7
+
8
+
9
+ class DepthEstimator:
10
+ def __init__(self):
11
+ self.model = pipeline('depth-estimation')
12
+
13
+ def __call__(self, image: np.ndarray, **kwargs) -> PIL.Image.Image:
14
+ detect_resolution = kwargs.pop('detect_resolution', 512)
15
+ image_resolution = kwargs.pop('image_resolution', 512)
16
+ image = np.array(image)
17
+ image = HWC3(image)
18
+ image = resize_image(image, resolution=detect_resolution)
19
+ image = PIL.Image.fromarray(image)
20
+ image = self.model(image)
21
+ image = image['depth']
22
+ image = np.array(image)
23
+ image = HWC3(image)
24
+ image = resize_image(image, resolution=image_resolution)
25
+ return PIL.Image.fromarray(image)
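A minimal sketch of running the estimator on its own, assuming the default transformers depth-estimation checkpoint can be downloaded; the output is a depth map replicated to three channels by HWC3 and resized back toward image_resolution:

import numpy as np
from depth_estimator import DepthEstimator

estimator = DepthEstimator()  # downloads the default 'depth-estimation' pipeline weights
rgb = np.random.randint(0, 255, size=(480, 640, 3), dtype=np.uint8)  # stand-in input
depth = estimator(rgb, detect_resolution=384, image_resolution=512)
print(depth.size, depth.mode)  # PIL image, dimensions snapped to multiples of 64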
image_segmentor.py ADDED
@@ -0,0 +1,39 @@
1
+ import cv2
2
+ import numpy as np
3
+ import PIL.Image
4
+ import torch
5
+ from controlnet_aux.util import HWC3, ade_palette
6
+ from transformers import AutoImageProcessor, UperNetForSemanticSegmentation
7
+
8
+ from cv_utils import resize_image
9
+
10
+
11
+ class ImageSegmentor:
12
+ def __init__(self):
13
+ self.image_processor = AutoImageProcessor.from_pretrained(
14
+ 'openmmlab/upernet-convnext-small')
15
+ self.image_segmentor = UperNetForSemanticSegmentation.from_pretrained(
16
+ 'openmmlab/upernet-convnext-small')
17
+
18
+ @torch.inference_mode()
19
+ def __call__(self, image: np.ndarray, **kwargs) -> PIL.Image.Image:
20
+ detect_resolution = kwargs.pop('detect_resolution', 512)
21
+ image_resolution = kwargs.pop('image_resolution', 512)
22
+ image = HWC3(image)
23
+ image = resize_image(image, resolution=detect_resolution)
24
+ image = PIL.Image.fromarray(image)
25
+
26
+ pixel_values = self.image_processor(image,
27
+ return_tensors='pt').pixel_values
28
+ outputs = self.image_segmentor(pixel_values)
29
+ seg = self.image_processor.post_process_semantic_segmentation(
30
+ outputs, target_sizes=[image.size[::-1]])[0]
31
+ color_seg = np.zeros((seg.shape[0], seg.shape[1], 3), dtype=np.uint8)
32
+ for label, color in enumerate(ade_palette()):
33
+ color_seg[seg == label, :] = color
34
+ color_seg = color_seg.astype(np.uint8)
35
+
36
+ color_seg = resize_image(color_seg,
37
+ resolution=image_resolution,
38
+ interpolation=cv2.INTER_NEAREST)
39
+ return PIL.Image.fromarray(color_seg)
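ImageSegmentor maps every predicted ADE20K class index to a fixed color from ade_palette, so the downstream 'seg' ControlNet receives the color coding it was trained on. A short sketch, assuming the openmmlab/upernet-convnext-small weights can be downloaded:

import numpy as np
from image_segmentor import ImageSegmentor

segmentor = ImageSegmentor()  # downloads openmmlab/upernet-convnext-small
rgb = np.random.randint(0, 255, size=(480, 640, 3), dtype=np.uint8)  # stand-in input
seg_map = segmentor(rgb, detect_resolution=512, image_resolution=512)
print(seg_map.size)  # color-coded segmentation, resized with nearest-neighbour interpolation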
model.py ADDED
@@ -0,0 +1,591 @@
1
+ from __future__ import annotations
2
+
3
+ import gc
4
+
5
+ import numpy as np
6
+ import PIL.Image
7
+ import torch
8
+ from controlnet_aux.util import HWC3
9
+ from diffusers import (ControlNetModel, DiffusionPipeline,
10
+ StableDiffusionControlNetPipeline,
11
+ UniPCMultistepScheduler)
12
+
13
+ from cv_utils import resize_image
14
+ from preprocessor import Preprocessor
15
+
16
+ CONTROLNET_MODEL_IDS = {
17
+ 'Openpose': 'lllyasviel/control_v11p_sd15_openpose',
18
+ 'Canny': 'lllyasviel/control_v11p_sd15_canny',
19
+ 'MLSD': 'lllyasviel/control_v11p_sd15_mlsd',
20
+ 'scribble': 'lllyasviel/control_v11p_sd15_scribble',
21
+ 'softedge': 'lllyasviel/control_v11p_sd15_softedge',
22
+ 'segmentation': 'lllyasviel/control_v11p_sd15_seg',
23
+ 'depth': 'lllyasviel/control_v11f1p_sd15_depth',
24
+ 'NormalBae': 'lllyasviel/control_v11p_sd15_normalbae',
25
+ 'lineart': 'lllyasviel/control_v11p_sd15_lineart',
26
+ 'lineart_anime': 'lllyasviel/control_v11p_sd15s2_lineart_anime',
27
+ 'shuffle': 'lllyasviel/control_v11e_sd15_shuffle',
28
+ 'ip2p': 'lllyasviel/control_v11e_sd15_ip2p',
29
+ 'inpaint': 'lllyasviel/control_v11e_sd15_inpaint',
30
+ }
31
+
32
+
33
+ def download_all_controlnet_weights() -> None:
34
+ for model_id in CONTROLNET_MODEL_IDS.values():
35
+ ControlNetModel.from_pretrained(model_id)
36
+
37
+
38
+ class Model:
39
+ def __init__(self,
40
+ base_model_id: str = 'runwayml/stable-diffusion-v1-5',
41
+ task_name: str = 'Canny'):
42
+ self.device = torch.device(
43
+ 'cuda:0' if torch.cuda.is_available() else 'cpu')
44
+ self.base_model_id = ''
45
+ self.task_name = ''
46
+ self.pipe = self.load_pipe(base_model_id, task_name)
47
+ self.preprocessor = Preprocessor()
48
+
49
+ def load_pipe(self, base_model_id: str, task_name) -> DiffusionPipeline:
50
+ if base_model_id == self.base_model_id and task_name == self.task_name and hasattr(
51
+ self, 'pipe') and self.pipe is not None:
52
+ return self.pipe
53
+ model_id = CONTROLNET_MODEL_IDS[task_name]
54
+ controlnet = ControlNetModel.from_pretrained(model_id,
55
+ torch_dtype=torch.float16)
56
+ pipe = StableDiffusionControlNetPipeline.from_pretrained(
57
+ base_model_id,
58
+ safety_checker=None,
59
+ controlnet=controlnet,
60
+ torch_dtype=torch.float16)
61
+ pipe.scheduler = UniPCMultistepScheduler.from_config(
62
+ pipe.scheduler.config)
63
+ if self.device.type == 'cuda':
64
+ pipe.enable_xformers_memory_efficient_attention()
65
+ pipe.to(self.device)
66
+ torch.cuda.empty_cache()
67
+ gc.collect()
68
+ self.base_model_id = base_model_id
69
+ self.task_name = task_name
70
+ return pipe
71
+
72
+ def set_base_model(self, base_model_id: str) -> str:
73
+ if not base_model_id or base_model_id == self.base_model_id:
74
+ return self.base_model_id
75
+ del self.pipe
76
+ torch.cuda.empty_cache()
77
+ gc.collect()
78
+ try:
79
+ self.pipe = self.load_pipe(base_model_id, self.task_name)
80
+ except Exception:
81
+ self.pipe = self.load_pipe(self.base_model_id, self.task_name)
82
+ return self.base_model_id
83
+
84
+ def load_controlnet_weight(self, task_name: str) -> None:
85
+ if task_name == self.task_name:
86
+ return
87
+ if self.pipe is not None and hasattr(self.pipe, 'controlnet'):
88
+ del self.pipe.controlnet
89
+ torch.cuda.empty_cache()
90
+ gc.collect()
91
+ model_id = CONTROLNET_MODEL_IDS[task_name]
92
+ controlnet = ControlNetModel.from_pretrained(model_id,
93
+ torch_dtype=torch.float16)
94
+ controlnet.to(self.device)
95
+ torch.cuda.empty_cache()
96
+ gc.collect()
97
+ self.pipe.controlnet = controlnet
98
+ self.task_name = task_name
99
+
100
+ def get_prompt(self, prompt: str, additional_prompt: str) -> str:
101
+ if not prompt:
102
+ prompt = additional_prompt
103
+ else:
104
+ prompt = f'{prompt}, {additional_prompt}'
105
+ return prompt
106
+
107
+ @torch.autocast('cuda')
108
+ def run_pipe(
109
+ self,
110
+ prompt: str,
111
+ negative_prompt: str,
112
+ control_image: PIL.Image.Image,
113
+ num_images: int,
114
+ num_steps: int,
115
+ guidance_scale: float,
116
+ seed: int,
117
+ ) -> list[PIL.Image.Image]:
118
+ if seed == -1:
119
+ seed = np.random.randint(0, np.iinfo(np.int64).max)
120
+ generator = torch.Generator().manual_seed(seed)
121
+ return self.pipe(prompt=prompt,
122
+ negative_prompt=negative_prompt,
123
+ guidance_scale=guidance_scale,
124
+ num_images_per_prompt=num_images,
125
+ num_inference_steps=num_steps,
126
+ generator=generator,
127
+ image=control_image).images
128
+
129
+ @torch.inference_mode()
130
+ def process_canny(
131
+ self,
132
+ image: np.ndarray,
133
+ prompt: str,
134
+ additional_prompt: str,
135
+ negative_prompt: str,
136
+ num_images: int,
137
+ image_resolution: int,
138
+ num_steps: int,
139
+ guidance_scale: float,
140
+ seed: int,
141
+ low_threshold: int,
142
+ high_threshold: int,
143
+ ) -> list[PIL.Image.Image]:
144
+ self.preprocessor.load('Canny')
145
+ control_image = self.preprocessor(image=image,
146
+ low_threshold=low_threshold,
147
+ high_threshold=high_threshold,
148
+ detect_resolution=image_resolution)
149
+
150
+ self.load_controlnet_weight('Canny')
151
+ results = self.run_pipe(
152
+ prompt=self.get_prompt(prompt, additional_prompt),
153
+ negative_prompt=negative_prompt,
154
+ control_image=control_image,
155
+ num_images=num_images,
156
+ num_steps=num_steps,
157
+ guidance_scale=guidance_scale,
158
+ seed=seed,
159
+ )
160
+ return [control_image] + results
161
+
162
+ @torch.inference_mode()
163
+ def process_mlsd(
164
+ self,
165
+ image: np.ndarray,
166
+ prompt: str,
167
+ additional_prompt: str,
168
+ negative_prompt: str,
169
+ num_images: int,
170
+ image_resolution: int,
171
+ preprocess_resolution: int,
172
+ num_steps: int,
173
+ guidance_scale: float,
174
+ seed: int,
175
+ value_threshold: float,
176
+ distance_threshold: float,
177
+ ) -> list[PIL.Image.Image]:
178
+ self.preprocessor.load('MLSD')
179
+ control_image = self.preprocessor(
180
+ image=image,
181
+ image_resolution=image_resolution,
182
+ detect_resolution=preprocess_resolution,
183
+ thr_v=value_threshold,
184
+ thr_d=distance_threshold,
185
+ )
186
+ self.load_controlnet_weight('MLSD')
187
+ results = self.run_pipe(
188
+ prompt=self.get_prompt(prompt, additional_prompt),
189
+ negative_prompt=negative_prompt,
190
+ control_image=control_image,
191
+ num_images=num_images,
192
+ num_steps=num_steps,
193
+ guidance_scale=guidance_scale,
194
+ seed=seed,
195
+ )
196
+ return [control_image] + results
197
+
198
+ @torch.inference_mode()
199
+ def process_scribble(
200
+ self,
201
+ image: np.ndarray,
202
+ prompt: str,
203
+ additional_prompt: str,
204
+ negative_prompt: str,
205
+ num_images: int,
206
+ image_resolution: int,
207
+ preprocess_resolution: int,
208
+ num_steps: int,
209
+ guidance_scale: float,
210
+ seed: int,
211
+ preprocessor_name: str,
212
+ ) -> list[PIL.Image.Image]:
213
+ if preprocessor_name == 'None':
214
+ image = HWC3(image)
215
+ image = resize_image(image, resolution=image_resolution)
216
+ control_image = PIL.Image.fromarray(image)
217
+ elif preprocessor_name == 'HED':
218
+ self.preprocessor.load(preprocessor_name)
219
+ control_image = self.preprocessor(
220
+ image=image,
221
+ image_resolution=image_resolution,
222
+ detect_resolution=preprocess_resolution,
223
+ scribble=False,
224
+ )
225
+ elif preprocessor_name == 'PidiNet':
226
+ self.preprocessor.load(preprocessor_name)
227
+ control_image = self.preprocessor(
228
+ image=image,
229
+ image_resolution=image_resolution,
230
+ detect_resolution=preprocess_resolution,
231
+ safe=False,
232
+ )
233
+ self.load_controlnet_weight('scribble')
234
+ results = self.run_pipe(
235
+ prompt=self.get_prompt(prompt, additional_prompt),
236
+ negative_prompt=negative_prompt,
237
+ control_image=control_image,
238
+ num_images=num_images,
239
+ num_steps=num_steps,
240
+ guidance_scale=guidance_scale,
241
+ seed=seed,
242
+ )
243
+ return [control_image] + results
244
+
245
+ @torch.inference_mode()
246
+ def process_scribble_interactive(
247
+ self,
248
+ image_and_mask: dict[str, np.ndarray],
249
+ prompt: str,
250
+ additional_prompt: str,
251
+ negative_prompt: str,
252
+ num_images: int,
253
+ image_resolution: int,
254
+ num_steps: int,
255
+ guidance_scale: float,
256
+ seed: int,
257
+ ) -> list[PIL.Image.Image]:
258
+ image = image_and_mask['mask']
259
+ image = HWC3(image)
260
+ image = resize_image(image, resolution=image_resolution)
261
+ control_image = PIL.Image.fromarray(image)
262
+
263
+ self.load_controlnet_weight('scribble')
264
+ results = self.run_pipe(
265
+ prompt=self.get_prompt(prompt, additional_prompt),
266
+ negative_prompt=negative_prompt,
267
+ control_image=control_image,
268
+ num_images=num_images,
269
+ num_steps=num_steps,
270
+ guidance_scale=guidance_scale,
271
+ seed=seed,
272
+ )
273
+ return [control_image] + results
274
+
275
+ @torch.inference_mode()
276
+ def process_softedge(
277
+ self,
278
+ image: np.ndarray,
279
+ prompt: str,
280
+ additional_prompt: str,
281
+ negative_prompt: str,
282
+ num_images: int,
283
+ image_resolution: int,
284
+ preprocess_resolution: int,
285
+ num_steps: int,
286
+ guidance_scale: float,
287
+ seed: int,
288
+ preprocessor_name: str,
289
+ ) -> list[PIL.Image.Image]:
290
+ if preprocessor_name == 'None':
291
+ image = HWC3(image)
292
+ image = resize_image(image, resolution=image_resolution)
293
+ control_image = PIL.Image.fromarray(image)
294
+ elif preprocessor_name in ['HED', 'HED safe']:
295
+ safe = 'safe' in preprocessor_name
296
+ self.preprocessor.load('HED')
297
+ control_image = self.preprocessor(
298
+ image=image,
299
+ image_resolution=image_resolution,
300
+ detect_resolution=preprocess_resolution,
301
+ scribble=safe,
302
+ )
303
+ elif preprocessor_name in ['PidiNet', 'PidiNet safe']:
304
+ safe = 'safe' in preprocessor_name
305
+ self.preprocessor.load('PidiNet')
306
+ control_image = self.preprocessor(
307
+ image=image,
308
+ image_resolution=image_resolution,
309
+ detect_resolution=preprocess_resolution,
310
+ safe=safe,
311
+ )
312
+ else:
313
+ raise ValueError
314
+ self.load_controlnet_weight('softedge')
315
+ results = self.run_pipe(
316
+ prompt=self.get_prompt(prompt, additional_prompt),
317
+ negative_prompt=negative_prompt,
318
+ control_image=control_image,
319
+ num_images=num_images,
320
+ num_steps=num_steps,
321
+ guidance_scale=guidance_scale,
322
+ seed=seed,
323
+ )
324
+ return [control_image] + results
325
+
326
+ @torch.inference_mode()
327
+ def process_openpose(
328
+ self,
329
+ image: np.ndarray,
330
+ prompt: str,
331
+ additional_prompt: str,
332
+ negative_prompt: str,
333
+ num_images: int,
334
+ image_resolution: int,
335
+ preprocess_resolution: int,
336
+ num_steps: int,
337
+ guidance_scale: float,
338
+ seed: int,
339
+ preprocessor_name: str,
340
+ ) -> list[PIL.Image.Image]:
341
+ if preprocessor_name == 'None':
342
+ image = HWC3(image)
343
+ image = resize_image(image, resolution=image_resolution)
344
+ control_image = PIL.Image.fromarray(image)
345
+ else:
346
+ self.preprocessor.load('Openpose')
347
+ control_image = self.preprocessor(
348
+ image=image,
349
+ image_resolution=image_resolution,
350
+ detect_resolution=preprocess_resolution,
351
+ hand_and_face=True,
352
+ )
353
+ self.load_controlnet_weight('Openpose')
354
+ results = self.run_pipe(
355
+ prompt=self.get_prompt(prompt, additional_prompt),
356
+ negative_prompt=negative_prompt,
357
+ control_image=control_image,
358
+ num_images=num_images,
359
+ num_steps=num_steps,
360
+ guidance_scale=guidance_scale,
361
+ seed=seed,
362
+ )
363
+ return [control_image] + results
364
+
365
+ @torch.inference_mode()
366
+ def process_segmentation(
367
+ self,
368
+ image: np.ndarray,
369
+ prompt: str,
370
+ additional_prompt: str,
371
+ negative_prompt: str,
372
+ num_images: int,
373
+ image_resolution: int,
374
+ preprocess_resolution: int,
375
+ num_steps: int,
376
+ guidance_scale: float,
377
+ seed: int,
378
+ preprocessor_name: str,
379
+ ) -> list[PIL.Image.Image]:
380
+ if preprocessor_name == 'None':
381
+ image = HWC3(image)
382
+ image = resize_image(image, resolution=image_resolution)
383
+ control_image = PIL.Image.fromarray(image)
384
+ else:
385
+ self.preprocessor.load(preprocessor_name)
386
+ control_image = self.preprocessor(
387
+ image=image,
388
+ image_resolution=image_resolution,
389
+ detect_resolution=preprocess_resolution,
390
+ )
391
+ self.load_controlnet_weight('segmentation')
392
+ results = self.run_pipe(
393
+ prompt=self.get_prompt(prompt, additional_prompt),
394
+ negative_prompt=negative_prompt,
395
+ control_image=control_image,
396
+ num_images=num_images,
397
+ num_steps=num_steps,
398
+ guidance_scale=guidance_scale,
399
+ seed=seed,
400
+ )
401
+ return [control_image] + results
402
+
403
+ @torch.inference_mode()
404
+ def process_depth(
405
+ self,
406
+ image: np.ndarray,
407
+ prompt: str,
408
+ additional_prompt: str,
409
+ negative_prompt: str,
410
+ num_images: int,
411
+ image_resolution: int,
412
+ preprocess_resolution: int,
413
+ num_steps: int,
414
+ guidance_scale: float,
415
+ seed: int,
416
+ preprocessor_name: str,
417
+ ) -> list[PIL.Image.Image]:
418
+ if preprocessor_name == 'None':
419
+ image = HWC3(image)
420
+ image = resize_image(image, resolution=image_resolution)
421
+ control_image = PIL.Image.fromarray(image)
422
+ else:
423
+ self.preprocessor.load(preprocessor_name)
424
+ control_image = self.preprocessor(
425
+ image=image,
426
+ image_resolution=image_resolution,
427
+ detect_resolution=preprocess_resolution,
428
+ )
429
+ self.load_controlnet_weight('depth')
430
+ results = self.run_pipe(
431
+ prompt=self.get_prompt(prompt, additional_prompt),
432
+ negative_prompt=negative_prompt,
433
+ control_image=control_image,
434
+ num_images=num_images,
435
+ num_steps=num_steps,
436
+ guidance_scale=guidance_scale,
437
+ seed=seed,
438
+ )
439
+ return [control_image] + results
440
+
441
+ @torch.inference_mode()
442
+ def process_normal(
443
+ self,
444
+ image: np.ndarray,
445
+ prompt: str,
446
+ additional_prompt: str,
447
+ negative_prompt: str,
448
+ num_images: int,
449
+ image_resolution: int,
450
+ preprocess_resolution: int,
451
+ num_steps: int,
452
+ guidance_scale: float,
453
+ seed: int,
454
+ preprocessor_name: str,
455
+ ) -> list[PIL.Image.Image]:
456
+ if preprocessor_name == 'None':
457
+ image = HWC3(image)
458
+ image = resize_image(image, resolution=image_resolution)
459
+ control_image = PIL.Image.fromarray(image)
460
+ else:
461
+ self.preprocessor.load('NormalBae')
462
+ control_image = self.preprocessor(
463
+ image=image,
464
+ image_resolution=image_resolution,
465
+ detect_resolution=preprocess_resolution,
466
+ )
467
+ self.load_controlnet_weight('NormalBae')
468
+ results = self.run_pipe(
469
+ prompt=self.get_prompt(prompt, additional_prompt),
470
+ negative_prompt=negative_prompt,
471
+ control_image=control_image,
472
+ num_images=num_images,
473
+ num_steps=num_steps,
474
+ guidance_scale=guidance_scale,
475
+ seed=seed,
476
+ )
477
+ return [control_image] + results
478
+
479
+ @torch.inference_mode()
480
+ def process_lineart(
481
+ self,
482
+ image: np.ndarray,
483
+ prompt: str,
484
+ additional_prompt: str,
485
+ negative_prompt: str,
486
+ num_images: int,
487
+ image_resolution: int,
488
+ preprocess_resolution: int,
489
+ num_steps: int,
490
+ guidance_scale: float,
491
+ seed: int,
492
+ preprocessor_name: str,
493
+ ) -> list[PIL.Image.Image]:
494
+ if preprocessor_name in ['None', 'None (anime)']:
495
+ image = HWC3(image)
496
+ image = resize_image(image, resolution=image_resolution)
497
+ control_image = PIL.Image.fromarray(image)
498
+ elif preprocessor_name in ['Lineart', 'Lineart coarse']:
499
+ coarse = 'coarse' in preprocessor_name
500
+ self.preprocessor.load('Lineart')
501
+ control_image = self.preprocessor(
502
+ image=image,
503
+ image_resolution=image_resolution,
504
+ detect_resolution=preprocess_resolution,
505
+ coarse=coarse,
506
+ )
507
+ elif preprocessor_name == 'Lineart (anime)':
508
+ self.preprocessor.load('LineartAnime')
509
+ control_image = self.preprocessor(
510
+ image=image,
511
+ image_resolution=image_resolution,
512
+ detect_resolution=preprocess_resolution,
513
+ )
514
+ if 'anime' in preprocessor_name:
515
+ self.load_controlnet_weight('lineart_anime')
516
+ else:
517
+ self.load_controlnet_weight('lineart')
518
+ results = self.run_pipe(
519
+ prompt=self.get_prompt(prompt, additional_prompt),
520
+ negative_prompt=negative_prompt,
521
+ control_image=control_image,
522
+ num_images=num_images,
523
+ num_steps=num_steps,
524
+ guidance_scale=guidance_scale,
525
+ seed=seed,
526
+ )
527
+ return [control_image] + results
528
+
529
+ @torch.inference_mode()
530
+ def process_shuffle(
531
+ self,
532
+ image: np.ndarray,
533
+ prompt: str,
534
+ additional_prompt: str,
535
+ negative_prompt: str,
536
+ num_images: int,
537
+ image_resolution: int,
538
+ num_steps: int,
539
+ guidance_scale: float,
540
+ seed: int,
541
+ preprocessor_name: str,
542
+ ) -> list[PIL.Image.Image]:
543
+ if preprocessor_name == 'None':
544
+ image = HWC3(image)
545
+ image = resize_image(image, resolution=image_resolution)
546
+ control_image = PIL.Image.fromarray(image)
547
+ else:
548
+ self.preprocessor.load(preprocessor_name)
549
+ control_image = self.preprocessor(
550
+ image=image,
551
+ image_resolution=image_resolution,
552
+ )
553
+ self.load_controlnet_weight('shuffle')
554
+ results = self.run_pipe(
555
+ prompt=self.get_prompt(prompt, additional_prompt),
556
+ negative_prompt=negative_prompt,
557
+ control_image=control_image,
558
+ num_images=num_images,
559
+ num_steps=num_steps,
560
+ guidance_scale=guidance_scale,
561
+ seed=seed,
562
+ )
563
+ return [control_image] + results
564
+
565
+ @torch.inference_mode()
566
+ def process_ip2p(
567
+ self,
568
+ image: np.ndarray,
569
+ prompt: str,
570
+ additional_prompt: str,
571
+ negative_prompt: str,
572
+ num_images: int,
573
+ image_resolution: int,
574
+ num_steps: int,
575
+ guidance_scale: float,
576
+ seed: int,
577
+ ) -> list[PIL.Image.Image]:
578
+ image = HWC3(image)
579
+ image = resize_image(image, resolution=image_resolution)
580
+ control_image = PIL.Image.fromarray(image)
581
+ self.load_controlnet_weight('ip2p')
582
+ results = self.run_pipe(
583
+ prompt=self.get_prompt(prompt, additional_prompt),
584
+ negative_prompt=negative_prompt,
585
+ control_image=control_image,
586
+ num_images=num_images,
587
+ num_steps=num_steps,
588
+ guidance_scale=guidance_scale,
589
+ seed=seed,
590
+ )
591
+ return [control_image] + results
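Model is also usable outside Gradio. A rough sketch, assuming a CUDA GPU (the pipelines are loaded in float16 and run_pipe autocasts to 'cuda'), network access for the runwayml/stable-diffusion-v1-5 and ControlNet weights, and a hypothetical local input image; the first element of the returned list is the Canny control image, the rest are generated samples:

import numpy as np
import PIL.Image
from model import Model

model = Model(task_name='Canny')  # loads SD 1.5 plus the Canny ControlNet in float16
image = np.array(PIL.Image.open('input.png').convert('RGB'))  # hypothetical input file
outputs = model.process_canny(
    image=image,
    prompt='a watercolor painting of a house',
    additional_prompt='best quality, extremely detailed',
    negative_prompt='lowres, worst quality',
    num_images=1,
    image_resolution=512,
    num_steps=20,
    guidance_scale=9.0,
    seed=0,
    low_threshold=100,
    high_threshold=200,
)
outputs[0].save('canny_control.png')  # control image
outputs[1].save('canny_result.png')   # first generated sample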
notebooks/notebook.ipynb ADDED
@@ -0,0 +1,69 @@
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "code",
5
+ "execution_count": null,
6
+ "metadata": {
7
+ "id": "8CnkIPtjn8Dc"
8
+ },
9
+ "outputs": [],
10
+ "source": [
11
+ "!git clone --recursive https://huggingface.co/spaces/hysts/ControlNet-v1-1"
12
+ ]
13
+ },
14
+ {
15
+ "cell_type": "code",
16
+ "execution_count": null,
17
+ "metadata": {
18
+ "id": "IZlaYNTWoFPK"
19
+ },
20
+ "outputs": [],
21
+ "source": [
22
+ "%cd ControlNet-v1-1"
23
+ ]
24
+ },
25
+ {
26
+ "cell_type": "code",
27
+ "execution_count": null,
28
+ "metadata": {
29
+ "id": "P_fzYrLvoIcI"
30
+ },
31
+ "outputs": [],
32
+ "source": [
33
+ "!pip install -q -r requirements.txt"
34
+ ]
35
+ },
36
+ {
37
+ "cell_type": "code",
38
+ "execution_count": null,
39
+ "metadata": {
40
+ "id": "GOfGng5Woktd"
41
+ },
42
+ "outputs": [],
43
+ "source": [
44
+ "import app"
45
+ ]
46
+ },
47
+ {
48
+ "cell_type": "code",
49
+ "execution_count": null,
50
+ "metadata": {
51
+ "id": "7Cued230ol7T"
52
+ },
53
+ "outputs": [],
54
+ "source": []
55
+ }
56
+ ],
57
+ "metadata": {
58
+ "accelerator": "GPU",
59
+ "colab": {
60
+ "provenance": []
61
+ },
62
+ "gpuClass": "standard",
63
+ "language_info": {
64
+ "name": "python"
65
+ }
66
+ },
67
+ "nbformat": 4,
68
+ "nbformat_minor": 0
69
+ }
preprocessor.py ADDED
@@ -0,0 +1,77 @@
1
+ import gc
2
+
3
+ import numpy as np
4
+ import PIL.Image
5
+ import torch
6
+ from controlnet_aux import (CannyDetector, ContentShuffleDetector, HEDdetector,
7
+ LineartAnimeDetector, LineartDetector,
8
+ MidasDetector, MLSDdetector, NormalBaeDetector,
9
+ OpenposeDetector, PidiNetDetector)
10
+ from controlnet_aux.util import HWC3
11
+
12
+ from cv_utils import resize_image
13
+ from depth_estimator import DepthEstimator
14
+ from image_segmentor import ImageSegmentor
15
+
16
+
17
+ class Preprocessor:
18
+ MODEL_ID = 'lllyasviel/Annotators'
19
+
20
+ def __init__(self):
21
+ self.model = None
22
+ self.name = ''
23
+
24
+ def load(self, name: str) -> None:
25
+ if name == self.name:
26
+ return
27
+ if name == 'HED':
28
+ self.model = HEDdetector.from_pretrained(self.MODEL_ID)
29
+ elif name == 'Midas':
30
+ self.model = MidasDetector.from_pretrained(self.MODEL_ID)
31
+ elif name == 'MLSD':
32
+ self.model = MLSDdetector.from_pretrained(self.MODEL_ID)
33
+ elif name == 'Openpose':
34
+ self.model = OpenposeDetector.from_pretrained(self.MODEL_ID)
35
+ elif name == 'PidiNet':
36
+ self.model = PidiNetDetector.from_pretrained(self.MODEL_ID)
37
+ elif name == 'NormalBae':
38
+ self.model = NormalBaeDetector.from_pretrained(self.MODEL_ID)
39
+ elif name == 'Lineart':
40
+ self.model = LineartDetector.from_pretrained(self.MODEL_ID)
41
+ elif name == 'LineartAnime':
42
+ self.model = LineartAnimeDetector.from_pretrained(self.MODEL_ID)
43
+ elif name == 'Canny':
44
+ self.model = CannyDetector()
45
+ elif name == 'ContentShuffle':
46
+ self.model = ContentShuffleDetector()
47
+ elif name == 'DPT':
48
+ self.model = DepthEstimator()
49
+ elif name == 'UPerNet':
50
+ self.model = ImageSegmentor()
51
+ else:
52
+ raise ValueError
53
+ torch.cuda.empty_cache()
54
+ gc.collect()
55
+ self.name = name
56
+
57
+ def __call__(self, image: PIL.Image.Image, **kwargs) -> PIL.Image.Image:
58
+ if self.name == 'Canny':
59
+ if 'detect_resolution' in kwargs:
60
+ detect_resolution = kwargs.pop('detect_resolution')
61
+ image = np.array(image)
62
+ image = HWC3(image)
63
+ image = resize_image(image, resolution=detect_resolution)
64
+ image = self.model(image, **kwargs)
65
+ return PIL.Image.fromarray(image)
66
+ elif self.name == 'Midas':
67
+ detect_resolution = kwargs.pop('detect_resolution', 512)
68
+ image_resolution = kwargs.pop('image_resolution', 512)
69
+ image = np.array(image)
70
+ image = HWC3(image)
71
+ image = resize_image(image, resolution=detect_resolution)
72
+ image = self.model(image, **kwargs)
73
+ image = HWC3(image)
74
+ image = resize_image(image, resolution=image_resolution)
75
+ return PIL.Image.fromarray(image)
76
+ else:
77
+ return self.model(image, **kwargs)
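The Preprocessor wrapper keeps one detector loaded at a time and frees the previous one on load(); in the Canny branch it pops detect_resolution itself and forwards the remaining keyword arguments to the detector. A small sketch mirroring how Model.process_canny calls it:

import numpy as np
from preprocessor import Preprocessor

preprocessor = Preprocessor()
preprocessor.load('Canny')  # swaps in CannyDetector and releases any previous model
rgb = np.random.randint(0, 255, size=(512, 512, 3), dtype=np.uint8)  # stand-in input
control = preprocessor(
    image=rgb,
    low_threshold=100,
    high_threshold=200,
    detect_resolution=512,
)
control.save('canny_edges.png')  # PIL image used as the ControlNet conditioning input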
requirements.txt ADDED
@@ -0,0 +1,12 @@
1
+ accelerate==0.20.3
2
+ controlnet_aux==0.0.5
3
+ diffusers==0.17.1
4
+ einops==0.6.1
5
+ gradio==3.34.0
6
+ huggingface-hub==0.14.1
7
+ opencv-python-headless==4.7.0.72
8
+ safetensors==0.3.1
9
+ torch==2.0.1
10
+ torchvision==0.15.2
11
+ transformers==4.30.2
12
+ xformers==0.0.20
style.css ADDED
@@ -0,0 +1,3 @@
1
+ h1 {
2
+ text-align: center;
3
+ }
utils.py ADDED
@@ -0,0 +1,7 @@
1
+ import random
2
+
3
+
4
+ def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
5
+ if randomize_seed:
6
+ seed = random.randint(0, 1000000)
7
+ return seed