mattyamonaca committed on
Commit
e3dd038
•
1 Parent(s): 2fbc328

Add application file

Files changed (10)
  1. .gitignore +164 -0
  2. LICENSE +201 -0
  3. README.md +35 -13
  4. app.py +121 -0
  5. controlnet/lineart/__put_your_lineart_model +0 -0
  6. convertor.py +102 -0
  7. requirements.txt +13 -0
  8. sd_model.py +66 -0
  9. starline.py +416 -0
  10. utils.py +53 -0
.gitignore ADDED
@@ -0,0 +1,164 @@
+ # Byte-compiled / optimized / DLL files
+ __pycache__/
+ *.py[cod]
+ *$py.class
+
+ # C extensions
+ *.so
+
+ # Distribution / packaging
+ .Python
+ build/
+ develop-eggs/
+ dist/
+ downloads/
+ eggs/
+ .eggs/
+ lib/
+ lib64/
+ parts/
+ sdist/
+ var/
+ wheels/
+ share/python-wheels/
+ *.egg-info/
+ .installed.cfg
+ *.egg
+ MANIFEST
+
+ # PyInstaller
+ # Usually these files are written by a python script from a template
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
+ *.manifest
+ *.spec
+
+ # Installer logs
+ pip-log.txt
+ pip-delete-this-directory.txt
+
+ # Unit test / coverage reports
+ htmlcov/
+ .tox/
+ .nox/
+ .coverage
+ .coverage.*
+ .cache
+ nosetests.xml
+ coverage.xml
+ *.cover
+ *.py,cover
+ .hypothesis/
+ .pytest_cache/
+ cover/
+
+ # Translations
+ *.mo
+ *.pot
+
+ # Django stuff:
+ *.log
+ local_settings.py
+ db.sqlite3
+ db.sqlite3-journal
+
+ # Flask stuff:
+ instance/
+ .webassets-cache
+
+ # Scrapy stuff:
+ .scrapy
+
+ # Sphinx documentation
+ docs/_build/
+
+ # PyBuilder
+ .pybuilder/
+ target/
+
+ # Jupyter Notebook
+ .ipynb_checkpoints
+
+ # IPython
+ profile_default/
+ ipython_config.py
+
+ # pyenv
+ # For a library or package, you might want to ignore these files since the code is
+ # intended to run in multiple environments; otherwise, check them in:
+ # .python-version
+
+ # pipenv
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
+ # install all needed dependencies.
+ #Pipfile.lock
+
+ # poetry
+ # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
+ # commonly ignored for libraries.
+ # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
+ #poetry.lock
+
+ # pdm
+ # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
+ #pdm.lock
+ # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
+ # in version control.
+ # https://pdm.fming.dev/#use-with-ide
+ .pdm.toml
+
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
+ __pypackages__/
+
+ # Celery stuff
+ celerybeat-schedule
+ celerybeat.pid
+
+ # SageMath parsed files
+ *.sage.py
+
+ # Environments
+ .env
+ .venv
+ env/
+ venv/
+ ENV/
+ env.bak/
+ venv.bak/
+
+ # Spyder project settings
+ .spyderproject
+ .spyproject
+
+ # Rope project settings
+ .ropeproject
+
+ # mkdocs documentation
+ /site
+
+ # mypy
+ .mypy_cache/
+ .dmypy.json
+ dmypy.json
+
+ # Pyre type checker
+ .pyre/
+
+ # pytype static type analyzer
+ .pytype/
+
+ output/
+ # Cython debug symbols
+ cython_debug/
+
+ # PyCharm
+ # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
+ # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
+ # and can be added to the global gitignore or merged into this file. For a more nuclear
+ # option (not recommended) you can uncomment the following to ignore the entire idea folder.
+ #.idea/
+
+ *.safetensors
+ *.json
LICENSE ADDED
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
README.md CHANGED
@@ -1,13 +1,35 @@
- ---
- title: Starline
- emoji: 📊
- colorFrom: pink
- colorTo: red
- sdk: gradio
- sdk_version: 4.33.0
- app_file: app.py
- pinned: false
- license: apache-2.0
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+ # starline
+ **St**rict coloring m**a**chine fo**r** **line** drawings.
+
+
+ ![image](https://github.com/mattyamonaca/starline/assets/48423148/eae07a6e-9c7b-4292-8c70-dac8ec8eeb7b)
+
+
+ https://github.com/mattyamonaca/starline/assets/48423148/8199c65c-a19f-42e9-aab7-df5ed6ef5b4c
+
+ # Installation
+ ```
+ git clone https://github.com/mattyamonaca/starline.git
+ cd starline
+ conda create -n starline python=3.10
+ conda activate starline
+ conda install pytorch torchvision pytorch-cuda=12.1 -c pytorch -c nvidia
+ pip install -r requirements.txt
+ ```
+
+ # Usage
+ - ```python app.py```
+ - Input the line drawing you wish to color (the background should be transparent).
+ - Input a prompt describing the colors you want to add.
+
+ # Precautions
+ - An image size of 1024 x 1024 is recommended.
+ - Anti-alias handling is a beta feature; if you need the output to stay faithful to the line art, binarized (two-value) line art is recommended.
+ - Areas finely enclosed by the line art cannot be colored; any area that cannot be colored is output as transparent.
app.py ADDED
@@ -0,0 +1,121 @@
+ import gradio as gr
+ import sys
+ from starline import process
+
+ from utils import load_cn_model, load_cn_config, randomname
+ from convertor import pil2cv, cv2pil
+
+ from sd_model import get_cn_pipeline, generate, get_cn_detector
+ import cv2
+ import os
+ import numpy as np
+ from PIL import Image
+ import zipfile
+ import torch
+
+ zero = torch.Tensor([0]).cuda()  # touch CUDA once at import time (HF Spaces ZeroGPU pattern)
+
+ path = os.getcwd()
+ output_dir = f"{path}/output"
+ input_dir = f"{path}/input"
+ cn_lineart_dir = f"{path}/controlnet/lineart"
+
+ load_cn_model(cn_lineart_dir)
+ load_cn_config(cn_lineart_dir)
+
+
+ def zip_png_files(folder_path):
+     # Name the zip file after the folder it is placed in
+     zip_path = os.path.join(folder_path, 'output.zip')
+
+     # Create a ZipFile object opened in write mode
+     with zipfile.ZipFile(zip_path, 'w') as zipf:
+         # Walk every file under the folder
+         for foldername, subfolders, filenames in os.walk(folder_path):
+             for filename in filenames:
+                 # Only include PNG files
+                 if filename.endswith('.png'):
+                     # Get the file's full path
+                     file_path = os.path.join(foldername, filename)
+                     # Add it to the zip archive
+                     zipf.write(file_path, arcname=os.path.relpath(file_path, folder_path))
+
+
+ class webui:
+     def __init__(self):
+         self.demo = gr.Blocks()
+
+     def undercoat(self, input_image, pos_prompt, neg_prompt, alpha_th, thickness):
+         org_line_image = input_image
+         image = pil2cv(input_image)
+         image = cv2.cvtColor(image, cv2.COLOR_BGRA2RGBA)
+
+         # Fill fully transparent pixels with opaque white before generation
+         index = np.where(image[:, :, 3] == 0)
+         image[index] = [255, 255, 255, 255]
+         input_image = cv2pil(image)
+
+         pipe = get_cn_pipeline()
+         # Image.ANTIALIAS was removed in Pillow 10; LANCZOS is the same filter
+         detectors = get_cn_detector(input_image.resize((1024, 1024), Image.LANCZOS))
+
+         gen_image = generate(pipe, detectors, pos_prompt, neg_prompt)
+         color_img, unfinished = process(gen_image.resize((image.shape[1], image.shape[0]), Image.LANCZOS), org_line_image, alpha_th, thickness)
+         color_img.save(f"{output_dir}/color_img.png")
+
+         #color_img = color_img.resize((image.shape[1], image.shape[0]), Image.LANCZOS)
+
+         output_img = Image.alpha_composite(color_img, org_line_image)
+         name = randomname(10)
+         os.makedirs(f"{output_dir}/{name}")
+         output_img.save(f"{output_dir}/{name}/output_image.png")
+         org_line_image.save(f"{output_dir}/{name}/line_image.png")
+         color_img.save(f"{output_dir}/{name}/color_image.png")
+         unfinished.save(f"{output_dir}/{name}/unfinished_image.png")
+
+         outputs = [output_img, org_line_image, color_img, unfinished]
+         zip_png_files(f"{output_dir}/{name}")
+         filename = f"{output_dir}/{name}/output.zip"
+
+         return outputs, filename
+
+     def launch(self, share):
+         with self.demo:
+             with gr.Row():
+                 with gr.Column():
+                     input_image = gr.Image(type="pil", image_mode="RGBA")
+
+                     pos_prompt = gr.Textbox(value="1girl, blue hair, pink shirts, bestquality, 4K", max_lines=1000, label="positive prompt")
+                     neg_prompt = gr.Textbox(value=" (worst quality, low quality:1.2), (lowres:1.2), (bad anatomy:1.2), (greyscale, monochrome:1.4)", max_lines=1000, label="negative prompt")
+
+                     alpha_th = gr.Slider(maximum=255, value=100, label="alpha threshold")
+                     thickness = gr.Number(value=5, label="Thickness of correction area (must be an odd number)")
+                     #gr.Slider(maximum=21, value=3, step=2, label="Thickness of correction area")
+
+                     submit = gr.Button(value="Start")
+             with gr.Row():
+                 with gr.Column():
+                     with gr.Tab("output"):
+                         output_0 = gr.Gallery(format="png")
+                         output_file = gr.File()
+             submit.click(
+                 self.undercoat,
+                 inputs=[input_image, pos_prompt, neg_prompt, alpha_th, thickness],
+                 outputs=[output_0, output_file]
+             )
+
+         self.demo.queue()
+         self.demo.launch(share=share)
+
+
+ if __name__ == "__main__":
+     ui = webui()
+     if len(sys.argv) > 1:
+         if sys.argv[1] == "share":
+             ui.launch(share=True)
+         else:
+             ui.launch(share=False)
+     else:
+         ui.launch(share=False)
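
Note: given the argv handling at the bottom of app.py, a public Gradio link can be requested at launch time:
```
python app.py share
```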
controlnet/lineart/__put_your_lineart_model ADDED
File without changes
convertor.py ADDED
@@ -0,0 +1,102 @@
+ import pandas as pd
+ import numpy as np
+ from skimage import color
+ from PIL import Image
+
+
+ def skimage_rgb2lab(rgb):
+     return color.rgb2lab(rgb.reshape(1, 1, 3))
+
+
+ def rgb2df(img):
+     h, w, _ = img.shape
+     x_l, y_l = np.meshgrid(np.arange(h), np.arange(w), indexing='ij')
+     r, g, b = img[:, :, 0], img[:, :, 1], img[:, :, 2]
+     df = pd.DataFrame({
+         "x_l": x_l.ravel(),
+         "y_l": y_l.ravel(),
+         "r": r.ravel(),
+         "g": g.ravel(),
+         "b": b.ravel(),
+     })
+     return df
+
+ def mask2df(mask):
+     h, w = mask.shape
+     x_l, y_l = np.meshgrid(np.arange(h), np.arange(w), indexing='ij')
+     flg = mask.astype(int)
+     df = pd.DataFrame({
+         "x_l_m": x_l.ravel(),
+         "y_l_m": y_l.ravel(),
+         "m_flg": flg.ravel(),
+     })
+     return df
+
+
+ def rgba2df(img):
+     h, w, _ = img.shape
+     x_l, y_l = np.meshgrid(np.arange(h), np.arange(w), indexing='ij')
+     r, g, b, a = img[:, :, 0], img[:, :, 1], img[:, :, 2], img[:, :, 3]
+     df = pd.DataFrame({
+         "x_l": x_l.ravel(),
+         "y_l": y_l.ravel(),
+         "r": r.ravel(),
+         "g": g.ravel(),
+         "b": b.ravel(),
+         "a": a.ravel()
+     })
+     return df
+
+ def hsv2df(img):
+     x_l, y_l = np.meshgrid(np.arange(img.shape[0]), np.arange(img.shape[1]), indexing='ij')
+     h, s, v = np.transpose(img, (2, 0, 1))
+     df = pd.DataFrame({'x_l': x_l.flatten(), 'y_l': y_l.flatten(), 'h': h.flatten(), 's': s.flatten(), 'v': v.flatten()})
+     return df
+
+ def df2rgba(img_df):
+     r_img = img_df.pivot_table(index="x_l", columns="y_l", values="r").reset_index(drop=True).values
+     g_img = img_df.pivot_table(index="x_l", columns="y_l", values="g").reset_index(drop=True).values
+     b_img = img_df.pivot_table(index="x_l", columns="y_l", values="b").reset_index(drop=True).values
+     a_img = img_df.pivot_table(index="x_l", columns="y_l", values="a").reset_index(drop=True).values
+     df_img = np.stack([r_img, g_img, b_img, a_img], 2).astype(np.uint8)
+     return df_img
+
+ def df2bgra(img_df):
+     r_img = img_df.pivot_table(index="x_l", columns="y_l", values="r").reset_index(drop=True).values
+     g_img = img_df.pivot_table(index="x_l", columns="y_l", values="g").reset_index(drop=True).values
+     b_img = img_df.pivot_table(index="x_l", columns="y_l", values="b").reset_index(drop=True).values
+     a_img = img_df.pivot_table(index="x_l", columns="y_l", values="a").reset_index(drop=True).values
+     df_img = np.stack([b_img, g_img, r_img, a_img], 2).astype(np.uint8)
+     return df_img
+
+ def df2rgb(img_df):
+     r_img = img_df.pivot_table(index="x_l", columns="y_l", values="r").reset_index(drop=True).values
+     g_img = img_df.pivot_table(index="x_l", columns="y_l", values="g").reset_index(drop=True).values
+     b_img = img_df.pivot_table(index="x_l", columns="y_l", values="b").reset_index(drop=True).values
+     df_img = np.stack([r_img, g_img, b_img], 2).astype(np.uint8)
+     return df_img
+
+ def pil2cv(image):
+     new_image = np.array(image, dtype=np.uint8)
+     if new_image.ndim == 2:
+         pass
+     elif new_image.shape[2] == 3:
+         new_image = new_image[:, :, ::-1]
+     elif new_image.shape[2] == 4:
+         new_image = new_image[:, :, [2, 1, 0, 3]]
+     return new_image
+
+ def cv2pil(image):
+     new_image = image.copy()
+     if new_image.ndim == 2:
+         pass
+     elif new_image.shape[2] == 3:
+         new_image = new_image[:, :, ::-1]
+     elif new_image.shape[2] == 4:
+         new_image = new_image[:, :, [2, 1, 0, 3]]
+     new_image = Image.fromarray(new_image)
+     return new_image
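
Note: convertor.py's image-to-DataFrame round trip is the data structure the rest of the pipeline leans on. A minimal sketch of it, using a hypothetical random test array:
```python
import numpy as np
from convertor import rgba2df, df2rgba

# Hypothetical 4x6 RGBA test image; every pixel becomes one DataFrame row
# with columns (x_l, y_l, r, g, b, a), and df2rgba pivots the rows back.
img = np.random.randint(0, 256, size=(4, 6, 4), dtype=np.uint8)

df = rgba2df(img)
assert df.shape == (4 * 6, 6)           # one row per pixel
restored = df2rgba(df)
assert np.array_equal(restored, img)    # lossless round trip
```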
requirements.txt ADDED
@@ -0,0 +1,13 @@
+ opencv-python==4.7.0.68
+ pandas==1.5.3
+ scikit-learn==1.2.1
+ scikit-image==0.19.3
+ Pillow==9.4.0
+ tqdm==4.63.0
+ diffusers==0.27.2
+ gradio==4.32.1
+ gradio_client==0.17.0
+ transformers==4.40.1
+ accelerate==0.21.0
+ safetensors==0.4.2
+
sd_model.py ADDED
@@ -0,0 +1,66 @@
+ from diffusers import StableDiffusionControlNetPipeline, ControlNetModel, UniPCMultistepScheduler
+ from diffusers import StableDiffusionXLControlNetPipeline, AutoencoderKL
+ import torch
+ import spaces
+
+ device = "cuda"
+
+ def get_cn_pipeline():
+     controlnets = [
+         ControlNetModel.from_pretrained("./controlnet/lineart", torch_dtype=torch.float16, use_safetensors=True),
+         ControlNetModel.from_pretrained("mattyamonaca/controlnet_line2line_xl", torch_dtype=torch.float16)
+     ]
+
+     vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
+     pipe = StableDiffusionXLControlNetPipeline.from_pretrained(
+         "cagliostrolab/animagine-xl-3.1", controlnet=controlnets, vae=vae, torch_dtype=torch.float16
+     )
+
+     pipe.enable_model_cpu_offload()
+
+     #if pipe.safety_checker is not None:
+     #    pipe.safety_checker = lambda images, **kwargs: (images, [False])
+
+     #pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
+     #pipe.to(device)
+
+     return pipe
+
+ def invert_image(img):
+     # Convert the image to grayscale (in case it is not already black and white)
+     img = img.convert('L')
+     # Invert every pixel
+     inverted_img = img.point(lambda p: 255 - p)
+     # Return the inverted image
+     return inverted_img
+
+
+ def get_cn_detector(image):
+     #lineart_anime = LineartAnimeDetector.from_pretrained("lllyasviel/Annotators")
+     #canny = CannyDetector()
+     #lineart_anime_img = lineart_anime(image)
+     #canny_img = canny(image)
+     #canny_img = canny_img.resize((lineart_anime(image).width, lineart_anime(image).height))
+     re_image = invert_image(image)
+
+     detectors = [re_image, image]
+     print(detectors)
+     return detectors
+
+ @spaces.GPU
+ def generate(pipe, detectors, prompt, negative_prompt):
+     default_pos = ""
+     default_neg = ""
+     prompt = default_pos + prompt
+     negative_prompt = default_neg + negative_prompt
+     print(type(pipe))
+     image = pipe(
+         prompt=prompt,
+         negative_prompt=negative_prompt,
+         image=detectors,
+         num_inference_steps=50,
+         controlnet_conditioning_scale=[1.0, 0.2],
+     ).images[0]
+     return image
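
For reference, a minimal sketch of how these three functions fit together, assuming the lineart ControlNet has already been downloaded into ./controlnet/lineart (utils.load_cn_model does this), that a GPU/ZeroGPU context is available, and that "lineart.png" is a placeholder input path:
```python
from PIL import Image
from sd_model import get_cn_pipeline, get_cn_detector, generate

# Placeholder input: a line drawing with a transparent background.
line = Image.open("lineart.png").convert("RGBA").resize((1024, 1024))

pipe = get_cn_pipeline()           # SDXL + [local lineart CN, line2line CN]
detectors = get_cn_detector(line)  # [inverted grayscale, original image]

# The conditioning scales (1.0, 0.2) inside generate() weight the lineart
# model far more heavily than the line2line model.
image = generate(pipe, detectors, "1girl, blue hair", "(worst quality:1.2)")
image.save("undercoat.png")
```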
starline.py ADDED
@@ -0,0 +1,416 @@
+ from collections import defaultdict, deque
+
+ import cv2
+ import numpy as np
+ from PIL import Image
+ from skimage.color import deltaE_ciede2000, rgb2lab
+ from tqdm import tqdm
+
+
+ def modify_transparency(img, target_rgb):
+     # Work on a copy of the image
+     copy_img = img.copy()
+     data = copy_img.getdata()
+
+     # Build the new pixel data
+     new_data = []
+     for item in data:
+         # Pixels matching the target RGB value become fully opaque
+         if item[:3] == target_rgb:
+             new_data.append((item[0], item[1], item[2], 255))
+         else:
+             # All other pixels become fully transparent
+             new_data.append((item[0], item[1], item[2], 0))
+
+     # Write the new data back into the image
+     copy_img.putdata(new_data)
+     return copy_img
+
+
+ def replace_color(image, color_1, color_2, alpha_np):
+     # Convert the image data to an array
+     data = np.array(image)
+
+     # The image is in RGBA mode, so keep the 4-channel shape in mind
+     original_shape = data.shape
+
+     color_1 = np.array(color_1, dtype=np.uint8)
+     color_2 = np.array(color_2, dtype=np.uint8)
+
+     # Flood-fill the color_1 region from the outside with breadth-first search
+     # The original line art, protected by color_2
+     protected = np.all(data[:, :, :3] == color_2, axis=2)
+     # The region painted color_1 that should be filled in
+     fill_target = np.all(data[:, :, :3] == color_1, axis=2)
+     # Pixels that already carry a final color
+     colored = ~(protected | fill_target)
+
+     # Enumerate the BFS start points.
+     # Iterating over `colored` directly would work, but plain Python is slow,
+     # so doing this through numpy is faster: a start point is a colored pixel
+     # with a fill_target pixel one step up/down/left/right.
+     # (The edge resets guard against np.roll wrapping around the border.)
+     adj_d = colored & np.roll(fill_target, -1, axis=0)
+     adj_d[-1, :] = False
+     adj_u = colored & np.roll(fill_target, 1, axis=0)
+     adj_u[0, :] = False
+     adj_l = colored & np.roll(fill_target, 1, axis=1)
+     adj_l[:, 0] = False
+     adj_r = colored & np.roll(fill_target, -1, axis=1)
+     adj_r[:, -1] = False
+
+     # Already-colored pixels with at least one 4-neighbor painted color_1
+     bfs_start = adj_r | adj_l | adj_u | adj_d
+
+     que = deque(
+         zip(*np.where(bfs_start)),
+         maxlen=original_shape[0] * original_shape[1] * 2,
+     )
+
+     with tqdm(total=original_shape[0] * original_shape[1]) as pbar:
+         pbar.update(np.sum(colored) - np.sum(bfs_start) + np.sum(protected))
+         while len(que) > 0:
+             y, x = que.popleft()
+             neighbors = [
+                 (x - 1, y),
+                 (x + 1, y),
+                 (x, y - 1),
+                 (x, y + 1),  # left, right, up, down
+             ]
+             pbar.update(1)
+             # assert not fill_target[y, x] and not protected[y, x]
+             # assert colored[y, x]
+             color = data[y, x, :3]
+
+             for nx, ny in neighbors:
+                 if (
+                     nx < 0
+                     or nx >= original_shape[1]
+                     or ny < 0
+                     or ny >= original_shape[0]
+                 ):
+                     continue
+                 if fill_target[ny, nx]:
+                     fill_target[ny, nx] = False
+                     # colored[ny, nx] = True
+                     data[ny, nx, :3] = color
+                     que.append((ny, nx))
+         pbar.update(pbar.total - pbar.n)
+
+     data[:, :, 3] = 255 - alpha_np
+     return Image.fromarray(data, "RGBA")
+
+
+ def recolor_lineart_and_composite(lineart_image, base_image, new_color, alpha_th):
+     """
+     Recolor an RGBA lineart image to a single new color while preserving alpha, and composite it over a base image.
+
+     Args:
+         lineart_image (PIL.Image): The lineart image with RGBA channels.
+         base_image (PIL.Image): The base image to composite onto.
+         new_color (tuple): The new RGB color for the lineart (e.g., (255, 0, 0) for red).
+         alpha_th (int): Alpha values below this threshold are zeroed; the rest become fully opaque.
+
+     Returns:
+         tuple: The composited image and the binarized alpha array.
+     """
+     # Ensure images are in RGBA mode
+     if lineart_image.mode != "RGBA":
+         lineart_image = lineart_image.convert("RGBA")
+     if base_image.mode != "RGBA":
+         base_image = base_image.convert("RGBA")
+
+     # Extract the alpha channel from the lineart image
+     r, g, b, alpha = lineart_image.split()
+
+     alpha_np = np.array(alpha)
+     alpha_np[alpha_np < alpha_th] = 0
+     alpha_np[alpha_np >= alpha_th] = 255
+
+     new_alpha = Image.fromarray(alpha_np)
+
+     # Create a new image using the new color and the alpha channel from the original lineart
+     new_lineart_image = Image.merge(
+         "RGBA",
+         (
+             Image.new("L", lineart_image.size, int(new_color[0])),
+             Image.new("L", lineart_image.size, int(new_color[1])),
+             Image.new("L", lineart_image.size, int(new_color[2])),
+             new_alpha,
+         ),
+     )
+
+     # Composite the new lineart image over the base image
+     composite_image = Image.alpha_composite(base_image, new_lineart_image)
+
+     return composite_image, alpha_np
+
+
+ def thicken_and_recolor_lines(base_image, lineart, thickness=3, new_color=(0, 0, 0)):
+     """
+     Thicken the lines of a lineart image, recolor them, and composite onto another image,
+     while preserving the transparency of the original lineart.
+
+     Args:
+         base_image (PIL.Image): The base image to composite onto.
+         lineart (PIL.Image): The lineart image with transparent background.
+         thickness (int): The desired thickness of the lines.
+         new_color (tuple): The new color to apply to the lines (R, G, B).
+
+     Returns:
+         PIL.Image: The image with the recolored and thickened lineart composited on top.
+     """
+     # Ensure both images are in RGBA format
+     if base_image.mode != "RGBA":
+         base_image = base_image.convert("RGBA")
+     if lineart.mode != "RGBA":
+         lineart = lineart.convert("RGBA")
+
+     # Convert the lineart image to OpenCV format
+     lineart_cv = np.array(lineart)
+
+     white_pixels = np.sum(lineart_cv == 255)
+     black_pixels = np.sum(lineart_cv == 0)
+
+     lineart_gray = cv2.cvtColor(lineart_cv, cv2.COLOR_RGBA2GRAY)
+
+     # If the lines are dark on a light background, invert so lines become white
+     if white_pixels > black_pixels:
+         lineart_gray = cv2.bitwise_not(lineart_gray)
+
+     # Thicken the lines using OpenCV
+     kernel = np.ones((thickness, thickness), np.uint8)
+     lineart_thickened = cv2.dilate(lineart_gray, kernel, iterations=1)
+     lineart_thickened = cv2.bitwise_not(lineart_thickened)
+     # Create a new RGBA image for the recolored lineart
+     lineart_recolored = np.zeros_like(lineart_cv)
+     lineart_recolored[:, :, :3] = new_color  # Set new RGB color
+
+     lineart_recolored[:, :, 3] = np.where(
+         lineart_thickened < 250, 255, 0
+     )  # Blend alpha with thickened lines
+
+     # Convert back to PIL Image
+     lineart_recolored_pil = Image.fromarray(lineart_recolored, "RGBA")
+
+     # Composite the thickened and recolored lineart onto the base image
+     combined_image = Image.alpha_composite(base_image, lineart_recolored_pil)
+
+     return combined_image
+
+
+ def generate_distant_colors(consolidated_colors, distance_threshold):
+     """
+     Generate a new RGB color that is at least 'distance_threshold' CIEDE2000 units away from the given colors.
+
+     Args:
+         consolidated_colors (list of tuples): List of ((R, G, B), count) tuples.
+         distance_threshold (float): The minimum CIEDE2000 distance from the given colors.
+
+     Returns:
+         tuple: A new RGB color that meets the distance requirement, or the farthest candidate found.
+     """
+     # Convert the consolidated colors to LAB
+     consolidated_lab = [
+         rgb2lab(np.array([color], dtype=np.float32) / 255.0).reshape(3)
+         for color, _ in consolidated_colors
+     ]
+
+     # Try to find a distant color
+     max_attempts = 1000
+     best_dist = 0.0
+     best_color = (0, 0, 0)
+
+     # np.random.seed(42)
+     for _ in range(max_attempts):
+         # Generate a random color in RGB and convert to LAB
+         random_rgb = np.random.randint(0, 256, size=3)
+         random_lab = rgb2lab(np.array([random_rgb], dtype=np.float32) / 255.0).reshape(3)
+         # We want a color as far as possible from every color in consolidated_lab
+         min_distance = min(
+             map(
+                 lambda base_color_lab: deltaE_ciede2000(base_color_lab, random_lab),
+                 consolidated_lab,
+             )
+         )
+         if min_distance > distance_threshold:
+             return tuple(random_rgb)
+         # Remember the best candidate in case nothing exceeds the threshold
+         if best_dist < min_distance:
+             best_dist = min_distance
+             best_color = tuple(random_rgb)
+     return best_color
+
+
+ def consolidate_colors(major_colors, threshold):
+     """
+     Consolidate similar colors in the major_colors list based on the CIEDE2000 metric.
+
+     Args:
+         major_colors (list of tuples): List of ((R, G, B), count) tuples.
+         threshold (float): Threshold for CIEDE2000 color difference.
+
+     Returns:
+         list of tuples: Consolidated list of ((R, G, B), count) tuples.
+     """
+     # Convert RGB to LAB
+     colors_lab = [
+         rgb2lab(np.array([[color]], dtype=np.float32) / 255.0).reshape(3)
+         for color, _ in major_colors
+     ]
+     n = len(colors_lab)
+
+     # Find similar colors and consolidate
+     i = 0
+     while i < n:
+         j = i + 1
+         while j < n:
+             delta_e = deltaE_ciede2000(colors_lab[i], colors_lab[j])
+             if delta_e < threshold:
+                 # Compare counts and consolidate to the color with the higher count
+                 if major_colors[i][1] >= major_colors[j][1]:
+                     major_colors[i] = (
+                         major_colors[i][0],
+                         major_colors[i][1] + major_colors[j][1],
+                     )
+                     major_colors.pop(j)
+                     colors_lab.pop(j)
+                 else:
+                     major_colors[j] = (
+                         major_colors[j][0],
+                         major_colors[j][1] + major_colors[i][1],
+                     )
+                     major_colors.pop(i)
+                     colors_lab.pop(i)
+                 n -= 1
+                 continue
+             j += 1
+         i += 1
+
+     return major_colors
+
+
+ def get_major_colors(image, threshold_percentage=0.01):
+     """
+     Analyze an image to find the major RGB values based on a threshold percentage.
+
+     Args:
+         image (PIL.Image): The image to analyze.
+         threshold_percentage (float): The percentage threshold to consider a color as major.
+
+     Returns:
+         list of tuples: A list of (color, count) tuples for colors that are more frequent than the threshold.
+     """
+     # Convert image to RGB if it's not
+     if image.mode != "RGB":
+         image = image.convert("RGB")
+
+     # Count each color
+     color_count = defaultdict(int)
+     for pixel in image.getdata():
+         color_count[pixel] += 1
+
+     # Total number of pixels
+     total_pixels = image.width * image.height
+
+     # Filter colors to find those above the threshold
+     major_colors = [
+         (color, count)
+         for color, count in color_count.items()
+         if (count / total_pixels) >= threshold_percentage
+     ]
+
+     return major_colors
+
+
+ def process(image, lineart, alpha_th, thickness):
+     org = image
+     image.save("tmp.png")
+
+     major_colors = get_major_colors(image, threshold_percentage=0.05)
+     major_colors = consolidate_colors(major_colors, 10)
+
+     th = 10
+     threshold_percentage = 0.05
+     # Lower the threshold until at least one major color is found
+     while len(major_colors) < 1:
+         threshold_percentage = threshold_percentage - 0.001
+         major_colors = get_major_colors(image, threshold_percentage=threshold_percentage)
+
+     while len(major_colors) < 1:
+         th = th + 1
+         major_colors = consolidate_colors(major_colors, th)
+
+     new_color_1 = generate_distant_colors(major_colors, 50)
+     image = thicken_and_recolor_lines(
+         org, lineart, thickness=thickness, new_color=new_color_1
+     )
+
+     major_colors.append((new_color_1, 0))
+     new_color_2 = generate_distant_colors(major_colors, 40)
+     image, alpha_np = recolor_lineart_and_composite(
+         lineart, image, new_color_2, alpha_th
+     )
+     # import time
+     # start = time.time()
+     image = replace_color(image, new_color_1, new_color_2, alpha_np)
+     # end = time.time()
+     # print(f"{end-start} sec")
+     unfinished = modify_transparency(image, new_color_1)
+
+     return image, unfinished
+
+
+ def main():
+     import os
+     import sys
+     from argparse import ArgumentParser
+
+     from PIL import Image
+
+     from utils import randomname
+
+     args = ArgumentParser(
+         prog="starline",
+         description="Starline",
+         epilog="Starline",
+     )
+     args.add_argument("-c", "--colored_image", help="colored image", required=True)
+     args.add_argument("-l", "--lineart_image", help="lineart image", required=True)
+     args.add_argument("-o", "--output_dir", help="output directory", default="output")
+     args.add_argument("-a", "--alpha_th", help="alpha threshold", default=100, type=int)
+     args.add_argument("-t", "--thickness", help="line thickness", default=5, type=int)
+
+     args = args.parse_args(sys.argv[1:])
+     colored_image_path = args.colored_image
+     lineart_image_path = args.lineart_image
+     alpha = args.alpha_th
+     thickness = args.thickness
+     output_dir = args.output_dir
+
+     colored_image = Image.open(colored_image_path)
+     lineart_image = Image.open(lineart_image_path)
+     if lineart_image.mode == "P" or lineart_image.mode == "L":
+         # Handling for 1-channel (P or L mode) line art:
+         # assume the channel carries alpha information (transparent -> 0, opaque -> 255)
+         # and set the RGB channels to its inverse (transparent -> white -> 255, opaque -> black -> 0)
+         lineart_image = lineart_image.convert("RGBA")
+         lineart_image = np.array(lineart_image)
+         lineart_image[:, :, 0] = 255 - lineart_image[:, :, 3]
+         lineart_image[:, :, 1] = 255 - lineart_image[:, :, 3]
+         lineart_image[:, :, 2] = 255 - lineart_image[:, :, 3]
+         lineart_image = Image.fromarray(lineart_image)
+         lineart_image = lineart_image.convert("RGBA")
+
+     result_image, unfinished = process(colored_image, lineart_image, alpha, thickness)
+
+     output_image = Image.alpha_composite(result_image, lineart_image)
+
+     name = randomname(10)
+
+     os.makedirs(f"{output_dir}/{name}")
+     output_image.save(f"{output_dir}/{name}/output_image.png")
+     result_image.save(f"{output_dir}/{name}/color_image.png")
+     unfinished.save(f"{output_dir}/{name}/unfinished_image.png")
+
+
+ if __name__ == "__main__":
+     main()
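
Note: since main() above wires process() to argparse, starline.py also runs standalone without the Gradio app. One possible invocation, where colored.png and lineart.png are placeholder file names:
```
python starline.py -c colored.png -l lineart.png -o output -a 100 -t 5
```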
utils.py ADDED
@@ -0,0 +1,53 @@
+ import random
+ import string
+ import os
+
+ import requests
+ from tqdm import tqdm
+
+
+ def randomname(n):
+     randlst = [random.choice(string.ascii_letters + string.digits) for _ in range(n)]
+     return ''.join(randlst)
+
+ def load_cn_model(model_dir):
+     folder = model_dir
+     file_name = 'diffusion_pytorch_model.safetensors'
+     url = "https://huggingface.co/kataragi/ControlNet-LineartXL/resolve/main/Katarag_lineartXL-fp16.safetensors"
+
+     file_path = os.path.join(folder, file_name)
+     if not os.path.exists(file_path):
+         response = requests.get(url, stream=True)
+
+         total_size = int(response.headers.get('content-length', 0))
+         with open(file_path, 'wb') as f, tqdm(
+             desc=file_name,
+             total=total_size,
+             unit='iB',
+             unit_scale=True,
+             unit_divisor=1024,
+         ) as bar:
+             for data in response.iter_content(chunk_size=1024):
+                 size = f.write(data)
+                 bar.update(size)
+
+ def load_cn_config(model_dir):
+     folder = model_dir
+     file_name = 'config.json'
+     url = "https://huggingface.co/mattyamonaca/controlnet_line2line_xl/resolve/main/config.json"
+
+     file_path = os.path.join(folder, file_name)
+     if not os.path.exists(file_path):
+         response = requests.get(url, stream=True)
+
+         total_size = int(response.headers.get('content-length', 0))
+         with open(file_path, 'wb') as f, tqdm(
+             desc=file_name,
+             total=total_size,
+             unit='iB',
+             unit_scale=True,
+             unit_divisor=1024,
+         ) as bar:
+             for data in response.iter_content(chunk_size=1024):
+                 size = f.write(data)
+                 bar.update(size)