backend + frontend
- packages.txt +32 -0
- requirements.txt +12 -8
- run.py +3 -2
- stablediffusion-infinity/.gitignore +8 -0
- stablediffusion-infinity/.gitmodules +3 -0
- stablediffusion-infinity/LICENSE +201 -0
- stablediffusion-infinity/PyPatchMatch/.gitignore +3 -0
- stablediffusion-infinity/PyPatchMatch/LICENSE +21 -0
- stablediffusion-infinity/PyPatchMatch/Makefile +54 -0
- stablediffusion-infinity/PyPatchMatch/README.md +64 -0
- stablediffusion-infinity/PyPatchMatch/csrc/inpaint.cpp +234 -0
- stablediffusion-infinity/PyPatchMatch/csrc/inpaint.h +27 -0
- stablediffusion-infinity/PyPatchMatch/csrc/masked_image.cpp +138 -0
- stablediffusion-infinity/PyPatchMatch/csrc/masked_image.h +112 -0
- stablediffusion-infinity/PyPatchMatch/csrc/nnf.cpp +268 -0
- stablediffusion-infinity/PyPatchMatch/csrc/nnf.h +133 -0
- stablediffusion-infinity/PyPatchMatch/csrc/pyinterface.cpp +107 -0
- stablediffusion-infinity/PyPatchMatch/csrc/pyinterface.h +38 -0
- stablediffusion-infinity/PyPatchMatch/examples/.gitignore +2 -0
- stablediffusion-infinity/PyPatchMatch/examples/cpp_example.cpp +31 -0
- stablediffusion-infinity/PyPatchMatch/examples/cpp_example_run.sh +18 -0
- stablediffusion-infinity/PyPatchMatch/examples/images/forest.bmp +0 -0
- stablediffusion-infinity/PyPatchMatch/examples/images/forest_pruned.bmp +0 -0
- stablediffusion-infinity/PyPatchMatch/examples/py_example.py +21 -0
- stablediffusion-infinity/PyPatchMatch/examples/py_example_global_mask.py +27 -0
- stablediffusion-infinity/PyPatchMatch/opencv.pc +11 -0
- stablediffusion-infinity/PyPatchMatch/patch_match.py +191 -0
- stablediffusion-infinity/app.py +234 -0
- stablediffusion-infinity/mask.png +0 -0
- stablediffusion-infinity/perlin2d.py +45 -0
- stablediffusion-infinity/readme.md +93 -0
- stablediffusion-infinity/utils.py +140 -0
packages.txt
ADDED
@@ -0,0 +1,32 @@
python3-opencv
libopencv-dev
libopencv-core-dev
pkg-config
libopencv-imgcodecs-dev
libopencv-dev
libopencv-contrib-dev
build-essential
cmake
git
pkg-config
libgtk-3-dev
libavcodec-dev
libavformat-dev
libswscale-dev
libv4l-dev
libxvidcore-dev
libx264-dev
libjpeg-dev
libpng-dev
libtiff-dev
gfortran
openexr
libatlas-base-dev
python3-dev
python3-numpy
libtbb2
libtbb-dev
libdc1394-22-dev
libopenexr-dev
libgstreamer-plugins-base1.0-dev
libgstreamer1.0-dev
requirements.txt
CHANGED
@@ -1,8 +1,12 @@
+ --extra-index-url https://download.pytorch.org/whl/cu113
+ torch
+ huggingface_hub
+ diffusers
+ transformers
+ scikit-image
+ pillow
+ opencv-python-headless
+ fastapi
+ uvicorn
+ httpx
+ gradio
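The updated requirements pull the CUDA 11.3 build of torch through the extra index URL. A minimal sanity check, assuming torch was installed from those wheels, to confirm the GPU build is actually usable before launching the app (this snippet is illustrative and not part of the commit):

```python
# Hypothetical check that the CUDA-enabled torch wheel is installed and usable.
import torch

def check_cuda() -> None:
    print("torch version:", torch.__version__)
    if torch.cuda.is_available():
        print("CUDA device:", torch.cuda.get_device_name(0))
    else:
        print("CUDA not available; Stable Diffusion inference would fall back to CPU.")

if __name__ == "__main__":
    check_cuda()
```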
run.py
CHANGED
@@ -1,3 +1,4 @@
- import
+ import os

+ os.system("cd stablediffusion-infinity/PyPatchMatch && make clean && make")
+ os.system("cd stablediffusion-infinity && python app.py")
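run.py chains two os.system calls: it first compiles the PatchMatch shared library, then launches the app. A hedged equivalent using subprocess that stops early if the native build fails (the paths come from the commit; the error handling is an illustrative addition, not what the Space actually does):

```python
import subprocess

# Build libpatchmatch.so first; abort if compilation fails instead of letting
# app.py crash later when the shared library cannot be loaded.
subprocess.run(["make", "clean"], cwd="stablediffusion-infinity/PyPatchMatch", check=True)
subprocess.run(["make"], cwd="stablediffusion-infinity/PyPatchMatch", check=True)

# Then start the application, as run.py does.
subprocess.run(["python", "app.py"], cwd="stablediffusion-infinity", check=True)
```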
stablediffusion-infinity/.gitignore
ADDED
@@ -0,0 +1,8 @@
__pycache__/
.ipynb_checkpoints/
build/
.idea/
travis.sh
*.iml
.token
libpatchmatch.so
stablediffusion-infinity/.gitmodules
ADDED
@@ -0,0 +1,3 @@
[submodule "PyPatchMatch"]
	path = PyPatchMatch
	url = https://github.com/vacancy/PyPatchMatch
stablediffusion-infinity/LICENSE
ADDED
@@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
[standard Apache License 2.0 text: Sections 1-9, the appendix, and the boilerplate notice; 201 lines in total]
stablediffusion-infinity/PyPatchMatch/.gitignore
ADDED
@@ -0,0 +1,3 @@
/build/
__pycache__
*.py[cod]
stablediffusion-infinity/PyPatchMatch/LICENSE
ADDED
@@ -0,0 +1,21 @@
MIT License

Copyright (c) 2020 Jiayuan Mao

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
stablediffusion-infinity/PyPatchMatch/Makefile
ADDED
@@ -0,0 +1,54 @@
#
# Makefile
# Jiayuan Mao, 2019-01-09 13:59
#

SRC_DIR = csrc
INC_DIR = csrc
OBJ_DIR = build/obj
TARGET = libpatchmatch.so

LIB_TARGET = $(TARGET)
INCLUDE_DIR = -I $(SRC_DIR) -I $(INC_DIR)

CXX = $(ENVIRONMENT_OPTIONS) g++
CXXFLAGS = -std=c++14
CXXFLAGS += -Ofast -ffast-math -w
# CXXFLAGS += -g
CXXFLAGS += $(shell pkg-config --cflags opencv.pc) -fPIC
CXXFLAGS += $(INCLUDE_DIR)
LDFLAGS = $(shell pkg-config --cflags --libs opencv.pc) -shared -fPIC


CXXSOURCES = $(shell find $(SRC_DIR)/ -name "*.cpp")
OBJS = $(addprefix $(OBJ_DIR)/,$(CXXSOURCES:.cpp=.o))
DEPFILES = $(OBJS:.o=.d)

.PHONY: all clean rebuild test

all: $(LIB_TARGET)

$(OBJ_DIR)/%.o: %.cpp
	@echo "[CC] $< ..."
	@$(CXX) -c $< $(CXXFLAGS) -o $@

$(OBJ_DIR)/%.d: %.cpp
	@mkdir -pv $(dir $@)
	@echo "[dep] $< ..."
	@$(CXX) $(INCLUDE_DIR) $(CXXFLAGS) -MM -MT "$(OBJ_DIR)/$(<:.cpp=.o) $(OBJ_DIR)/$(<:.cpp=.d)" "$<" > "$@"

sinclude $(DEPFILES)

$(LIB_TARGET): $(OBJS)
	@echo "[link] $(LIB_TARGET) ..."
	@$(CXX) $(OBJS) -o $@ $(CXXFLAGS) $(LDFLAGS)

clean:
	rm -rf $(OBJ_DIR) $(LIB_TARGET)

rebuild:
	+@make clean
	+@make

# vim:ft=make
#
stablediffusion-infinity/PyPatchMatch/README.md
ADDED
@@ -0,0 +1,64 @@
PatchMatch based Inpainting
=====================================
This library implements the PatchMatch based inpainting algorithm. It provides both C++ and Python interfaces.
This implementation is heavily based on the implementation by Younesse ANDAM:
[younesse-cv/PatchMatch](https://github.com/younesse-cv/PatchMatch), with some bug fixes.

Usage
-------------------------------------

You need to first install OpenCV to compile the C++ libraries. Then, run `make` to compile the
shared library `libpatchmatch.so`.

For Python users (example available at `examples/py_example.py`):

```python
import patch_match

image = ...  # either a numpy ndarray or a PIL Image object.
mask = ...   # either a numpy ndarray or a PIL Image object.
result = patch_match.inpaint(image, mask, patch_size=5)
```

For C++ users (examples available at `examples/cpp_example.cpp`):

```cpp
#include "inpaint.h"

int main() {
    cv::Mat image = ...
    cv::Mat mask = ...

    cv::Mat result = Inpainting(image, mask, 5).run();

    return 0;
}
```


README and COPYRIGHT by Younesse ANDAM
-------------------------------------
@Author: Younesse ANDAM

@Contact: younesse.andam@gmail.com

Description: This project is a personal implementation of an algorithm called PATCHMATCH that restores missing areas in an image.
The algorithm is presented in the following paper:
PatchMatch: A Randomized Correspondence Algorithm for Structural Image Editing,
by C. Barnes, E. Shechtman, A. Finkelstein and Dan B. Goldman,
ACM Transactions on Graphics (Proc. SIGGRAPH), vol. 28, Aug. 2009.

For more information please refer to
http://www.cs.princeton.edu/gfx/pubs/Barnes_2009_PAR/index.php

Copyright (c) 2010-2011


Requirements
-------------------------------------

To run the project you need to install the OpenCV library and link it to your project.
OpenCV can be downloaded here:
http://opencv.org/downloads.html
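The README's Python snippet leaves `image` and `mask` as placeholders. A minimal end-to-end sketch of the same call, assuming `libpatchmatch.so` has been built and a local `photo.png` exists (the file names, the rectangular mask, and the assumption that `inpaint` returns a numpy array are for illustration only):

```python
import numpy as np
from PIL import Image
import patch_match  # requires libpatchmatch.so built via `make`

image = Image.open("photo.png").convert("RGB")   # assumed input file

# Non-zero pixels mark the region to be filled in.
mask = np.zeros((image.height, image.width), dtype=np.uint8)
mask[100:200, 150:300] = 255                     # arbitrary rectangular hole

result = patch_match.inpaint(image, mask, patch_size=5)
Image.fromarray(result).save("photo_inpainted.png")  # assumes an ndarray result
```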
stablediffusion-infinity/PyPatchMatch/csrc/inpaint.cpp
ADDED
@@ -0,0 +1,234 @@
#include <algorithm>
#include <iostream>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/highgui.hpp>

#include "inpaint.h"

namespace {
    static std::vector<double> kDistance2Similarity;

    void init_kDistance2Similarity() {
        double base[11] = {1.0, 0.99, 0.96, 0.83, 0.38, 0.11, 0.02, 0.005, 0.0006, 0.0001, 0};
        int length = (PatchDistanceMetric::kDistanceScale + 1);
        kDistance2Similarity.resize(length);
        for (int i = 0; i < length; ++i) {
            double t = (double) i / length;
            int j = (int) (100 * t);
            int k = j + 1;
            double vj = (j < 11) ? base[j] : 0;
            double vk = (k < 11) ? base[k] : 0;
            kDistance2Similarity[i] = vj + (100 * t - j) * (vk - vj);
        }
    }


    inline void _weighted_copy(const MaskedImage &source, int ys, int xs, cv::Mat &target, int yt, int xt, double weight) {
        if (source.is_masked(ys, xs)) return;
        if (source.is_globally_masked(ys, xs)) return;

        auto source_ptr = source.get_image(ys, xs);
        auto target_ptr = target.ptr<double>(yt, xt);

#pragma unroll
        for (int c = 0; c < 3; ++c)
            target_ptr[c] += static_cast<double>(source_ptr[c]) * weight;
        target_ptr[3] += weight;
    }
}

/**
 * This algorithm uses a version proposed by Xavier Philippeau.
 */

Inpainting::Inpainting(cv::Mat image, cv::Mat mask, const PatchDistanceMetric *metric)
    : m_initial(image, mask), m_distance_metric(metric), m_pyramid(), m_source2target(), m_target2source() {
    _initialize_pyramid();
}

Inpainting::Inpainting(cv::Mat image, cv::Mat mask, cv::Mat global_mask, const PatchDistanceMetric *metric)
    : m_initial(image, mask, global_mask), m_distance_metric(metric), m_pyramid(), m_source2target(), m_target2source() {
    _initialize_pyramid();
}

void Inpainting::_initialize_pyramid() {
    auto source = m_initial;
    m_pyramid.push_back(source);
    while (source.size().height > m_distance_metric->patch_size() && source.size().width > m_distance_metric->patch_size()) {
        source = source.downsample();
        m_pyramid.push_back(source);
    }

    if (kDistance2Similarity.size() == 0) {
        init_kDistance2Similarity();
    }
}

cv::Mat Inpainting::run(bool verbose, bool verbose_visualize, unsigned int random_seed) {
    srand(random_seed);
    const int nr_levels = m_pyramid.size();

    MaskedImage source, target;
    for (int level = nr_levels - 1; level >= 0; --level) {
        if (verbose) std::cerr << "Inpainting level: " << level << std::endl;

        source = m_pyramid[level];

        if (level == nr_levels - 1) {
            target = source.clone();
            target.clear_mask();
            m_source2target = NearestNeighborField(source, target, m_distance_metric);
            m_target2source = NearestNeighborField(target, source, m_distance_metric);
        } else {
            m_source2target = NearestNeighborField(source, target, m_distance_metric, m_source2target);
            m_target2source = NearestNeighborField(target, source, m_distance_metric, m_target2source);
        }

        if (verbose) std::cerr << "Initialization done." << std::endl;

        if (verbose_visualize) {
            auto visualize_size = m_initial.size();
            cv::Mat source_visualize(visualize_size, m_initial.image().type());
            cv::resize(source.image(), source_visualize, visualize_size);
            cv::imshow("Source", source_visualize);
            cv::Mat target_visualize(visualize_size, m_initial.image().type());
            cv::resize(target.image(), target_visualize, visualize_size);
            cv::imshow("Target", target_visualize);
            cv::waitKey(0);
        }

        target = _expectation_maximization(source, target, level, verbose);
    }

    return target.image();
}

// EM-Like algorithm (see "PatchMatch" - page 6).
// Returns a double sized target image (unless level = 0).
MaskedImage Inpainting::_expectation_maximization(MaskedImage source, MaskedImage target, int level, bool verbose) {
    const int nr_iters_em = 1 + 2 * level;
    const int nr_iters_nnf = static_cast<int>(std::min(7, 1 + level));
    const int patch_size = m_distance_metric->patch_size();

    MaskedImage new_source, new_target;

    for (int iter_em = 0; iter_em < nr_iters_em; ++iter_em) {
        if (iter_em != 0) {
            m_source2target.set_target(new_target);
            m_target2source.set_source(new_target);
            target = new_target;
        }

        if (verbose) std::cerr << "EM Iteration: " << iter_em << std::endl;

        auto size = source.size();
        for (int i = 0; i < size.height; ++i) {
            for (int j = 0; j < size.width; ++j) {
                if (!source.contains_mask(i, j, patch_size)) {
                    m_source2target.set_identity(i, j);
                    m_target2source.set_identity(i, j);
                }
            }
        }
        if (verbose) std::cerr << "  NNF minimization started." << std::endl;
        m_source2target.minimize(nr_iters_nnf);
        m_target2source.minimize(nr_iters_nnf);
        if (verbose) std::cerr << "  NNF minimization finished." << std::endl;

        // Instead of upsizing the final target, we build the last target from the next level source image.
        // Thus, the final target is less blurry (see "Space-Time Video Completion" - page 5).
        bool upscaled = false;
        if (level >= 1 && iter_em == nr_iters_em - 1) {
            new_source = m_pyramid[level - 1];
            new_target = target.upsample(new_source.size().width, new_source.size().height, m_pyramid[level - 1].global_mask());
            upscaled = true;
        } else {
            new_source = m_pyramid[level];
            new_target = target.clone();
        }

        auto vote = cv::Mat(new_target.size(), CV_64FC4);
        vote.setTo(cv::Scalar::all(0));

        // Votes for best patch from NNF Source->Target (completeness) and Target->Source (coherence).
        _expectation_step(m_source2target, 1, vote, new_source, upscaled);
        if (verbose) std::cerr << "  Expectation source to target finished." << std::endl;
        _expectation_step(m_target2source, 0, vote, new_source, upscaled);
        if (verbose) std::cerr << "  Expectation target to source finished." << std::endl;

        // Compile votes and update pixel values.
        _maximization_step(new_target, vote);
        if (verbose) std::cerr << "  Minimization step finished." << std::endl;
    }

    return new_target;
}

// Expectation step: vote for best estimations of each pixel.
void Inpainting::_expectation_step(
    const NearestNeighborField &nnf, bool source2target,
    cv::Mat &vote, const MaskedImage &source, bool upscaled
) {
    auto source_size = nnf.source_size();
    auto target_size = nnf.target_size();
    const int patch_size = m_distance_metric->patch_size();

    for (int i = 0; i < source_size.height; ++i) {
        for (int j = 0; j < source_size.width; ++j) {
            if (nnf.source().is_globally_masked(i, j)) continue;
            int yp = nnf.at(i, j, 0), xp = nnf.at(i, j, 1), dp = nnf.at(i, j, 2);
            double w = kDistance2Similarity[dp];

            for (int di = -patch_size; di <= patch_size; ++di) {
                for (int dj = -patch_size; dj <= patch_size; ++dj) {
                    int ys = i + di, xs = j + dj, yt = yp + di, xt = xp + dj;
                    if (!(ys >= 0 && ys < source_size.height && xs >= 0 && xs < source_size.width)) continue;
                    if (nnf.source().is_globally_masked(ys, xs)) continue;
                    if (!(yt >= 0 && yt < target_size.height && xt >= 0 && xt < target_size.width)) continue;
                    if (nnf.target().is_globally_masked(yt, xt)) continue;

                    if (!source2target) {
                        std::swap(ys, yt);
                        std::swap(xs, xt);
                    }

                    if (upscaled) {
                        for (int uy = 0; uy < 2; ++uy) {
                            for (int ux = 0; ux < 2; ++ux) {
                                _weighted_copy(source, 2 * ys + uy, 2 * xs + ux, vote, 2 * yt + uy, 2 * xt + ux, w);
                            }
                        }
                    } else {
                        _weighted_copy(source, ys, xs, vote, yt, xt, w);
                    }
                }
            }
        }
    }
}

// Maximization Step: maximum likelihood of target pixel.
void Inpainting::_maximization_step(MaskedImage &target, const cv::Mat &vote) {
    auto target_size = target.size();
    for (int i = 0; i < target_size.height; ++i) {
        for (int j = 0; j < target_size.width; ++j) {
            const double *source_ptr = vote.ptr<double>(i, j);
            unsigned char *target_ptr = target.get_mutable_image(i, j);

            if (target.is_globally_masked(i, j)) {
                continue;
            }

            if (source_ptr[3] > 0) {
                unsigned char r = cv::saturate_cast<unsigned char>(source_ptr[0] / source_ptr[3]);
                unsigned char g = cv::saturate_cast<unsigned char>(source_ptr[1] / source_ptr[3]);
                unsigned char b = cv::saturate_cast<unsigned char>(source_ptr[2] / source_ptr[3]);
                target_ptr[0] = r, target_ptr[1] = g, target_ptr[2] = b;
            } else {
                target.set_mask(i, j, 0);
            }
        }
    }
}
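inpaint.cpp runs a coarse-to-fine loop: the masked image is downsampled into a pyramid, and at each level an EM-style loop alternates NNF minimization (the expectation step votes with similar patches) with a maximization step that re-estimates each pixel from the accumulated votes. A much-simplified Python sketch of that control flow, with helper functions passed in as placeholders rather than bindings to the C++ code:

```python
def inpaint_pyramid(image, mask, patch_size, build_pyramid, init_nnf, minimize_nnf,
                    vote_patches, resolve_votes, upsample):
    """Coarse-to-fine EM loop mirroring Inpainting::run / _expectation_maximization (simplified)."""
    pyramid = build_pyramid(image, mask, patch_size)       # finest level first, coarsest last
    target = None
    for level in reversed(range(len(pyramid))):
        source = pyramid[level]
        nnf = init_nnf(source, target)                     # reuse the NNF from the coarser level
        for _ in range(1 + 2 * level):                     # more EM iterations when coarse
            minimize_nnf(nnf, iters=min(7, 1 + level))     # PatchMatch propagation / random search
            votes = vote_patches(nnf, source)              # expectation: weighted patch votes
            target = resolve_votes(votes)                  # maximization: per-pixel weighted average
        if level > 0:
            target = upsample(target, pyramid[level - 1])  # initialize the next-finer level
    return target
```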
stablediffusion-infinity/PyPatchMatch/csrc/inpaint.h
ADDED
@@ -0,0 +1,27 @@
#pragma once

#include <vector>

#include "masked_image.h"
#include "nnf.h"

class Inpainting {
public:
    Inpainting(cv::Mat image, cv::Mat mask, const PatchDistanceMetric *metric);
    Inpainting(cv::Mat image, cv::Mat mask, cv::Mat global_mask, const PatchDistanceMetric *metric);
    cv::Mat run(bool verbose = false, bool verbose_visualize = false, unsigned int random_seed = 1212);

private:
    void _initialize_pyramid(void);
    MaskedImage _expectation_maximization(MaskedImage source, MaskedImage target, int level, bool verbose);
    void _expectation_step(const NearestNeighborField &nnf, bool source2target, cv::Mat &vote, const MaskedImage &source, bool upscaled);
    void _maximization_step(MaskedImage &target, const cv::Mat &vote);

    MaskedImage m_initial;
    std::vector<MaskedImage> m_pyramid;

    NearestNeighborField m_source2target;
    NearestNeighborField m_target2source;
    const PatchDistanceMetric *m_distance_metric;
};
stablediffusion-infinity/PyPatchMatch/csrc/masked_image.cpp
ADDED
@@ -0,0 +1,138 @@
#include "masked_image.h"
#include <algorithm>
#include <iostream>

const cv::Size MaskedImage::kDownsampleKernelSize = cv::Size(6, 6);
const int MaskedImage::kDownsampleKernel[6] = {1, 5, 10, 10, 5, 1};

bool MaskedImage::contains_mask(int y, int x, int patch_size) const {
    auto mask_size = size();
    for (int dy = -patch_size; dy <= patch_size; ++dy) {
        for (int dx = -patch_size; dx <= patch_size; ++dx) {
            int yy = y + dy, xx = x + dx;
            if (yy >= 0 && yy < mask_size.height && xx >= 0 && xx < mask_size.width) {
                if (is_masked(yy, xx) && !is_globally_masked(yy, xx)) return true;
            }
        }
    }
    return false;
}

MaskedImage MaskedImage::downsample() const {
    const auto &kernel_size = MaskedImage::kDownsampleKernelSize;
    const auto &kernel = MaskedImage::kDownsampleKernel;

    const auto size = this->size();
    const auto new_size = cv::Size(size.width / 2, size.height / 2);

    auto ret = MaskedImage(new_size.width, new_size.height);
    if (!m_global_mask.empty()) ret.init_global_mask_mat();
    for (int y = 0; y < size.height - 1; y += 2) {
        for (int x = 0; x < size.width - 1; x += 2) {
            int r = 0, g = 0, b = 0, ksum = 0;
            bool is_gmasked = true;

            for (int dy = -kernel_size.height / 2 + 1; dy <= kernel_size.height / 2; ++dy) {
                for (int dx = -kernel_size.width / 2 + 1; dx <= kernel_size.width / 2; ++dx) {
                    int yy = y + dy, xx = x + dx;
                    if (yy >= 0 && yy < size.height && xx >= 0 && xx < size.width) {
                        if (!is_globally_masked(yy, xx)) {
                            is_gmasked = false;
                        }
                        if (!is_masked(yy, xx)) {
                            auto source_ptr = get_image(yy, xx);
                            int k = kernel[kernel_size.height / 2 - 1 + dy] * kernel[kernel_size.width / 2 - 1 + dx];
                            r += source_ptr[0] * k, g += source_ptr[1] * k, b += source_ptr[2] * k;
                            ksum += k;
                        }
                    }
                }
            }

            if (ksum > 0) r /= ksum, g /= ksum, b /= ksum;

            if (!m_global_mask.empty()) {
                ret.set_global_mask(y / 2, x / 2, is_gmasked);
            }
            if (ksum > 0) {
                auto target_ptr = ret.get_mutable_image(y / 2, x / 2);
                target_ptr[0] = r, target_ptr[1] = g, target_ptr[2] = b;
                ret.set_mask(y / 2, x / 2, 0);
            } else {
                ret.set_mask(y / 2, x / 2, 1);
            }
        }
    }

    return ret;
}

MaskedImage MaskedImage::upsample(int new_w, int new_h) const {
    const auto size = this->size();
    auto ret = MaskedImage(new_w, new_h);
    if (!m_global_mask.empty()) ret.init_global_mask_mat();
    for (int y = 0; y < new_h; ++y) {
        for (int x = 0; x < new_w; ++x) {
            int yy = y * size.height / new_h;
            int xx = x * size.width / new_w;

            if (is_globally_masked(yy, xx)) {
                ret.set_global_mask(y, x, 1);
                ret.set_mask(y, x, 1);
            } else {
                if (!m_global_mask.empty()) ret.set_global_mask(y, x, 0);

                if (is_masked(yy, xx)) {
                    ret.set_mask(y, x, 1);
                } else {
                    auto source_ptr = get_image(yy, xx);
                    auto target_ptr = ret.get_mutable_image(y, x);
                    for (int c = 0; c < 3; ++c)
                        target_ptr[c] = source_ptr[c];
                    ret.set_mask(y, x, 0);
                }
            }
        }
    }

    return ret;
}

MaskedImage MaskedImage::upsample(int new_w, int new_h, const cv::Mat &new_global_mask) const {
    auto ret = upsample(new_w, new_h);
    ret.set_global_mask_mat(new_global_mask);
    return ret;
}

void MaskedImage::compute_image_gradients() {
    if (m_image_grad_computed) {
        return;
    }

    const auto size = m_image.size();
    m_image_grady = cv::Mat(size, CV_8UC3);
    m_image_gradx = cv::Mat(size, CV_8UC3);
    m_image_grady = cv::Scalar::all(0);
    m_image_gradx = cv::Scalar::all(0);

    for (int i = 1; i < size.height - 1; ++i) {
        const auto *ptr = m_image.ptr<unsigned char>(i, 0);
        const auto *ptry1 = m_image.ptr<unsigned char>(i + 1, 0);
        const auto *ptry2 = m_image.ptr<unsigned char>(i - 1, 0);
        const auto *ptrx1 = m_image.ptr<unsigned char>(i, 0) + 3;
        const auto *ptrx2 = m_image.ptr<unsigned char>(i, 0) - 3;
        auto *mptry = m_image_grady.ptr<unsigned char>(i, 0);
        auto *mptrx = m_image_gradx.ptr<unsigned char>(i, 0);
        for (int j = 3; j < size.width * 3 - 3; ++j) {
            mptry[j] = (ptry1[j] / 2 - ptry2[j] / 2) + 128;
            mptrx[j] = (ptrx1[j] / 2 - ptrx2[j] / 2) + 128;
        }
    }

    m_image_grad_computed = true;
}

void MaskedImage::compute_image_gradients() const {
    const_cast<MaskedImage *>(this)->compute_image_gradients();
}
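MaskedImage::downsample halves the resolution with the separable 1-5-10-10-5-1 kernel, skipping masked pixels and renormalizing by the sum of the weights that were actually used. A small NumPy sketch of that idea for a single-channel image (a simplified illustration, not a binding of the C++ method):

```python
import numpy as np

KERNEL = np.array([1, 5, 10, 10, 5, 1], dtype=np.float64)  # same taps as kDownsampleKernel

def downsample_masked(img: np.ndarray, mask: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
    """Halve a single-channel image, ignoring masked (non-zero mask) pixels."""
    h, w = img.shape
    out = np.zeros((h // 2, w // 2))
    out_mask = np.ones_like(out, dtype=np.uint8)   # stays masked where no valid pixels voted
    for y in range(0, h - 1, 2):
        for x in range(0, w - 1, 2):
            acc, ksum = 0.0, 0.0
            for dy in range(-2, 4):                # same window bounds as the C++ loops
                for dx in range(-2, 4):
                    yy, xx = y + dy, x + dx
                    if 0 <= yy < h and 0 <= xx < w and not mask[yy, xx]:
                        k = KERNEL[dy + 2] * KERNEL[dx + 2]
                        acc += img[yy, xx] * k
                        ksum += k
            if ksum > 0:
                out[y // 2, x // 2] = acc / ksum   # renormalize by the weights actually used
                out_mask[y // 2, x // 2] = 0
    return out, out_mask
```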
stablediffusion-infinity/PyPatchMatch/csrc/masked_image.h
ADDED
@@ -0,0 +1,112 @@
#pragma once

#include <opencv2/core.hpp>

class MaskedImage {
public:
    MaskedImage() : m_image(), m_mask(), m_global_mask(), m_image_grady(), m_image_gradx(), m_image_grad_computed(false) {
        // pass
    }
    MaskedImage(cv::Mat image, cv::Mat mask) : m_image(image), m_mask(mask), m_image_grad_computed(false) {
        // pass
    }
    MaskedImage(cv::Mat image, cv::Mat mask, cv::Mat global_mask) : m_image(image), m_mask(mask), m_global_mask(global_mask), m_image_grad_computed(false) {
        // pass
    }
    MaskedImage(cv::Mat image, cv::Mat mask, cv::Mat global_mask, cv::Mat grady, cv::Mat gradx, bool grad_computed) :
        m_image(image), m_mask(mask), m_global_mask(global_mask),
        m_image_grady(grady), m_image_gradx(gradx), m_image_grad_computed(grad_computed) {
        // pass
    }
    MaskedImage(int width, int height) : m_global_mask(), m_image_grady(), m_image_gradx() {
        m_image = cv::Mat(cv::Size(width, height), CV_8UC3);
        m_image = cv::Scalar::all(0);

        m_mask = cv::Mat(cv::Size(width, height), CV_8U);
        m_mask = cv::Scalar::all(0);
    }
    inline MaskedImage clone() {
        return MaskedImage(
            m_image.clone(), m_mask.clone(), m_global_mask.clone(),
            m_image_grady.clone(), m_image_gradx.clone(), m_image_grad_computed
        );
    }

    inline cv::Size size() const {
        return m_image.size();
    }
    inline const cv::Mat &image() const {
        return m_image;
    }
    inline const cv::Mat &mask() const {
        return m_mask;
    }
    inline const cv::Mat &global_mask() const {
        return m_global_mask;
    }
    inline const cv::Mat &grady() const {
        assert(m_image_grad_computed);
        return m_image_grady;
    }
    inline const cv::Mat &gradx() const {
        assert(m_image_grad_computed);
        return m_image_gradx;
    }

    inline void init_global_mask_mat() {
        m_global_mask = cv::Mat(m_mask.size(), CV_8U);
        m_global_mask.setTo(cv::Scalar(0));
    }
    inline void set_global_mask_mat(const cv::Mat &other) {
        m_global_mask = other;
    }

    inline bool is_masked(int y, int x) const {
        return static_cast<bool>(m_mask.at<unsigned char>(y, x));
    }
    inline bool is_globally_masked(int y, int x) const {
        return !m_global_mask.empty() && static_cast<bool>(m_global_mask.at<unsigned char>(y, x));
    }
    inline void set_mask(int y, int x, bool value) {
        m_mask.at<unsigned char>(y, x) = static_cast<unsigned char>(value);
    }
    inline void set_global_mask(int y, int x, bool value) {
        m_global_mask.at<unsigned char>(y, x) = static_cast<unsigned char>(value);
    }
    inline void clear_mask() {
        m_mask.setTo(cv::Scalar(0));
    }

    inline const unsigned char *get_image(int y, int x) const {
        return m_image.ptr<unsigned char>(y, x);
    }
    inline unsigned char *get_mutable_image(int y, int x) {
        return m_image.ptr<unsigned char>(y, x);
    }

    inline unsigned char get_image(int y, int x, int c) const {
        return m_image.ptr<unsigned char>(y, x)[c];
    }
    inline int get_image_int(int y, int x, int c) const {
        return static_cast<int>(m_image.ptr<unsigned char>(y, x)[c]);
    }

    bool contains_mask(int y, int x, int patch_size) const;
    MaskedImage downsample() const;
    MaskedImage upsample(int new_w, int new_h) const;
    MaskedImage upsample(int new_w, int new_h, const cv::Mat &new_global_mask) const;
    void compute_image_gradients();
    void compute_image_gradients() const;

    static const cv::Size kDownsampleKernelSize;
    static const int kDownsampleKernel[6];

private:
    cv::Mat m_image;
    cv::Mat m_mask;
    cv::Mat m_global_mask;
    cv::Mat m_image_grady;
    cv::Mat m_image_gradx;
    bool m_image_grad_computed = false;
};
stablediffusion-infinity/PyPatchMatch/csrc/nnf.cpp
ADDED
@@ -0,0 +1,268 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#include <algorithm>
|
2 |
+
#include <iostream>
|
3 |
+
#include <cmath>
|
4 |
+
|
5 |
+
#include "masked_image.h"
|
6 |
+
#include "nnf.h"
|
7 |
+
|
8 |
+
/**
|
9 |
+
* Nearest-Neighbor Field (see PatchMatch algorithm).
|
10 |
+
* This algorithme uses a version proposed by Xavier Philippeau.
|
11 |
+
*
|
12 |
+
*/
|
13 |
+
|
14 |
+
template <typename T>
|
15 |
+
T clamp(T value, T min_value, T max_value) {
|
16 |
+
return std::min(std::max(value, min_value), max_value);
|
17 |
+
}
|
18 |
+
|
19 |
+
void NearestNeighborField::_randomize_field(int max_retry, bool reset) {
|
20 |
+
auto this_size = source_size();
|
21 |
+
for (int i = 0; i < this_size.height; ++i) {
|
22 |
+
for (int j = 0; j < this_size.width; ++j) {
|
23 |
+
if (m_source.is_globally_masked(i, j)) continue;
|
24 |
+
|
25 |
+
auto this_ptr = mutable_ptr(i, j);
|
26 |
+
int distance = reset ? PatchDistanceMetric::kDistanceScale : this_ptr[2];
|
27 |
+
if (distance < PatchDistanceMetric::kDistanceScale) {
|
28 |
+
continue;
|
29 |
+
}
|
30 |
+
|
31 |
+
int i_target = 0, j_target = 0;
|
32 |
+
for (int t = 0; t < max_retry; ++t) {
|
33 |
+
i_target = rand() % this_size.height;
|
34 |
+
j_target = rand() % this_size.width;
|
35 |
+
if (m_target.is_globally_masked(i_target, j_target)) continue;
|
36 |
+
|
37 |
+
distance = _distance(i, j, i_target, j_target);
|
38 |
+
if (distance < PatchDistanceMetric::kDistanceScale)
|
39 |
+
break;
|
40 |
+
}
|
41 |
+
|
42 |
+
this_ptr[0] = i_target, this_ptr[1] = j_target, this_ptr[2] = distance;
|
43 |
+
}
|
44 |
+
}
|
45 |
+
}
|
46 |
+
|
47 |
+
void NearestNeighborField::_initialize_field_from(const NearestNeighborField &other, int max_retry) {
|
48 |
+
const auto &this_size = source_size();
|
49 |
+
const auto &other_size = other.source_size();
|
50 |
+
double fi = static_cast<double>(this_size.height) / other_size.height;
|
51 |
+
double fj = static_cast<double>(this_size.width) / other_size.width;
|
52 |
+
|
53 |
+
for (int i = 0; i < this_size.height; ++i) {
|
54 |
+
for (int j = 0; j < this_size.width; ++j) {
|
55 |
+
if (m_source.is_globally_masked(i, j)) continue;
|
56 |
+
|
57 |
+
int ilow = static_cast<int>(std::min(i / fi, static_cast<double>(other_size.height - 1)));
|
58 |
+
int jlow = static_cast<int>(std::min(j / fj, static_cast<double>(other_size.width - 1)));
|
59 |
+
auto this_value = mutable_ptr(i, j);
|
60 |
+
auto other_value = other.ptr(ilow, jlow);
|
61 |
+
|
62 |
+
this_value[0] = static_cast<int>(other_value[0] * fi);
|
63 |
+
this_value[1] = static_cast<int>(other_value[1] * fj);
|
64 |
+
this_value[2] = _distance(i, j, this_value[0], this_value[1]);
|
65 |
+
}
|
66 |
+
}
|
67 |
+
|
68 |
+
_randomize_field(max_retry, false);
|
69 |
+
}
|
70 |
+
|
71 |
+
void NearestNeighborField::minimize(int nr_pass) {
|
72 |
+
const auto &this_size = source_size();
|
73 |
+
while (nr_pass--) {
|
74 |
+
for (int i = 0; i < this_size.height; ++i)
|
75 |
+
for (int j = 0; j < this_size.width; ++j) {
|
76 |
+
if (m_source.is_globally_masked(i, j)) continue;
|
77 |
+
if (at(i, j, 2) > 0) _minimize_link(i, j, +1);
|
78 |
+
}
|
79 |
+
for (int i = this_size.height - 1; i >= 0; --i)
|
80 |
+
for (int j = this_size.width - 1; j >= 0; --j) {
|
81 |
+
if (m_source.is_globally_masked(i, j)) continue;
|
82 |
+
if (at(i, j, 2) > 0) _minimize_link(i, j, -1);
|
83 |
+
}
|
84 |
+
}
|
85 |
+
}
|
86 |
+
|
87 |
+
void NearestNeighborField::_minimize_link(int y, int x, int direction) {
|
88 |
+
const auto &this_size = source_size();
|
89 |
+
const auto &this_target_size = target_size();
|
90 |
+
auto this_ptr = mutable_ptr(y, x);
|
91 |
+
|
92 |
+
// propagation along the y direction.
|
93 |
+
if (y - direction >= 0 && y - direction < this_size.height && !m_source.is_globally_masked(y - direction, x)) {
|
94 |
+
int yp = at(y - direction, x, 0) + direction;
|
95 |
+
int xp = at(y - direction, x, 1);
|
96 |
+
int dp = _distance(y, x, yp, xp);
|
97 |
+
if (dp < at(y, x, 2)) {
|
98 |
+
this_ptr[0] = yp, this_ptr[1] = xp, this_ptr[2] = dp;
|
99 |
+
}
|
100 |
+
}
|
101 |
+
|
102 |
+
// propagation along the x direction.
|
103 |
+
if (x - direction >= 0 && x - direction < this_size.width && !m_source.is_globally_masked(y, x - direction)) {
|
104 |
+
int yp = at(y, x - direction, 0);
|
105 |
+
int xp = at(y, x - direction, 1) + direction;
|
106 |
+
int dp = _distance(y, x, yp, xp);
|
107 |
+
if (dp < at(y, x, 2)) {
|
108 |
+
this_ptr[0] = yp, this_ptr[1] = xp, this_ptr[2] = dp;
|
109 |
+
}
|
110 |
+
}
|
111 |
+
|
112 |
+
// random search with a progressive step size.
|
113 |
+
int random_scale = (std::min(this_target_size.height, this_target_size.width) - 1) / 2;
|
114 |
+
while (random_scale > 0) {
|
115 |
+
int yp = this_ptr[0] + (rand() % (2 * random_scale + 1) - random_scale);
|
116 |
+
int xp = this_ptr[1] + (rand() % (2 * random_scale + 1) - random_scale);
|
117 |
+
yp = clamp(yp, 0, target_size().height - 1);
|
118 |
+
xp = clamp(xp, 0, target_size().width - 1);
|
119 |
+
|
120 |
+
if (m_target.is_globally_masked(yp, xp)) {
|
121 |
+
random_scale /= 2;
|
122 |
+
}
|
123 |
+
|
124 |
+
int dp = _distance(y, x, yp, xp);
|
125 |
+
if (dp < at(y, x, 2)) {
|
126 |
+
this_ptr[0] = yp, this_ptr[1] = xp, this_ptr[2] = dp;
|
127 |
+
}
|
128 |
+
random_scale /= 2;
|
129 |
+
}
|
130 |
+
}
|
131 |
+
|
132 |
+
const int PatchDistanceMetric::kDistanceScale = 65535;
|
133 |
+
const int PatchSSDDistanceMetric::kSSDScale = 9 * 255 * 255;
|
134 |
+
|
135 |
+
namespace {
|
136 |
+
|
137 |
+
inline int pow2(int i) {
|
138 |
+
return i * i;
|
139 |
+
}
|
140 |
+
|
141 |
+
int distance_masked_images(
|
142 |
+
const MaskedImage &source, int ys, int xs,
|
143 |
+
const MaskedImage &target, int yt, int xt,
|
144 |
+
int patch_size
|
145 |
+
) {
|
146 |
+
long double distance = 0;
|
147 |
+
long double wsum = 0;
|
148 |
+
|
149 |
+
source.compute_image_gradients();
|
150 |
+
target.compute_image_gradients();
|
151 |
+
|
152 |
+
auto source_size = source.size();
|
153 |
+
auto target_size = target.size();
|
154 |
+
|
155 |
+
for (int dy = -patch_size; dy <= patch_size; ++dy) {
|
156 |
+
const int yys = ys + dy, yyt = yt + dy;
|
157 |
+
|
158 |
+
            if (yys <= 0 || yys >= source_size.height - 1 || yyt <= 0 || yyt >= target_size.height - 1) {
                distance += (long double)(PatchSSDDistanceMetric::kSSDScale) * (2 * patch_size + 1);
                wsum += 2 * patch_size + 1;
                continue;
            }

            const auto *p_si = source.image().ptr<unsigned char>(yys, 0);
            const auto *p_ti = target.image().ptr<unsigned char>(yyt, 0);
            const auto *p_sm = source.mask().ptr<unsigned char>(yys, 0);
            const auto *p_tm = target.mask().ptr<unsigned char>(yyt, 0);

            const unsigned char *p_sgm = nullptr;
            const unsigned char *p_tgm = nullptr;
            if (!source.global_mask().empty()) {
                p_sgm = source.global_mask().ptr<unsigned char>(yys, 0);
                p_tgm = target.global_mask().ptr<unsigned char>(yyt, 0);
            }

            const auto *p_sgy = source.grady().ptr<unsigned char>(yys, 0);
            const auto *p_tgy = target.grady().ptr<unsigned char>(yyt, 0);
            const auto *p_sgx = source.gradx().ptr<unsigned char>(yys, 0);
            const auto *p_tgx = target.gradx().ptr<unsigned char>(yyt, 0);

            for (int dx = -patch_size; dx <= patch_size; ++dx) {
                int xxs = xs + dx, xxt = xt + dx;
                wsum += 1;

                if (xxs <= 0 || xxs >= source_size.width - 1 || xxt <= 0 || xxt >= source_size.width - 1) {
                    distance += PatchSSDDistanceMetric::kSSDScale;
                    continue;
                }

                if (p_sm[xxs] || p_tm[xxt] || (p_sgm && p_sgm[xxs]) || (p_tgm && p_tgm[xxt])) {
                    distance += PatchSSDDistanceMetric::kSSDScale;
                    continue;
                }

                int ssd = 0;
                for (int c = 0; c < 3; ++c) {
                    int s_value = p_si[xxs * 3 + c];
                    int t_value = p_ti[xxt * 3 + c];
                    int s_gy = p_sgy[xxs * 3 + c];
                    int t_gy = p_tgy[xxt * 3 + c];
                    int s_gx = p_sgx[xxs * 3 + c];
                    int t_gx = p_tgx[xxt * 3 + c];

                    ssd += pow2(static_cast<int>(s_value) - t_value);
                    ssd += pow2(static_cast<int>(s_gx) - t_gx);
                    ssd += pow2(static_cast<int>(s_gy) - t_gy);
                }
                distance += ssd;
            }
        }

        distance /= (long double)(PatchSSDDistanceMetric::kSSDScale);

        int res = int(PatchDistanceMetric::kDistanceScale * distance / wsum);
        if (res < 0 || res > PatchDistanceMetric::kDistanceScale) return PatchDistanceMetric::kDistanceScale;
        return res;
    }

}

int PatchSSDDistanceMetric::operator ()(const MaskedImage &source, int source_y, int source_x, const MaskedImage &target, int target_y, int target_x) const {
    return distance_masked_images(source, source_y, source_x, target, target_y, target_x, m_patch_size);
}

int DebugPatchSSDDistanceMetric::operator ()(const MaskedImage &source, int source_y, int source_x, const MaskedImage &target, int target_y, int target_x) const {
    fprintf(stderr, "DebugPatchSSDDistanceMetric: %d %d %d %d\n", source.size().width, source.size().height, m_width, m_height);
    return distance_masked_images(source, source_y, source_x, target, target_y, target_x, m_patch_size);
}

int RegularityGuidedPatchDistanceMetricV1::operator ()(const MaskedImage &source, int source_y, int source_x, const MaskedImage &target, int target_y, int target_x) const {
    double dx = remainder(double(source_x - target_x) / source.size().width, m_dx1);
    double dy = remainder(double(source_y - target_y) / source.size().height, m_dy2);

    double score1 = sqrt(dx * dx + dy * dy) / m_scale;
    if (score1 < 0 || score1 > 1) score1 = 1;
    score1 *= PatchDistanceMetric::kDistanceScale;

    double score2 = distance_masked_images(source, source_y, source_x, target, target_y, target_x, m_patch_size);
    double score = score1 * m_weight + score2 / (1 + m_weight);
    return static_cast<int>(score / (1 + m_weight));
}

int RegularityGuidedPatchDistanceMetricV2::operator ()(const MaskedImage &source, int source_y, int source_x, const MaskedImage &target, int target_y, int target_x) const {
    if (target_y < 0 || target_y >= target.size().height || target_x < 0 || target_x >= target.size().width)
        return PatchDistanceMetric::kDistanceScale;

    int source_scale = m_ijmap.size().height / source.size().height;
    int target_scale = m_ijmap.size().height / target.size().height;

    // fprintf(stderr, "RegularityGuidedPatchDistanceMetricV2 %d %d %d %d\n", source_y * source_scale, m_ijmap.size().height, source_x * source_scale, m_ijmap.size().width);

    double score1 = PatchDistanceMetric::kDistanceScale;
    if (!source.is_globally_masked(source_y, source_x) && !target.is_globally_masked(target_y, target_x)) {
        auto source_ij = m_ijmap.ptr<float>(source_y * source_scale, source_x * source_scale);
        auto target_ij = m_ijmap.ptr<float>(target_y * target_scale, target_x * target_scale);

        float di = fabs(source_ij[0] - target_ij[0]); if (di > 0.5) di = 1 - di;
        float dj = fabs(source_ij[1] - target_ij[1]); if (dj > 0.5) dj = 1 - dj;
        score1 = sqrt(di * di + dj * dj) / 0.707;
        if (score1 < 0 || score1 > 1) score1 = 1;
        score1 *= PatchDistanceMetric::kDistanceScale;
    }

    double score2 = distance_masked_images(source, source_y, source_x, target, target_y, target_x, m_patch_size);
    double score = score1 * m_weight + score2;
    return int(score / (1 + m_weight));
}
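An aside on the C++ above: distance_masked_images sums squared differences of the color values and of the x/y gradients over a (2·patch_size+1)² window, charges kSSDScale for out-of-bounds or masked pixels, and rescales the average to kDistanceScale. A rough NumPy sketch of the color-only part, purely for illustration (the function name and penalty constant are made up, not part of PyPatchMatch):

```python
import numpy as np

def ssd_patch_distance(source, target, sy, sx, ty, tx, patch_size=3):
    """Color-only SSD between two patches, averaged over the window (illustrative sketch)."""
    penalty = 255 ** 2 * 3          # stand-in for kSSDScale
    h_s, w_s = source.shape[:2]
    h_t, w_t = target.shape[:2]
    total, wsum = 0.0, 0
    for dy in range(-patch_size, patch_size + 1):
        for dx in range(-patch_size, patch_size + 1):
            ys, xs, yt, xt = sy + dy, sx + dx, ty + dy, tx + dx
            wsum += 1
            if not (0 < ys < h_s - 1 and 0 < xs < w_s - 1
                    and 0 < yt < h_t - 1 and 0 < xt < w_t - 1):
                total += penalty     # out-of-bounds pixels get the maximal penalty
                continue
            diff = source[ys, xs].astype(int) - target[yt, xt].astype(int)
            total += int((diff ** 2).sum())
    return total / wsum
```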
stablediffusion-infinity/PyPatchMatch/csrc/nnf.h
ADDED
@@ -0,0 +1,133 @@
#pragma once

#include <opencv2/core.hpp>
#include "masked_image.h"

class PatchDistanceMetric {
public:
    PatchDistanceMetric(int patch_size) : m_patch_size(patch_size) {}
    virtual ~PatchDistanceMetric() = default;

    inline int patch_size() const { return m_patch_size; }
    virtual int operator()(const MaskedImage &source, int source_y, int source_x, const MaskedImage &target, int target_y, int target_x) const = 0;
    static const int kDistanceScale;

protected:
    int m_patch_size;
};

class NearestNeighborField {
public:
    NearestNeighborField() : m_source(), m_target(), m_field(), m_distance_metric(nullptr) {
        // pass
    }
    NearestNeighborField(const MaskedImage &source, const MaskedImage &target, const PatchDistanceMetric *metric, int max_retry = 20)
        : m_source(source), m_target(target), m_distance_metric(metric) {
        m_field = cv::Mat(m_source.size(), CV_32SC3);
        _randomize_field(max_retry);
    }
    NearestNeighborField(const MaskedImage &source, const MaskedImage &target, const PatchDistanceMetric *metric, const NearestNeighborField &other, int max_retry = 20)
        : m_source(source), m_target(target), m_distance_metric(metric) {
        m_field = cv::Mat(m_source.size(), CV_32SC3);
        _initialize_field_from(other, max_retry);
    }

    const MaskedImage &source() const {
        return m_source;
    }
    const MaskedImage &target() const {
        return m_target;
    }
    inline cv::Size source_size() const {
        return m_source.size();
    }
    inline cv::Size target_size() const {
        return m_target.size();
    }
    inline void set_source(const MaskedImage &source) {
        m_source = source;
    }
    inline void set_target(const MaskedImage &target) {
        m_target = target;
    }

    inline int *mutable_ptr(int y, int x) {
        return m_field.ptr<int>(y, x);
    }
    inline const int *ptr(int y, int x) const {
        return m_field.ptr<int>(y, x);
    }

    inline int at(int y, int x, int c) const {
        return m_field.ptr<int>(y, x)[c];
    }
    inline int &at(int y, int x, int c) {
        return m_field.ptr<int>(y, x)[c];
    }
    inline void set_identity(int y, int x) {
        auto ptr = mutable_ptr(y, x);
        ptr[0] = y, ptr[1] = x, ptr[2] = 0;
    }

    void minimize(int nr_pass);

private:
    inline int _distance(int source_y, int source_x, int target_y, int target_x) {
        return (*m_distance_metric)(m_source, source_y, source_x, m_target, target_y, target_x);
    }

    void _randomize_field(int max_retry = 20, bool reset = true);
    void _initialize_field_from(const NearestNeighborField &other, int max_retry);
    void _minimize_link(int y, int x, int direction);

    MaskedImage m_source;
    MaskedImage m_target;
    cv::Mat m_field;  // { y_target, x_target, distance_scaled }
    const PatchDistanceMetric *m_distance_metric;
};


class PatchSSDDistanceMetric : public PatchDistanceMetric {
public:
    using PatchDistanceMetric::PatchDistanceMetric;
    virtual int operator ()(const MaskedImage &source, int source_y, int source_x, const MaskedImage &target, int target_y, int target_x) const;
    static const int kSSDScale;
};

class DebugPatchSSDDistanceMetric : public PatchDistanceMetric {
public:
    DebugPatchSSDDistanceMetric(int patch_size, int width, int height) : PatchDistanceMetric(patch_size), m_width(width), m_height(height) {}
    virtual int operator ()(const MaskedImage &source, int source_y, int source_x, const MaskedImage &target, int target_y, int target_x) const;
protected:
    int m_width, m_height;
};

class RegularityGuidedPatchDistanceMetricV1 : public PatchDistanceMetric {
public:
    RegularityGuidedPatchDistanceMetricV1(int patch_size, double dx1, double dy1, double dx2, double dy2, double weight)
        : PatchDistanceMetric(patch_size), m_dx1(dx1), m_dy1(dy1), m_dx2(dx2), m_dy2(dy2), m_weight(weight) {

        assert(m_dy1 == 0);
        assert(m_dx2 == 0);
        m_scale = sqrt(m_dx1 * m_dx1 + m_dy2 * m_dy2) / 4;
    }
    virtual int operator ()(const MaskedImage &source, int source_y, int source_x, const MaskedImage &target, int target_y, int target_x) const;

protected:
    double m_dx1, m_dy1, m_dx2, m_dy2;
    double m_scale, m_weight;
};

class RegularityGuidedPatchDistanceMetricV2 : public PatchDistanceMetric {
public:
    RegularityGuidedPatchDistanceMetricV2(int patch_size, cv::Mat ijmap, double weight)
        : PatchDistanceMetric(patch_size), m_ijmap(ijmap), m_weight(weight) {

    }
    virtual int operator ()(const MaskedImage &source, int source_y, int source_x, const MaskedImage &target, int target_y, int target_x) const;

protected:
    cv::Mat m_ijmap;
    double m_width, m_height, m_weight;
};
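The header above only declares the nearest-neighbor field; minimize() lives in nnf.cpp and alternates forward/backward propagation with random search, as in the original PatchMatch paper. A much-simplified Python sketch of one propagation sweep, assuming an unmasked distance function that penalizes out-of-range targets (the names here are illustrative, not this repo's API):

```python
def propagate_pass(field, dist, h, w, reverse=False):
    """One PatchMatch propagation sweep over an h x w field of (ty, tx, d) entries:
    shift a neighbor's match onto the current pixel and keep it if it is closer."""
    step = -1 if reverse else 1
    ys = range(h - 1, -1, -1) if reverse else range(h)
    for y in ys:
        xs = range(w - 1, -1, -1) if reverse else range(w)
        for x in xs:
            best_y, best_x, best_d = field[y][x]
            for py, px in ((y - step, x), (y, x - step)):
                if 0 <= py < h and 0 <= px < w:
                    # candidate: neighbor's target shifted by the neighbor->pixel offset
                    cy = field[py][px][0] + (y - py)
                    cx = field[py][px][1] + (x - px)
                    d = dist(y, x, cy, cx)   # dist() is assumed to handle out-of-range targets
                    if d < best_d:
                        best_y, best_x, best_d = cy, cx, d
            field[y][x] = (best_y, best_x, best_d)
```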
stablediffusion-infinity/PyPatchMatch/csrc/pyinterface.cpp
ADDED
@@ -0,0 +1,107 @@
#include "pyinterface.h"
#include "inpaint.h"

static unsigned int PM_seed = 1212;
static bool PM_verbose = false;

int _dtype_py_to_cv(int dtype_py);
int _dtype_cv_to_py(int dtype_cv);
cv::Mat _py_to_cv2(PM_mat_t pymat);
PM_mat_t _cv2_to_py(cv::Mat cvmat);

void PM_set_random_seed(unsigned int seed) {
    PM_seed = seed;
}

void PM_set_verbose(int value) {
    PM_verbose = static_cast<bool>(value);
}

void PM_free_pymat(PM_mat_t pymat) {
    free(pymat.data_ptr);
}

PM_mat_t PM_inpaint(PM_mat_t source_py, PM_mat_t mask_py, int patch_size) {
    cv::Mat source = _py_to_cv2(source_py);
    cv::Mat mask = _py_to_cv2(mask_py);
    auto metric = PatchSSDDistanceMetric(patch_size);
    cv::Mat result = Inpainting(source, mask, &metric).run(PM_verbose, false, PM_seed);
    return _cv2_to_py(result);
}

PM_mat_t PM_inpaint_regularity(PM_mat_t source_py, PM_mat_t mask_py, PM_mat_t ijmap_py, int patch_size, float guide_weight) {
    cv::Mat source = _py_to_cv2(source_py);
    cv::Mat mask = _py_to_cv2(mask_py);
    cv::Mat ijmap = _py_to_cv2(ijmap_py);

    auto metric = RegularityGuidedPatchDistanceMetricV2(patch_size, ijmap, guide_weight);
    cv::Mat result = Inpainting(source, mask, &metric).run(PM_verbose, false, PM_seed);
    return _cv2_to_py(result);
}

PM_mat_t PM_inpaint2(PM_mat_t source_py, PM_mat_t mask_py, PM_mat_t global_mask_py, int patch_size) {
    cv::Mat source = _py_to_cv2(source_py);
    cv::Mat mask = _py_to_cv2(mask_py);
    cv::Mat global_mask = _py_to_cv2(global_mask_py);

    auto metric = PatchSSDDistanceMetric(patch_size);
    cv::Mat result = Inpainting(source, mask, global_mask, &metric).run(PM_verbose, false, PM_seed);
    return _cv2_to_py(result);
}

PM_mat_t PM_inpaint2_regularity(PM_mat_t source_py, PM_mat_t mask_py, PM_mat_t global_mask_py, PM_mat_t ijmap_py, int patch_size, float guide_weight) {
    cv::Mat source = _py_to_cv2(source_py);
    cv::Mat mask = _py_to_cv2(mask_py);
    cv::Mat global_mask = _py_to_cv2(global_mask_py);
    cv::Mat ijmap = _py_to_cv2(ijmap_py);

    auto metric = RegularityGuidedPatchDistanceMetricV2(patch_size, ijmap, guide_weight);
    cv::Mat result = Inpainting(source, mask, global_mask, &metric).run(PM_verbose, false, PM_seed);
    return _cv2_to_py(result);
}

int _dtype_py_to_cv(int dtype_py) {
    switch (dtype_py) {
        case PM_UINT8: return CV_8U;
        case PM_INT8: return CV_8S;
        case PM_UINT16: return CV_16U;
        case PM_INT16: return CV_16S;
        case PM_INT32: return CV_32S;
        case PM_FLOAT32: return CV_32F;
        case PM_FLOAT64: return CV_64F;
    }

    return CV_8U;
}

int _dtype_cv_to_py(int dtype_cv) {
    switch (dtype_cv) {
        case CV_8U: return PM_UINT8;
        case CV_8S: return PM_INT8;
        case CV_16U: return PM_UINT16;
        case CV_16S: return PM_INT16;
        case CV_32S: return PM_INT32;
        case CV_32F: return PM_FLOAT32;
        case CV_64F: return PM_FLOAT64;
    }

    return PM_UINT8;
}

cv::Mat _py_to_cv2(PM_mat_t pymat) {
    int dtype = _dtype_py_to_cv(pymat.dtype);
    dtype = CV_MAKETYPE(pymat.dtype, pymat.shape.channels);
    return cv::Mat(cv::Size(pymat.shape.width, pymat.shape.height), dtype, pymat.data_ptr).clone();
}

PM_mat_t _cv2_to_py(cv::Mat cvmat) {
    PM_shape_t shape = {cvmat.size().width, cvmat.size().height, cvmat.channels()};
    int dtype = _dtype_cv_to_py(cvmat.depth());
    size_t dsize = cvmat.total() * cvmat.elemSize();

    void *data_ptr = reinterpret_cast<void *>(malloc(dsize));
    memcpy(data_ptr, reinterpret_cast<void *>(cvmat.data), dsize);

    return PM_mat_t {data_ptr, shape, dtype};
}
stablediffusion-infinity/PyPatchMatch/csrc/pyinterface.h
ADDED
@@ -0,0 +1,38 @@
#include <opencv2/core.hpp>
#include <cstdlib>
#include <cstdio>
#include <cstring>

extern "C" {

struct PM_shape_t {
    int width, height, channels;
};

enum PM_dtype_e {
    PM_UINT8,
    PM_INT8,
    PM_UINT16,
    PM_INT16,
    PM_INT32,
    PM_FLOAT32,
    PM_FLOAT64,
};

struct PM_mat_t {
    void *data_ptr;
    PM_shape_t shape;
    int dtype;
};

void PM_set_random_seed(unsigned int seed);
void PM_set_verbose(int value);

void PM_free_pymat(PM_mat_t pymat);
PM_mat_t PM_inpaint(PM_mat_t image, PM_mat_t mask, int patch_size);
PM_mat_t PM_inpaint_regularity(PM_mat_t image, PM_mat_t mask, PM_mat_t ijmap, int patch_size, float guide_weight);
PM_mat_t PM_inpaint2(PM_mat_t image, PM_mat_t mask, PM_mat_t global_mask, int patch_size);
PM_mat_t PM_inpaint2_regularity(PM_mat_t image, PM_mat_t mask, PM_mat_t global_mask, PM_mat_t ijmap, int patch_size, float guide_weight);

} /* extern "C" */
stablediffusion-infinity/PyPatchMatch/examples/.gitignore
ADDED
@@ -0,0 +1,2 @@
/cpp_example.exe
/images/*recovered.bmp
stablediffusion-infinity/PyPatchMatch/examples/cpp_example.cpp
ADDED
@@ -0,0 +1,31 @@
#include <iostream>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/highgui.hpp>

#include "masked_image.h"
#include "nnf.h"
#include "inpaint.h"

int main() {
    auto source = cv::imread("./images/forest_pruned.bmp", cv::IMREAD_COLOR);

    auto mask = cv::Mat(source.size(), CV_8UC1);
    mask = cv::Scalar::all(0);
    for (int i = 0; i < source.size().height; ++i) {
        for (int j = 0; j < source.size().width; ++j) {
            auto source_ptr = source.ptr<unsigned char>(i, j);
            if (source_ptr[0] == 255 && source_ptr[1] == 255 && source_ptr[2] == 255) {
                mask.at<unsigned char>(i, j) = 1;
            }
        }
    }

    auto metric = PatchSSDDistanceMetric(3);
    auto result = Inpainting(source, mask, &metric).run(true, true);
    // cv::imwrite("./images/forest_recovered.bmp", result);
    // cv::imshow("Result", result);
    // cv::waitKey();

    return 0;
}
stablediffusion-infinity/PyPatchMatch/examples/cpp_example_run.sh
ADDED
@@ -0,0 +1,18 @@
#! /bin/bash
#
# cpp_example_run.sh
# Copyright (C) 2020 Jiayuan Mao <maojiayuan@gmail.com>
#
# Distributed under terms of the MIT license.
#

set -x

CFLAGS="-std=c++14 -O2 $(pkg-config --cflags opencv)"
LDFLAGS="$(pkg-config --libs opencv)"
g++ $CFLAGS cpp_example.cpp -I../csrc/ -L../ -lpatchmatch $LDFLAGS -o cpp_example.exe

export DYLD_LIBRARY_PATH=../:$DYLD_LIBRARY_PATH  # For macOS
export LD_LIBRARY_PATH=../:$LD_LIBRARY_PATH      # For Linux
time ./cpp_example.exe
stablediffusion-infinity/PyPatchMatch/examples/images/forest.bmp
ADDED
stablediffusion-infinity/PyPatchMatch/examples/images/forest_pruned.bmp
ADDED
stablediffusion-infinity/PyPatchMatch/examples/py_example.py
ADDED
@@ -0,0 +1,21 @@
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# File   : test.py
# Author : Jiayuan Mao
# Email  : maojiayuan@gmail.com
# Date   : 01/09/2020
#
# Distributed under terms of the MIT license.

from PIL import Image

import sys
sys.path.insert(0, '../')
import patch_match


if __name__ == '__main__':
    source = Image.open('./images/forest_pruned.bmp')
    result = patch_match.inpaint(source, patch_size=3)
    Image.fromarray(result).save('./images/forest_recovered.bmp')
stablediffusion-infinity/PyPatchMatch/examples/py_example_global_mask.py
ADDED
@@ -0,0 +1,27 @@
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# File   : test.py
# Author : Jiayuan Mao
# Email  : maojiayuan@gmail.com
# Date   : 01/09/2020
#
# Distributed under terms of the MIT license.

import numpy as np
from PIL import Image

import sys
sys.path.insert(0, '../')
import patch_match


if __name__ == '__main__':
    patch_match.set_verbose(True)
    source = Image.open('./images/forest_pruned.bmp')
    source = np.array(source)
    source[:100, :100] = 255
    global_mask = np.zeros_like(source[..., 0])
    global_mask[:100, :100] = 1
    result = patch_match.inpaint(source, global_mask=global_mask, patch_size=3)
    Image.fromarray(result).save('./images/forest_recovered.bmp')
stablediffusion-infinity/PyPatchMatch/opencv.pc
ADDED
@@ -0,0 +1,11 @@
prefix=/usr
exec_prefix=${prefix}
includedir=${prefix}/include
libdir=${exec_prefix}/lib

Name: opencv
Description: The opencv library
Version: 2.x.x
Cflags: -I${includedir}/opencv4
#Cflags: -I${includedir}/opencv -I${includedir}/opencv2
Libs: -L${libdir} -lopencv_calib3d -lopencv_imgproc -lopencv_xobjdetect -lopencv_hdf -lopencv_flann -lopencv_core -lopencv_dpm -lopencv_videoio -lopencv_reg -lopencv_objdetect -lopencv_stitching -lopencv_saliency -lopencv_features2d -lopencv_text -lopencv_calib3d -lopencv_line_descriptor -lopencv_superres -lopencv_ml -lopencv_viz -lopencv_optflow -lopencv_videostab -lopencv_bioinspired -lopencv_highgui -lopencv_freetype -lopencv_imgcodecs -lopencv_video -lopencv_photo -lopencv_surface_matching -lopencv_rgbd -lopencv_datasets -lopencv_ximgproc -lopencv_plot -lopencv_face -lopencv_stereo -lopencv_aruco -lopencv_phase_unwrapping -lopencv_bgsegm -lopencv_ccalib -lopencv_imgproc -lopencv_shape -lopencv_xphoto -lopencv_structured_light -lopencv_fuzzy
stablediffusion-infinity/PyPatchMatch/patch_match.py
ADDED
@@ -0,0 +1,191 @@
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# File   : patch_match.py
# Author : Jiayuan Mao
# Email  : maojiayuan@gmail.com
# Date   : 01/09/2020
#
# Distributed under terms of the MIT license.

import ctypes
import os.path as osp
from typing import Optional, Union

import numpy as np
from PIL import Image


__all__ = ['set_random_seed', 'set_verbose', 'inpaint', 'inpaint_regularity']


class CShapeT(ctypes.Structure):
    _fields_ = [
        ('width', ctypes.c_int),
        ('height', ctypes.c_int),
        ('channels', ctypes.c_int),
    ]


class CMatT(ctypes.Structure):
    _fields_ = [
        ('data_ptr', ctypes.c_void_p),
        ('shape', CShapeT),
        ('dtype', ctypes.c_int)
    ]


PMLIB = ctypes.CDLL(osp.join(osp.dirname(__file__), 'libpatchmatch.so'))

PMLIB.PM_set_random_seed.argtypes = [ctypes.c_uint]
PMLIB.PM_set_verbose.argtypes = [ctypes.c_int]
PMLIB.PM_free_pymat.argtypes = [CMatT]
PMLIB.PM_inpaint.argtypes = [CMatT, CMatT, ctypes.c_int]
PMLIB.PM_inpaint.restype = CMatT
PMLIB.PM_inpaint_regularity.argtypes = [CMatT, CMatT, CMatT, ctypes.c_int, ctypes.c_float]
PMLIB.PM_inpaint_regularity.restype = CMatT
PMLIB.PM_inpaint2.argtypes = [CMatT, CMatT, CMatT, ctypes.c_int]
PMLIB.PM_inpaint2.restype = CMatT
PMLIB.PM_inpaint2_regularity.argtypes = [CMatT, CMatT, CMatT, CMatT, ctypes.c_int, ctypes.c_float]
PMLIB.PM_inpaint2_regularity.restype = CMatT


def set_random_seed(seed: int):
    PMLIB.PM_set_random_seed(ctypes.c_uint(seed))


def set_verbose(verbose: bool):
    PMLIB.PM_set_verbose(ctypes.c_int(verbose))


def inpaint(
    image: Union[np.ndarray, Image.Image],
    mask: Optional[Union[np.ndarray, Image.Image]] = None,
    *,
    global_mask: Optional[Union[np.ndarray, Image.Image]] = None,
    patch_size: int = 15
) -> np.ndarray:
    """
    PatchMatch based inpainting proposed in:

        PatchMatch : A Randomized Correspondence Algorithm for Structural Image Editing
        C.Barnes, E.Shechtman, A.Finkelstein and Dan B.Goldman
        SIGGRAPH 2009

    Args:
        image (Union[np.ndarray, Image.Image]): the input image, should be 3-channel RGB/BGR.
        mask (Union[np.array, Image.Image], optional): the mask of the hole(s) to be filled, should be 1-channel.
            If not provided (None), the algorithm will treat all purely white pixels as the holes (255, 255, 255).
        global_mask (Union[np.array, Image.Image], optional): the target mask of the output image.
        patch_size (int): the patch size for the inpainting algorithm.

    Return:
        result (np.ndarray): the repaired image, of the same size as the input image.
    """

    if isinstance(image, Image.Image):
        image = np.array(image)
    image = np.ascontiguousarray(image)
    assert image.ndim == 3 and image.shape[2] == 3 and image.dtype == 'uint8'

    if mask is None:
        mask = (image == (255, 255, 255)).all(axis=2, keepdims=True).astype('uint8')
        mask = np.ascontiguousarray(mask)
    else:
        mask = _canonize_mask_array(mask)

    if global_mask is None:
        ret_pymat = PMLIB.PM_inpaint(np_to_pymat(image), np_to_pymat(mask), ctypes.c_int(patch_size))
    else:
        global_mask = _canonize_mask_array(global_mask)
        ret_pymat = PMLIB.PM_inpaint2(np_to_pymat(image), np_to_pymat(mask), np_to_pymat(global_mask), ctypes.c_int(patch_size))

    ret_npmat = pymat_to_np(ret_pymat)
    PMLIB.PM_free_pymat(ret_pymat)

    return ret_npmat


def inpaint_regularity(
    image: Union[np.ndarray, Image.Image],
    mask: Optional[Union[np.ndarray, Image.Image]],
    ijmap: np.ndarray,
    *,
    global_mask: Optional[Union[np.ndarray, Image.Image]] = None,
    patch_size: int = 15, guide_weight: float = 0.25
) -> np.ndarray:
    if isinstance(image, Image.Image):
        image = np.array(image)
    image = np.ascontiguousarray(image)

    assert isinstance(ijmap, np.ndarray) and ijmap.ndim == 3 and ijmap.shape[2] == 3 and ijmap.dtype == 'float32'
    ijmap = np.ascontiguousarray(ijmap)

    assert image.ndim == 3 and image.shape[2] == 3 and image.dtype == 'uint8'
    if mask is None:
        mask = (image == (255, 255, 255)).all(axis=2, keepdims=True).astype('uint8')
        mask = np.ascontiguousarray(mask)
    else:
        mask = _canonize_mask_array(mask)

    if global_mask is None:
        ret_pymat = PMLIB.PM_inpaint_regularity(np_to_pymat(image), np_to_pymat(mask), np_to_pymat(ijmap), ctypes.c_int(patch_size), ctypes.c_float(guide_weight))
    else:
        global_mask = _canonize_mask_array(global_mask)
        ret_pymat = PMLIB.PM_inpaint2_regularity(np_to_pymat(image), np_to_pymat(mask), np_to_pymat(global_mask), np_to_pymat(ijmap), ctypes.c_int(patch_size), ctypes.c_float(guide_weight))

    ret_npmat = pymat_to_np(ret_pymat)
    PMLIB.PM_free_pymat(ret_pymat)

    return ret_npmat


def _canonize_mask_array(mask):
    if isinstance(mask, Image.Image):
        mask = np.array(mask)
    if mask.ndim == 2 and mask.dtype == 'uint8':
        mask = mask[..., np.newaxis]
    assert mask.ndim == 3 and mask.shape[2] == 1 and mask.dtype == 'uint8'
    return np.ascontiguousarray(mask)


dtype_pymat_to_ctypes = [
    ctypes.c_uint8,
    ctypes.c_int8,
    ctypes.c_uint16,
    ctypes.c_int16,
    ctypes.c_int32,
    ctypes.c_float,
    ctypes.c_double,
]


dtype_np_to_pymat = {
    'uint8': 0,
    'int8': 1,
    'uint16': 2,
    'int16': 3,
    'int32': 4,
    'float32': 5,
    'float64': 6,
}


def np_to_pymat(npmat):
    assert npmat.ndim == 3
    return CMatT(
        ctypes.cast(npmat.ctypes.data, ctypes.c_void_p),
        CShapeT(npmat.shape[1], npmat.shape[0], npmat.shape[2]),
        dtype_np_to_pymat[str(npmat.dtype)]
    )


def pymat_to_np(pymat):
    npmat = np.ctypeslib.as_array(
        ctypes.cast(pymat.data_ptr, ctypes.POINTER(dtype_pymat_to_ctypes[pymat.dtype])),
        (pymat.shape.height, pymat.shape.width, pymat.shape.channels)
    )
    ret = np.empty(npmat.shape, npmat.dtype)
    ret[:] = npmat
    return ret
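A small usage sketch for the wrapper above; the file names and the constant guidance map are invented for illustration, and inpaint_regularity expects a float32 H×W×3 ijmap:

```python
import numpy as np
from PIL import Image
import patch_match

patch_match.set_random_seed(42)
image = np.array(Image.open("input.png").convert("RGB"))   # placeholder path

# default behaviour: treat pure-white pixels as the holes to be filled
result = patch_match.inpaint(image, patch_size=3)

# regularity-guided variant with a trivial (all-zero) guidance map
ijmap = np.zeros((image.shape[0], image.shape[1], 3), dtype=np.float32)
result = patch_match.inpaint_regularity(image, None, ijmap, patch_size=3, guide_weight=0.25)
Image.fromarray(result).save("output.png")
```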
stablediffusion-infinity/app.py
ADDED
@@ -0,0 +1,234 @@
import io
import base64
import os
from random import sample
from sched import scheduler

import uvicorn
from fastapi import FastAPI, Response
from fastapi.staticfiles import StaticFiles

import httpx
from urllib.parse import urljoin


import numpy as np
import torch
from torch import autocast
from diffusers import StableDiffusionPipeline, StableDiffusionInpaintPipeline
from PIL import Image
from PIL import ImageOps
import gradio as gr
import base64
import skimage
import skimage.measure
from utils import *

app = FastAPI()

auth_token = os.environ.get("API_TOKEN") or True

WHITES = 66846720
MASK = Image.open("mask.png")
try:
    SAMPLING_MODE = Image.Resampling.LANCZOS
except Exception as e:
    SAMPLING_MODE = Image.LANCZOS


blocks = gr.Blocks().queue()
model = {}


def get_model():
    if "text2img" not in model:
        text2img = StableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="fp16",
            torch_dtype=torch.float16,
            use_auth_token=auth_token,
        ).to("cuda")
        inpaint = StableDiffusionInpaintPipeline(
            vae=text2img.vae,
            text_encoder=text2img.text_encoder,
            tokenizer=text2img.tokenizer,
            unet=text2img.unet,
            scheduler=text2img.scheduler,
            safety_checker=text2img.safety_checker,
            feature_extractor=text2img.feature_extractor,
        ).to("cuda")

        # lms = LMSDiscreteScheduler(
        #     beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear")

        # img2img = StableDiffusionImg2ImgPipeline(
        #     vae=text2img.vae,
        #     text_encoder=text2img.text_encoder,
        #     tokenizer=text2img.tokenizer,
        #     unet=text2img.unet,
        #     scheduler=lms,
        #     safety_checker=text2img.safety_checker,
        #     feature_extractor=text2img.feature_extractor,
        # ).to("cuda")
        # try:
        #     total_memory = torch.cuda.get_device_properties(0).total_memory // (
        #         1024 ** 3
        #     )
        #     if total_memory <= 5:
        #         inpaint.enable_attention_slicing()
        # except:
        #     pass
        model["text2img"] = text2img
        model["inpaint"] = inpaint
        # model["img2img"] = img2img

    return model["text2img"], model["inpaint"]
    # model["img2img"]


get_model()


def run_outpaint(
    input_image,
    prompt_text,
    strength,
    guidance,
    step,
    fill_mode,
):
    text2img, inpaint = get_model()
    sel_buffer = np.array(input_image)
    img = sel_buffer[:, :, 0:3]
    mask = sel_buffer[:, :, -1]
    process_size = 512

    mask_sum = mask.sum()
    if mask_sum >= WHITES:
        print("inpainting with fixed mask")
        mask = np.array(MASK)[:, :, 0]
        img, mask = functbl[fill_mode](img, mask)
        init_image = Image.fromarray(img)
        mask = 255 - mask
        mask = skimage.measure.block_reduce(mask, (8, 8), np.max)
        mask = mask.repeat(8, axis=0).repeat(8, axis=1)
        mask_image = Image.fromarray(mask)

        # mask_image=mask_image.filter(ImageFilter.GaussianBlur(radius = 8))
        with autocast("cuda"):
            images = inpaint(
                prompt=prompt_text,
                init_image=init_image.resize(
                    (process_size, process_size), resample=SAMPLING_MODE
                ),
                mask_image=mask_image.resize((process_size, process_size)),
                strength=strength,
                num_inference_steps=step,
                guidance_scale=guidance,
            )
    elif mask_sum > 0 and mask_sum < WHITES:
        print("inpainting")
        img, mask = functbl[fill_mode](img, mask)
        init_image = Image.fromarray(img)
        mask = 255 - mask
        mask = skimage.measure.block_reduce(mask, (8, 8), np.max)
        mask = mask.repeat(8, axis=0).repeat(8, axis=1)
        mask_image = Image.fromarray(mask)

        # mask_image=mask_image.filter(ImageFilter.GaussianBlur(radius = 8))
        with autocast("cuda"):
            images = inpaint(
                prompt=prompt_text,
                init_image=init_image.resize(
                    (process_size, process_size), resample=SAMPLING_MODE
                ),
                mask_image=mask_image.resize((process_size, process_size)),
                strength=strength,
                num_inference_steps=step,
                guidance_scale=guidance,
            )
    else:
        print("text2image")
        with autocast("cuda"):
            images = text2img(
                prompt=prompt_text, height=process_size, width=process_size,
            )

    return images['sample'][0], images["nsfw_content_detected"][0]


with blocks as demo:

    with gr.Row():

        with gr.Column(scale=3, min_width=270):
            sd_prompt = gr.Textbox(
                label="Prompt", placeholder="input your prompt here", lines=4
            )
        with gr.Column(scale=2, min_width=150):
            sd_strength = gr.Slider(
                label="Strength", minimum=0.0, maximum=1.0, value=0.75, step=0.01
            )
        with gr.Column(scale=1, min_width=150):
            sd_step = gr.Number(label="Step", value=50, precision=0)
            sd_guidance = gr.Number(label="Guidance", value=7.5)
    with gr.Row():
        with gr.Column(scale=4, min_width=600):
            init_mode = gr.Radio(
                label="Init mode",
                choices=[
                    "patchmatch",
                    "edge_pad",
                    "cv2_ns",
                    "cv2_telea",
                    "gaussian",
                    "perlin",
                ],
                value="patchmatch",
                type="value",
            )

    model_input = gr.Image(label="Input", type="pil", image_mode="RGBA")
    proceed_button = gr.Button("Proceed", elem_id="proceed")
    model_output = gr.Image(label="Output")
    is_nsfw = gr.JSON()

    proceed_button.click(
        fn=run_outpaint,
        inputs=[
            model_input,
            sd_prompt,
            sd_strength,
            sd_guidance,
            sd_step,
            init_mode,
        ],
        outputs=[model_output, is_nsfw],
    )


blocks.config['dev_mode'] = False

S3_HOST = "https://s3.amazonaws.com"


@app.get("/uploads/{path:path}")
async def uploads(path: str, response: Response):
    async with httpx.AsyncClient() as client:
        proxy = await client.get(f"{S3_HOST}/{path}")
    response.body = proxy.content
    response.status_code = proxy.status_code
    response.headers['Access-Control-Allow-Origin'] = '*'
    response.headers['Access-Control-Allow-Methods'] = 'POST, GET, DELETE, OPTIONS'
    response.headers['Access-Control-Allow-Headers'] = 'Authorization, Content-Type'
    return response


app = gr.mount_gradio_app(app, blocks, "/gradio",
                          gradio_api_url="http://localhost:7860/gradio")

app.mount("/", StaticFiles(directory="../static", html=True), name="static")

if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=7860,
                log_level="debug", reload=False)
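run_outpaint above reads its input as an RGBA buffer: channels 0-2 are the canvas content and the alpha channel acts as the mask (alpha > 0 marks pixels that already have content; an all-255 alpha switches to the fixed mask.png, an all-zero alpha falls back to plain text2img). A small sketch of assembling such an input outside the web UI (file names are placeholders):

```python
import numpy as np
from PIL import Image

canvas = np.zeros((512, 512, 4), dtype=np.uint8)

# paste some existing content into the top-left quarter of the canvas
photo = np.array(Image.open("init.png").convert("RGB").resize((256, 256)))
canvas[:256, :256, :3] = photo
canvas[:256, :256, 3] = 255   # alpha=255 marks pixels that already exist

rgba = Image.fromarray(canvas, mode="RGBA")
# `rgba` is the kind of value the Gradio Image component hands to run_outpaint(...)
```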
stablediffusion-infinity/mask.png
ADDED
stablediffusion-infinity/perlin2d.py
ADDED
@@ -0,0 +1,45 @@
import numpy as np

##########
# https://stackoverflow.com/questions/42147776/producing-2d-perlin-noise-with-numpy/42154921#42154921
def perlin(x, y, seed=0):
    # permutation table
    np.random.seed(seed)
    p = np.arange(256, dtype=int)
    np.random.shuffle(p)
    p = np.stack([p, p]).flatten()
    # coordinates of the top-left
    xi, yi = x.astype(int), y.astype(int)
    # internal coordinates
    xf, yf = x - xi, y - yi
    # fade factors
    u, v = fade(xf), fade(yf)
    # noise components
    n00 = gradient(p[p[xi] + yi], xf, yf)
    n01 = gradient(p[p[xi] + yi + 1], xf, yf - 1)
    n11 = gradient(p[p[xi + 1] + yi + 1], xf - 1, yf - 1)
    n10 = gradient(p[p[xi + 1] + yi], xf - 1, yf)
    # combine noises
    x1 = lerp(n00, n10, u)
    x2 = lerp(n01, n11, u)  # FIX1: I was using n10 instead of n01
    return lerp(x1, x2, v)  # FIX2: I also had to reverse x1 and x2 here


def lerp(a, b, x):
    "linear interpolation"
    return a + x * (b - a)


def fade(t):
    "6t^5 - 15t^4 + 10t^3"
    return 6 * t ** 5 - 15 * t ** 4 + 10 * t ** 3


def gradient(h, x, y):
    "grad converts h to the right gradient vector and return the dot product with (x,y)"
    vectors = np.array([[0, 1], [0, -1], [1, 0], [-1, 0]])
    g = vectors[h % 4]
    return g[:, :, 0] * x + g[:, :, 1] * y


##########
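A quick usage sketch for the helper above; the 512-pixel grid over a 5×5 lattice mirrors how utils.py calls it, but nothing in this snippet is part of the file itself:

```python
import numpy as np
from perlin2d import perlin

lin = np.linspace(0, 5, 512, endpoint=False)
x, y = np.meshgrid(lin, lin)             # 512x512 sample grid over a 5x5 noise lattice
noise = perlin(x, y, seed=0)             # values roughly in [-1, 1]
tile = ((noise + 1) * 0.5 * 255).astype(np.uint8)  # grayscale noise tile, as used for canvas fill
```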
stablediffusion-infinity/readme.md
ADDED
@@ -0,0 +1,93 @@
# stablediffusion-infinity

Outpainting with Stable Diffusion on an infinite canvas.

[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/lkwq007/stablediffusion-infinity/blob/master/stablediffusion_infinity_colab.ipynb)

Start with init_image:

https://user-images.githubusercontent.com/1665437/190231611-fc263115-0fb9-4f2d-a71b-7e500c1e311d.mp4

Start with text2img:

https://user-images.githubusercontent.com/1665437/190212025-f4a82c46-0ff1-4ca2-b79b-6c81601e3eed.mp4

It is recommended to run the notebook on a local server for better interactive control.

The notebook might work on Windows (see https://github.com/lkwq007/stablediffusion-infinity/issues/12 for more information) and on Apple Silicon devices (untested; see the guide at https://huggingface.co/docs/diffusers/optimization/mps).

## Status

This project mainly works as a proof of concept: the UI design is relatively weak, and the quality of results is not guaranteed.
You may need to do prompt engineering or change the size of the selection box to get better outpainting results.

Pull requests are welcome for better UI control, ideas to achieve better results, or any other improvements.

## Setup environment

Set up with `environment.yml`:
```
git clone --recurse-submodules https://github.com/lkwq007/stablediffusion-infinity
cd stablediffusion-infinity
conda env create -f environment.yml
```

If `environment.yml` doesn't work for you, you may install the dependencies manually:
```
conda create -n sd-inf python=3.10
conda activate sd-inf
conda install pytorch torchvision torchaudio cudatoolkit=11.3 -c pytorch
conda install scipy scikit-image
conda install -c conda-forge diffusers transformers ftfy
pip install opencv-python
pip install gradio==3.4
```

On Windows, you may need to replace `pip install opencv-python` with `conda install -c conda-forge opencv`.

## CPP library (optional)

Note that the `opencv` library (e.g. `libopencv-dev`/`opencv-devel`; the package name may differ across distributions) is required for `PyPatchMatch`. You may need to install `opencv` yourself. If `opencv` is not installed, the `patch_match` option (usually better quality) won't work.

## How-to

```
conda activate sd-inf
python app.py
```

## Running with Docker

This should get you started without needing to install anything manually, provided you have an environment with Docker and an Nvidia GPU.
It has been tested with Docker Desktop on Windows 10 using the WSL2 backend.

First, update the `.env` file with your Huggingface token from https://huggingface.co/settings/tokens

Then open a shell that has Docker and run these commands:

```
cd stablediffusion-infinity
docker-compose build
docker-compose up
```

Watch the log for the URL to open in your browser. Choose the one that starts with http://127.0.0.1:8888/


## FAQs

- Troubleshooting on Windows:
  - https://github.com/lkwq007/stablediffusion-infinity/issues/12
- The false positive rate of the safety checker is quite high:
  - https://github.com/lkwq007/stablediffusion-infinity/issues/8#issuecomment-1248448453
- What is init_mode?
  - init_mode indicates how to fill the empty/masked region; usually `patch_match` is better than the others.
- The GUI is lagging on Colab:
  - It is recommended to run the notebook on a local server, since the interactions and canvas content updates are handled by the Python backend on the server side; that is how `ipycanvas` works.
  - Colab doesn't support the latest version of `ipycanvas`, which may have better performance.

## Credit

The code of `perlin2d.py` is from https://stackoverflow.com/questions/42147776/producing-2d-perlin-noise-with-numpy/42154921#42154921 and is **not** included in the scope of the LICENSE used in this repo.
stablediffusion-infinity/utils.py
ADDED
@@ -0,0 +1,140 @@
from PIL import Image
from PIL import ImageFilter
import cv2
import numpy as np
import scipy
import scipy.signal
from scipy.spatial import cKDTree

import os
from perlin2d import *

patch_match_compiled = True

from PyPatchMatch import patch_match


def edge_pad(img, mask, mode=1):
    if mode == 0:
        nmask = mask.copy()
        nmask[nmask > 0] = 1
        res0 = 1 - nmask
        res1 = nmask
        p0 = np.stack(res0.nonzero(), axis=0).transpose()
        p1 = np.stack(res1.nonzero(), axis=0).transpose()
        min_dists, min_dist_idx = cKDTree(p1).query(p0, 1)
        loc = p1[min_dist_idx]
        for (a, b), (c, d) in zip(p0, loc):
            img[a, b] = img[c, d]
    elif mode == 1:
        record = {}
        kernel = [[1] * 3 for _ in range(3)]
        nmask = mask.copy()
        nmask[nmask > 0] = 1
        res = scipy.signal.convolve2d(
            nmask, kernel, mode="same", boundary="fill", fillvalue=1
        )
        res[nmask < 1] = 0
        res[res == 9] = 0
        res[res > 0] = 1
        ylst, xlst = res.nonzero()
        queue = [(y, x) for y, x in zip(ylst, xlst)]
        # bfs here
        cnt = res.astype(np.float32)
        acc = img.astype(np.float32)
        step = 1
        h = acc.shape[0]
        w = acc.shape[1]
        offset = [(1, 0), (-1, 0), (0, 1), (0, -1)]
        while queue:
            target = []
            for y, x in queue:
                val = acc[y][x]
                for yo, xo in offset:
                    yn = y + yo
                    xn = x + xo
                    if 0 <= yn < h and 0 <= xn < w and nmask[yn][xn] < 1:
                        if record.get((yn, xn), step) == step:
                            acc[yn][xn] = acc[yn][xn] * cnt[yn][xn] + val
                            cnt[yn][xn] += 1
                            acc[yn][xn] /= cnt[yn][xn]
                        if (yn, xn) not in record:
                            record[(yn, xn)] = step
                            target.append((yn, xn))
            step += 1
            queue = target
        img = acc.astype(np.uint8)
    else:
        nmask = mask.copy()
        ylst, xlst = nmask.nonzero()
        yt, xt = ylst.min(), xlst.min()
        yb, xb = ylst.max(), xlst.max()
        content = img[yt : yb + 1, xt : xb + 1]
        img = np.pad(
            content,
            ((yt, mask.shape[0] - yb - 1), (xt, mask.shape[1] - xb - 1), (0, 0)),
            mode="edge",
        )
    return img, mask


def perlin_noise(img, mask):
    lin = np.linspace(0, 5, mask.shape[0], endpoint=False)
    x, y = np.meshgrid(lin, lin)
    avg = img.mean(axis=0).mean(axis=0)
    # noise=[((perlin(x, y)+1)*128+avg[i]).astype(np.uint8) for i in range(3)]
    noise = [((perlin(x, y) + 1) * 0.5 * 255).astype(np.uint8) for i in range(3)]
    noise = np.stack(noise, axis=-1)
    # mask=skimage.measure.block_reduce(mask,(8,8),np.min)
    # mask=mask.repeat(8, axis=0).repeat(8, axis=1)
    # mask_image=Image.fromarray(mask)
    # mask_image=mask_image.filter(ImageFilter.GaussianBlur(radius = 4))
    # mask=np.array(mask_image)
    nmask = mask.copy()
    # nmask=nmask/255.0
    nmask[mask > 0] = 1
    img = nmask[:, :, np.newaxis] * img + (1 - nmask[:, :, np.newaxis]) * noise
    # img=img.astype(np.uint8)
    return img, mask


def gaussian_noise(img, mask):
    noise = np.random.randn(mask.shape[0], mask.shape[1], 3)
    noise = (noise + 1) / 2 * 255
    noise = noise.astype(np.uint8)
    nmask = mask.copy()
    nmask[mask > 0] = 1
    img = nmask[:, :, np.newaxis] * img + (1 - nmask[:, :, np.newaxis]) * noise
    return img, mask


def cv2_telea(img, mask):
    ret = cv2.inpaint(img, 255 - mask, 5, cv2.INPAINT_TELEA)
    return ret, mask


def cv2_ns(img, mask):
    ret = cv2.inpaint(img, 255 - mask, 5, cv2.INPAINT_NS)
    return ret, mask


def patch_match_func(img, mask):
    ret = patch_match.inpaint(img, mask=255 - mask, patch_size=3)
    return ret, mask


def mean_fill(img, mask):
    avg = img.mean(axis=0).mean(axis=0)
    img[mask < 1] = avg
    return img, mask


functbl = {
    "gaussian": gaussian_noise,
    "perlin": perlin_noise,
    "edge_pad": edge_pad,
    "patchmatch": patch_match_func if (os.name != "nt" and patch_match_compiled) else edge_pad,
    "cv2_ns": cv2_ns,
    "cv2_telea": cv2_telea,
    "mean_fill": mean_fill,
}
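The fill functions above share one convention: mask values greater than zero mark pixels that already have content, and each function returns the filled image together with the (possibly replaced) mask; run_outpaint then inverts that mask before handing it to the diffusion inpainting pipeline. A toy sketch of calling them directly (the arrays are invented for illustration):

```python
import numpy as np
from utils import functbl

img = np.zeros((64, 64, 3), dtype=np.uint8)
mask = np.zeros((64, 64), dtype=np.uint8)
img[:, :32] = 200    # left half of the canvas already has content...
mask[:, :32] = 255   # ...and the mask marks it as known

filled, _ = functbl["edge_pad"](img, mask)          # extend known pixels into the empty half
noisy, _ = functbl["gaussian"](img.copy(), mask)    # or fill the empty half with random noise
```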