Spaces:
No application file
kevinwang676
committed on
Upload folder using huggingface_hub
This view is limited to 50 files because it contains too many changes.
- .gitattributes +18 -0
- .gitignore +134 -0
- .gitmodules +11 -0
- Dockerfile +19 -0
- LICENSE +21 -0
- MMCM/.gitignore +139 -0
- MMCM/Dockerfile +83 -0
- MMCM/README.md +2 -0
- MMCM/requirements.txt +291 -0
- MMCM/setup.py +25 -0
- README-zh.md +545 -0
- configs/model/T2I_all_model.py +15 -0
- configs/model/ip_adapter.py +66 -0
- configs/model/lcm_model.py +17 -0
- configs/model/motion_model.py +22 -0
- configs/model/negative_prompt.py +32 -0
- configs/model/referencenet.py +14 -0
- configs/tasks/example.yaml +215 -0
- controlnet_aux/.gitignore +178 -0
- controlnet_aux/LICENSE.txt +201 -0
- controlnet_aux/README.md +112 -0
- controlnet_aux/setup.py +233 -0
- controlnet_aux/src/controlnet_aux/__init__.py +18 -0
- controlnet_aux/src/controlnet_aux/canny/__init__.py +36 -0
- controlnet_aux/src/controlnet_aux/dwpose/__init__.py +235 -0
- controlnet_aux/src/controlnet_aux/dwpose/dwpose_config/dwpose-l_384x288.py +257 -0
- controlnet_aux/src/controlnet_aux/dwpose/util.py +303 -0
- controlnet_aux/src/controlnet_aux/dwpose/wholebody.py +123 -0
- controlnet_aux/src/controlnet_aux/dwpose/yolox_config/yolox_l_8xb8-300e_coco.py +245 -0
- controlnet_aux/src/controlnet_aux/hed/__init__.py +129 -0
- controlnet_aux/src/controlnet_aux/leres/__init__.py +118 -0
- controlnet_aux/src/controlnet_aux/leres/leres/LICENSE +23 -0
- controlnet_aux/src/controlnet_aux/leres/leres/Resnet.py +199 -0
- controlnet_aux/src/controlnet_aux/leres/leres/Resnext_torch.py +237 -0
- controlnet_aux/src/controlnet_aux/leres/leres/__init__.py +0 -0
- controlnet_aux/src/controlnet_aux/leres/leres/depthmap.py +548 -0
- controlnet_aux/src/controlnet_aux/leres/leres/multi_depth_model_woauxi.py +35 -0
- controlnet_aux/src/controlnet_aux/leres/leres/net_tools.py +54 -0
- controlnet_aux/src/controlnet_aux/leres/leres/network_auxi.py +417 -0
- controlnet_aux/src/controlnet_aux/leres/pix2pix/LICENSE +19 -0
- controlnet_aux/src/controlnet_aux/leres/pix2pix/__init__.py +0 -0
- controlnet_aux/src/controlnet_aux/leres/pix2pix/models/__init__.py +67 -0
- controlnet_aux/src/controlnet_aux/leres/pix2pix/models/base_model.py +244 -0
- controlnet_aux/src/controlnet_aux/leres/pix2pix/models/base_model_hg.py +58 -0
- controlnet_aux/src/controlnet_aux/leres/pix2pix/models/networks.py +623 -0
- controlnet_aux/src/controlnet_aux/leres/pix2pix/models/pix2pix4depth_model.py +155 -0
- controlnet_aux/src/controlnet_aux/leres/pix2pix/options/__init__.py +1 -0
- controlnet_aux/src/controlnet_aux/leres/pix2pix/options/base_options.py +156 -0
- controlnet_aux/src/controlnet_aux/leres/pix2pix/options/test_options.py +22 -0
- controlnet_aux/src/controlnet_aux/leres/pix2pix/util/__init__.py +1 -0
.gitattributes
CHANGED
@@ -33,3 +33,21 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
+ data/images/duffy.png filter=lfs diff=lfs merge=lfs -text
+ data/result_video/Mona_Lisa,_by_Leonardo_da_Vinci,_from_C2RMF_retouched.mp4 filter=lfs diff=lfs merge=lfs -text
+ data/result_video/Portrait-of-Dr.-Gachet.mp4 filter=lfs diff=lfs merge=lfs -text
+ data/result_video/Self-Portrait-with-Cropped-Hair.mp4 filter=lfs diff=lfs merge=lfs -text
+ data/result_video/The-Laughing-Cavalier.mp4 filter=lfs diff=lfs merge=lfs -text
+ data/result_video/boy_play_guitar.mp4 filter=lfs diff=lfs merge=lfs -text
+ data/result_video/boy_play_guitar2.mp4 filter=lfs diff=lfs merge=lfs -text
+ data/result_video/dufu.mp4 filter=lfs diff=lfs merge=lfs -text
+ data/result_video/girl_play_guitar2.mp4 filter=lfs diff=lfs merge=lfs -text
+ data/result_video/girl_play_guitar4.mp4 filter=lfs diff=lfs merge=lfs -text
+ data/result_video/jinkesi2.mp4 filter=lfs diff=lfs merge=lfs -text
+ data/result_video/river.mp4 filter=lfs diff=lfs merge=lfs -text
+ data/result_video/seaside2.mp4 filter=lfs diff=lfs merge=lfs -text
+ data/result_video/seaside4.mp4 filter=lfs diff=lfs merge=lfs -text
+ data/result_video/seaside_girl.mp4 filter=lfs diff=lfs merge=lfs -text
+ data/result_video/waterfall4.mp4 filter=lfs diff=lfs merge=lfs -text
+ data/result_video/yongen.mp4 filter=lfs diff=lfs merge=lfs -text
+ mmcm/vision/feature_extractor/wenlan/example/vivo_5w_lyrics.lyric filter=lfs diff=lfs merge=lfs -text
.gitignore
ADDED
@@ -0,0 +1,134 @@
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
pip-wheel-metadata/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
.hypothesis/
.pytest_cache/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
target/

# Jupyter Notebook
.ipynb_checkpoints

# IPython
profile_default/
ipython_config.py

# pyenv
.python-version

# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don’t work, or not
# install all needed dependencies.
#Pipfile.lock

# celery beat schedule file
celerybeat-schedule

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/

*.swp
.*.swp

.DS_Store

# project
outputs/
results/
scripts/codetest/
# configs/train/video_creation_anchorxia_*
.gitmodules
ADDED
@@ -0,0 +1,11 @@
[submodule "MMCM"]
    path = MMCM
    url = https://github.com/TMElyralab/MMCM.git
[submodule "controlnet_aux"]
    path = controlnet_aux
    url = https://github.com/TMElyralab/controlnet_aux.git
    branch = tme
[submodule "diffusers"]
    path = diffusers
    url = https://github.com/TMElyralab/diffusers.git
    branch = tme
Dockerfile
ADDED
@@ -0,0 +1,19 @@
FROM anchorxia/musev:1.0.0

# MAINTAINER info
LABEL MAINTAINER="anchorxia"
LABEL Email="anchorxia@tencent.com"
LABEL Description="musev gpu runtime image, base docker is pytorch/pytorch:2.0.1-cuda11.7-cudnn8-devel"
ARG DEBIAN_FRONTEND=noninteractive

USER root

SHELL ["/bin/bash", "--login", "-c"]

RUN . /opt/conda/etc/profile.d/conda.sh \
    && echo "source activate musev" >> ~/.bashrc \
    && conda activate musev \
    && conda env list \
    && pip install cuid

USER root
LICENSE
ADDED
@@ -0,0 +1,21 @@
MIT License

Copyright (c) 2024 TMElyralab

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
MMCM/.gitignore
ADDED
@@ -0,0 +1,139 @@
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
pip-wheel-metadata/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
.hypothesis/
.pytest_cache/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
target/

# Jupyter Notebook
.ipynb_checkpoints

# IPython
profile_default/
ipython_config.py

# pyenv
.python-version

# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don’t work, or not
# install all needed dependencies.
#Pipfile.lock

# celery beat schedule file
celerybeat-schedule

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/

*.swp
.*.swp
dataset/files
experiments
log
csvs

.idea
.vscode
__pycache__/
*.code-workspace
.DS_Store
third_party/
.polaris_cache/
*.lock
MMCM/Dockerfile
ADDED
@@ -0,0 +1,83 @@
# FROM mirrors.tencent.com/todacc/venus-std-base-cuda11.8:0.1.0
FROM mirrors.tencent.com/todacc/venus-std-ext-cuda11.8-pytorch2.0-tf2.12-py3.10:0.7.0

# MAINTAINER info
LABEL MAINTAINER="anchorxia"
LABEL Email="xzqjack@hotmail.com"
LABEL Description="gpu development image, from mirrors.tencent.com/todacc/venus-std-ext-cuda11.8-pytorch2.0-tf2.12-py3.10:0.7.0"

USER root
# Install required system packages
# RUN GENERIC_REPO_URL="http://mirrors.tencent.com/repository/generic/venus_repo/image_res" \
#     && cd /data/ \
#     && wget -q $GENERIC_REPO_URL/gcc/gcc-11.2.0.zip \
#     && unzip -q gcc-11.2.0.zip \
#     && cd gcc-releases-gcc-11.2.0 \
#     && ./contrib/download_prerequisites \
#     && ./configure --enable-bootstrap --enable-languages=c,c++ --enable-threads=posix --enable-checking=release --enable-multilib --with-system-zlib \
#     && make --silent -j10 \
#     && make --silent install \
#     && gcc -v \
#     && rm -rf /data/gcc-releases-gcc-11.2.0 /data/gcc-11.2.0.zip

# RUN yum update -y \
#     && yum install -y epel-release \
#     && yum install -y ffmpeg \
#     && yum install -y Xvfb \
#     && yum install -y centos-release-scl devtoolset-11
RUN yum install -y wget zsh git curl tmux cmake htop iotop git-lfs zip \
    && yum install -y autojump autojump-zsh portaudio portaudio-devel \
    && yum clean all

USER mqq
RUN source ~/.bashrc \
    && GENERIC_REPO_URL="http://mirrors.tencent.com/repository/generic/venus_repo/image_res" \
    && conda deactivate \
    # && conda remove -y -n env-2.7.18 --all \
    # && conda remove -y -n env-3.6.8 --all \
    # && conda remove -y -n env-3.7.7 --all \
    # && conda remove -y -n env-3.8.8 --all \
    # && conda remove -y -n env-3.9.2 --all \
    # && conda remove -y -n env-novelai --all \
    && conda create -n projectv python=3.10.6 -y \
    && conda activate projectv \
    && pip install venus-sdk -q -i https://mirrors.tencent.com/repository/pypi/tencent_pypi/simple \
       --extra-index-url https://mirrors.tencent.com/pypi/simple/ \
    && pip install tensorflow==2.12.0 tensorboard==2.12.0 \
    && pip install torch==2.0.1+cu118 torchvision==0.15.2+cu118 -f https://mirror.sjtu.edu.cn/pytorch-wheels/torch_stable.html -i https://mirrors.bfsu.edu.cn/pypi/web/simple -U \
    # install xformers, supporting different GPU models
    && pip install ninja==1.11.1 \
    # && git clone https://github.com/facebookresearch/xformers.git \
    # && cd xformers \
    # && git checkout v0.0.17rc482 \
    # && git submodule update --init --recursive \
    # && pip install numpy==1.23.4 pyre-extensions==0.0.23 \
    # && FORCE_CUDA="1" MAX_JOBS=1 TORCH_CUDA_ARCH_LIST="6.1;7.0;7.5;8.0;8.6" pip install -e . \
    # && cd .. \
    # install a batch of packages
    && pip install --no-cache-dir transformers bitsandbytes decord accelerate xformers omegaconf einops imageio==2.31.1 \
    && pip install --no-cache-dir pandas h5py matplotlib modelcards pynvml black pytest moviepy torch-tb-profiler scikit-learn librosa ffmpeg easydict webp controlnet_aux mediapipe \
    && pip install --no-cache-dir Cython easydict gdown infomap insightface ipython librosa onnx onnxruntime onnxsim opencv_python Pillow protobuf pytube PyYAML \
    && pip install --no-cache-dir requests scipy six tqdm gradio albumentations opencv-contrib-python imageio-ffmpeg pytorch-lightning test-tube \
    && pip install --no-cache-dir timm addict yapf prettytable safetensors basicsr fvcore pycocotools wandb gunicorn \
    && pip install --no-cache-dir streamlit webdataset kornia open_clip_torch streamlit-drawable-canvas torchmetrics \
    # install invisible watermark
    && pip install --no-cache-dir invisible-watermark==0.1.5 gdown==4.5.3 ftfy==6.1.1 modelcards==0.1.6 \
    # install openmm-related packages
    && pip install --no-cache-dir -U openmim \
    && mim install mmengine \
    && mim install "mmcv>=2.0.1" \
    && mim install "mmdet>=3.1.0" \
    && mim install "mmpose>=1.1.0" \
    # jupyters
    && pip install ipywidgets==8.0.3 \
    && python -m ipykernel install --user --name projectv --display-name "python(projectv)" \
    && pip install --no-cache-dir matplotlib==3.6.2 redis==4.5.1 pydantic[dotenv]==1.10.2 loguru==0.6.0 IProgress==0.4 \
    && pip install --no-cache-dir cos-python-sdk-v5==1.9.22 coscmd==1.8.6.30 \
    # must be pip-installed last, to avoid incompatibility with jupyter
    && pip install --no-cache-dir markupsafe==2.0.1 \
    && wget -P /tmp $GENERIC_REPO_URL/cpu/clean-layer.sh \
    && sh /tmp/clean-layer.sh

ENV LD_LIBRARY_PATH=/usr/local/lib64:$LD_LIBRARY_PATH
USER root
MMCM/README.md
ADDED
@@ -0,0 +1,2 @@
# MMCM
Process package for multi media, cross multi modal.
MMCM/requirements.txt
ADDED
@@ -0,0 +1,291 @@
absl-py==2.1.0
accelerate==0.22.0
addict==2.4.0
aiofiles==23.2.1
aiohttp==3.9.1
aiosignal==1.3.1
albumentations==1.3.1
aliyun-python-sdk-core==2.14.0
aliyun-python-sdk-kms==2.16.2
altair==5.2.0
antlr4-python3-runtime==4.9.3
anyio==4.2.0
appdirs==1.4.4
argparse==1.4.0
asttokens==2.4.1
astunparse==1.6.3
async-timeout==4.0.3
attrs==23.2.0
audioread==3.0.1
basicsr==1.4.2
beautifulsoup4==4.12.2
bitsandbytes==0.41.1
black==23.12.1
blinker==1.7.0
braceexpand==0.1.7
cachetools==5.3.2
certifi==2023.11.17
cffi==1.16.0
charset-normalizer==3.3.2
chumpy==0.70
click==8.1.7
cmake==3.28.1
colorama==0.4.6
coloredlogs==15.0.1
comm==0.2.1
contourpy==1.2.0
cos-python-sdk-v5==1.9.22
coscmd==1.8.6.30
crcmod==1.7
cryptography==41.0.7
cycler==0.12.1
cython==3.0.2
datetime==5.4
debugpy==1.8.0
decorator==4.4.2
decord==0.6.0
dill==0.3.7
docker-pycreds==0.4.0
dulwich==0.21.7
easydict==1.11
einops==0.7.0
exceptiongroup==1.2.0
executing==2.0.1
fastapi==0.109.0
ffmpeg==1.4
ffmpeg-python==0.2.0
ffmpy==0.3.1
filelock==3.13.1
flatbuffers==23.5.26
fonttools==4.47.2
frozenlist==1.4.1
fsspec==2023.12.2
ftfy==6.1.1
future==0.18.3
fuzzywuzzy==0.18.0
fvcore==0.1.5.post20221221
gast==0.4.0
gdown==4.5.3
gitdb==4.0.11
gitpython==3.1.41
google-auth==2.26.2
google-auth-oauthlib==0.4.6
google-pasta==0.2.0
gradio==3.43.2
gradio-client==0.5.0
grpcio==1.60.0
h11==0.14.0
h5py==3.10.0
httpcore==1.0.2
httpx==0.26.0
huggingface-hub==0.20.2
humanfriendly==10.0
idna==3.6
imageio==2.31.1
imageio-ffmpeg==0.4.8
importlib-metadata==7.0.1
importlib-resources==6.1.1
infomap==2.7.1
iniconfig==2.0.0
insightface==0.7.3
invisible-watermark==0.1.5
iopath==0.1.10
ip-adapter==0.1.0
iprogress==0.4
ipykernel==6.29.0
ipython==8.20.0
ipywidgets==8.0.3
jax==0.4.23
jedi==0.19.1
jinja2==3.1.3
jmespath==0.10.0
joblib==1.3.2
json-tricks==3.17.3
jsonschema==4.21.0
jsonschema-specifications==2023.12.1
jupyter-client==8.6.0
jupyter-core==5.7.1
jupyterlab-widgets==3.0.9
keras==2.12.0
kiwisolver==1.4.5
kornia==0.7.0
lazy-loader==0.3
libclang==16.0.6
librosa==0.10.1
lightning-utilities==0.10.0
lit==17.0.6
llvmlite==0.41.1
lmdb==1.4.1
loguru==0.6.0
markdown==3.5.2
markdown-it-py==3.0.0
markupsafe==2.0.1
matplotlib==3.6.2
matplotlib-inline==0.1.6
mdurl==0.1.2
mediapipe==0.10.3
ml-dtypes==0.3.2
mmcv==2.1.0
mmdet==3.2.0
mmengine==0.10.2
mmpose==1.3.1
model-index==0.1.11
modelcards==0.1.6
moviepy==1.0.3
mpmath==1.3.0
msgpack==1.0.7
multidict==6.0.4
munkres==1.1.4
mypy-extensions==1.0.0
nest-asyncio==1.5.9
networkx==3.2.1
ninja==1.11.1
numba==0.58.1
numpy==1.23.5
oauthlib==3.2.2
omegaconf==2.3.0
onnx==1.14.1
onnxruntime==1.15.1
onnxsim==0.4.33
open-clip-torch==2.20.0
opencv-contrib-python==4.8.0.76
opencv-python==4.9.0.80
opencv-python-headless==4.9.0.80
opendatalab==0.0.10
openmim==0.3.9
openxlab==0.0.34
opt-einsum==3.3.0
ordered-set==4.1.0
orjson==3.9.10
oss2==2.17.0
packaging==23.2
pandas==2.1.4
parso==0.8.3
pathspec==0.12.1
pathtools==0.1.2
pexpect==4.9.0
pillow==10.2.0
pip==23.3.1
platformdirs==4.1.0
pluggy==1.3.0
pooch==1.8.0
portalocker==2.8.2
prettytable==3.9.0
proglog==0.1.10
prompt-toolkit==3.0.43
protobuf==3.20.3
psutil==5.9.7
ptyprocess==0.7.0
pure-eval==0.2.2
pyarrow==14.0.2
pyasn1==0.5.1
pyasn1-modules==0.3.0
pycocotools==2.0.7
pycparser==2.21
pycryptodome==3.20.0
pydantic==1.10.2
pydeck==0.8.1b0
pydub==0.25.1
pygments==2.17.2
pynvml==11.5.0
pyparsing==3.1.1
pysocks==1.7.1
pytest==7.4.4
python-dateutil==2.8.2
python-dotenv==1.0.0
python-multipart==0.0.6
pytorch-lightning==2.0.8
pytube==15.0.0
pytz==2023.3.post1
pywavelets==1.5.0
pyyaml==6.0.1
pyzmq==25.1.2
qudida==0.0.4
redis==4.5.1
referencing==0.32.1
regex==2023.12.25
requests==2.28.2
requests-oauthlib==1.3.1
rich==13.4.2
rpds-py==0.17.1
rsa==4.9
safetensors==0.3.3
scikit-image==0.22.0
scikit-learn==1.3.2
scipy==1.11.4
semantic-version==2.10.0
sentencepiece==0.1.99
sentry-sdk==1.39.2
setproctitle==1.3.3
setuptools==60.2.0
shapely==2.0.2
six==1.16.0
smmap==5.0.1
sniffio==1.3.0
sounddevice==0.4.6
soundfile==0.12.1
soupsieve==2.5
soxr==0.3.7
stack-data==0.6.3
starlette==0.35.1
streamlit==1.30.0
streamlit-drawable-canvas==0.9.3
sympy==1.12
tabulate==0.9.0
tb-nightly==2.11.0a20220906
tenacity==8.2.3
tensorboard==2.12.0
tensorboard-data-server==0.6.1
tensorboard-plugin-wit==1.8.1
tensorflow==2.12.0
tensorflow-estimator==2.12.0
tensorflow-io-gcs-filesystem==0.35.0
termcolor==2.4.0
terminaltables==3.1.10
test-tube==0.7.5
threadpoolctl==3.2.0
tifffile==2023.12.9
timm==0.9.12
tokenizers==0.13.3
toml==0.10.2
tomli==2.0.1
toolz==0.12.0
torch==2.0.1+cu118
torch-tb-profiler==0.4.1
torchmetrics==1.1.1
torchvision==0.15.2+cu118
tornado==6.4
tqdm==4.65.2
traitlets==5.14.1
transformers==4.33.1
triton==2.0.0
typing-extensions==4.9.0
tzdata==2023.4
tzlocal==5.2
urllib3==1.26.18
urwid==2.4.2
uvicorn==0.26.0
validators==0.22.0
wandb==0.15.10
watchdog==3.0.0
wcwidth==0.2.13
webdataset==0.2.86
webp==0.3.0
websockets==11.0.3
werkzeug==3.0.1
wget==3.2
wheel==0.41.2
widgetsnbextension==4.0.9
wrapt==1.14.1
xformers==0.0.21
xmltodict==0.13.0
xtcocotools==1.14.3
yacs==0.1.8
yapf==0.40.2
yarl==1.9.4
zipp==3.17.0
zope-interface==6.1
fire==0.6.0
xlsxwriter
git+https://github.com/tencent-ailab/IP-Adapter.git@main
git+https://github.com/openai/CLIP.git
MMCM/setup.py
ADDED
@@ -0,0 +1,25 @@
#!/usr/bin/env python

from setuptools import setup, find_packages

with open("README.md", "r") as fh:
    long_description = fh.read()

setup(
    name="mmcm",  # used in pip install
    version="1.0.0",
    author="anchorxia",
    author_email="anchorxia@tencent.com",
    description="process package for multi media cross modal",
    # long_description=long_description,
    # long_description_content_type="text/markdown",
    url="https://github.com/TMElyralab/MMCM",
    # include_package_data=True,  # please edit MANIFEST.in
    packages=find_packages(),  # used in import
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    install_requires=[],
)
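A quick way to confirm the package declared above is importable is a minimal sketch like the following. It assumes an editable install has already been run (e.g. `pip install -e MMCM`); everything else is standard library, and the distribution name `mmcm` comes from the `setup()` call.

```python
# Minimal sanity check, assuming `pip install -e MMCM` has already been run.
from importlib.metadata import version

import mmcm  # the package discovered by find_packages() in setup.py

print(mmcm.__file__)    # where the package was resolved from
print(version("mmcm"))  # "1.0.0", taken from setup(version=...)
```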
README-zh.md
ADDED
@@ -0,0 +1,545 @@
# MuseV [English](README.md) [中文](README-zh.md)

<font size=5>MuseV: Infinite-length and High-Fidelity Virtual Human Video Generation with Visual Conditioned Parallel Denoising.
</br>
Zhiqiang Xia <sup>\*</sup>,
Zhaokang Chen<sup>\*</sup>,
Bin Wu<sup>†</sup>,
Chao Li,
Kwok-Wai Hung,
Chao Zhan,
Yingjie He,
Wenjiang Zhou
(<sup>*</sup>co-first author, <sup>†</sup>Corresponding Author, benbinwu@tencent.com)
</font>

**[github](https://github.com/TMElyralab/MuseV)** **[huggingface](https://huggingface.co/TMElyralab/MuseV)** **[HuggingfaceSpace](https://huggingface.co/spaces/AnchorFake/MuseVDemo)** **[project](coming soon)** **Technical report (coming soon)**


In March 2023 we became convinced that diffusion models can simulate the world, and we started building a world visual simulator based on them. `MuseV` was a milestone reached around July 2023. Inspired by the progress of Sora, we decided to open-source MuseV. MuseV grew on the shoulders of open source, and we hope to give back to the community with it. Next, we will move on to the promising diffusion + transformer scheme.

We have released <a href="https://github.com/TMElyralab/MuseTalk" style="font-size:24px; color:red;">MuseTalk</a>. `MuseTalk` is a real-time, high-quality lip-sync model that can be combined with `MuseV` to build a complete `virtual human generation solution`. Stay tuned!

# Overview

`MuseV` is a diffusion-based virtual human video generation framework with the following features:

1. It supports infinite-length generation with a novel visual conditioned parallel denoising scheme, which removes the problem of error accumulation and is especially suitable for scenes with a fixed camera position.
1. It provides pretrained virtual human video generation models trained on a human-centric dataset.
1. It supports image-to-video, text-to-image-to-video, and video-to-video generation.
1. It is compatible with the `Stable Diffusion` text-to-image ecosystem, including `base_model`, `lora`, `controlnet`, etc.
1. It supports multi-reference-image techniques, including `IPAdapter`, `ReferenceOnly`, `ReferenceNet`, `IPAdapterFaceID`.
1. Training code will be released later.

# Important updates
1. `musev_referencenet_pose`: the model names for `unet` and `ip_adapter` were specified incorrectly; please use `musev_referencenet_pose` instead of `musev_referencenet`, and please use the latest main branch.

# Progress
- [03/27/2024] Released the `MuseV` project and the trained models `musev`, `muse_referencenet`, `muse_referencenet_pose`.
- [03/30/2024] Added a [gui](https://huggingface.co/spaces/AnchorFake/MuseVDemo) on huggingface space for interactive video generation.

## Models
### Model structure
![model_structure](./data/models/musev_structure.png)
### Parallel denoising
![parallel_denoise](./data//models/parallel_denoise.png)

## Test cases
All frames of the generated results are produced directly by `MuseV`, without any post-processing such as temporal or spatial super-resolution.
<!-- # TODO: // use youtu video link? -->
All of the following test cases are maintained in `configs/tasks/example.yaml` and can be reproduced directly.

### Video generation from input text and image
#### Humans
<table class="center">
  <tr style="font-weight: bolder;text-align:center;">
    <td width="50%">image</td>
    <td width="45%">video</td>
    <td width="5%">prompt</td>
  </tr>
  <tr>
    <td><img src=./data/images/yongen.jpeg width="400"></td>
    <td><video src="https://github.com/TMElyralab/MuseV/assets/163980830/732cf1fd-25e7-494e-b462-969c9425d277" width="100" controls preload></video></td>
    <td>(masterpiece, best quality, highres:1),(1girl, solo:1),(beautiful face, soft skin, costume:1),(eye blinks:{eye_blinks_factor}),(head wave:1.3)</td>
  </tr>
  <tr>
    <td><img src=./data/images/jinkesi2.jpeg width="400"></td>
    <td><video src="https://github.com/TMElyralab/MuseV/assets/163980830/62b533d3-95f3-48db-889d-75dde1ad04b7" width="100" controls preload></video></td>
    <td>(masterpiece, best quality, highres:1),(1girl, solo:1),(beautiful face, soft skin, costume:1),(eye blinks:{eye_blinks_factor}),(head wave:1.3)</td>
  </tr>
  <tr>
    <td><img src=./data/images/seaside4.jpeg width="400"></td>
    <td><video src="https://github.com/TMElyralab/MuseV/assets/163980830/9b75a46c-f4e6-45ef-ad02-05729f091c8f" width="100" controls preload></video></td>
    <td>(masterpiece, best quality, highres:1), peaceful beautiful sea scene</td>
  </tr>
  <tr>
    <td><img src=./data/images/seaside_girl.jpeg width="400"></td>
    <td><video src="https://github.com/TMElyralab/MuseV/assets/163980830/d0f3b401-09bf-4018-81c3-569ec24a4de9" width="100" controls preload></video></td>
    <td>(masterpiece, best quality, highres:1), peaceful beautiful sea scene</td>
  </tr>
  <!-- guitar -->
  <tr>
    <td><img src=./data/images/boy_play_guitar.jpeg width="400"></td>
    <td><video src="https://github.com/TMElyralab/MuseV/assets/163980830/61bf955e-7161-44c8-a498-8811c4f4eb4f" width="100" controls preload></video></td>
    <td>(masterpiece, best quality, highres:1), playing guitar</td>
  </tr>
  <tr>
    <td><img src=./data/images/girl_play_guitar2.jpeg width="400"></td>
    <td><video src="https://github.com/TMElyralab/MuseV/assets/163980830/40982aa7-9f6a-4e44-8ef6-3f185d284e6a" width="100" controls preload></video></td>
    <td>(masterpiece, best quality, highres:1), playing guitar</td>
  </tr>
  <tr>
    <td><img src=./data/images/boy_play_guitar2.jpeg width="400"></td>
    <td><video src="https://github.com/TMElyralab/MuseV/assets/163980830/69ea9d0c-5ed0-44b9-bca9-a4829c8d8b68" width="100" controls preload></video></td>
    <td>(masterpiece, best quality, highres:1), playing guitar</td>
  </tr>
  <tr>
    <td><img src=./data/images/girl_play_guitar4.jpeg width="400"></td>
    <td><video src="https://github.com/TMElyralab/MuseV/assets/163980830/d242e8a4-08ab-474f-b4a8-b718780d2991" width="100" controls preload></video></td>
    <td>(masterpiece, best quality, highres:1), playing guitar</td>
  </tr>
  <!-- famous people -->
  <tr>
    <td><img src=./data/images/dufu.jpeg width="400"></td>
    <td><video src="https://github.com/TMElyralab/MuseV/assets/163980830/28294baa-b996-420f-b1fb-046542adf87d" width="100" controls preload></video></td>
    <td>(masterpiece, best quality, highres:1),(1man, solo:1),(beautiful face, soft skin, costume:1),(eye blinks:{eye_blinks_factor}),(head wave:1.3)</td>
  </tr>
  <tr>
    <td><img src=./data/images/Mona_Lisa.jpg width="400"></td>
    <td><video src="https://github.com/TMElyralab/MuseV/assets/163980830/1ce11da6-14c6-4dcd-b7f9-7a5f060d71fb" width="100" controls preload></video></td>
    <td>(masterpiece, best quality, highres:1),(1girl, solo:1),(beautiful face, soft skin, costume:1),(eye blinks:{eye_blinks_factor}),(head wave:1.3)</td>
  </tr>
  <tr>
    <td><img src=./data/images/Portrait-of-Dr.-Gachet.jpg width="400"></td>
    <td><video src="https://github.com/TMElyralab/MuseV/assets/163980830/4072410a-ecea-4ee5-a9b4-735f9f462d51" width="100" controls preload></video></td>
    <td>(masterpiece, best quality, highres:1),(1man, solo:1),(beautiful face, soft skin, costume:1),(eye blinks:{eye_blinks_factor}),(head wave:1.3)</td>
  </tr>
  <tr>
    <td><img src=./data/images/Self-Portrait-with-Cropped-Hair.jpg width="400"></td>
    <td><video src="https://github.com/TMElyralab/MuseV/assets/163980830/5148beda-a1e1-44f0-ad84-2fb99ad73a11" width="100" controls preload></video></td>
    <td>(masterpiece, best quality, highres:1),(1girl, solo:1),(beautiful face, soft skin, costume:1),(eye blinks:{eye_blinks_factor}),(head wave:1.3)</td>
  </tr>
  <tr>
    <td><img src=./data/images/The-Laughing-Cavalier.jpg width="400"></td>
    <td><video src="https://github.com/TMElyralab/MuseV/assets/163980830/df1c5943-15a3-41f5-afe7-e7497c81836d" width="100" controls preload></video></td>
    <td>(masterpiece, best quality, highres:1),(1girl, solo:1),(beautiful face, soft skin, costume:1),(eye blinks:{eye_blinks_factor}),(head wave:1.3)</td>
  </tr>
</table >

#### Scenes
<table class="center">
  <tr style="font-weight: bolder;text-align:center;">
    <td width="35%">image</td>
    <td width="50%">video</td>
    <td width="15%">prompt</td>
  </tr>
  <tr>
    <td><img src=./data/images/waterfall4.jpeg width="400"></td>
    <td><video src="https://github.com/TMElyralab/MuseV/assets/163980830/852daeb6-6b58-4931-81f9-0dddfa1b4ea5" width="100" controls preload></video></td>
    <td>(masterpiece, best quality, highres:1), peaceful beautiful waterfall, an endless waterfall</td>
  </tr>
  <tr>
    <td><img src=./data/images/river.jpeg width="400"></td>
    <td><video src="https://github.com/TMElyralab/MuseV/assets/163980830/d5cb2798-b5ce-497a-a058-ae63d664028e" width="100" controls preload></video></td>
    <td>(masterpiece, best quality, highres:1), peaceful beautiful river</td>
  </tr>
  <tr>
    <td><img src=./data/images/seaside2.jpeg width="400"></td>
    <td><video src="https://github.com/TMElyralab/MuseV/assets/163980830/4a4d527a-6203-411f-afe9-31c992d26816" width="100" controls preload></video></td>
    <td>(masterpiece, best quality, highres:1), peaceful beautiful sea scene</td>
  </tr>
</table >

### Video generation conditioned on an input video
In the current generation mode, the first frame of the reference video needs to be aligned with the condition image; otherwise the first-frame information is destroyed and the result degrades. A typical workflow is therefore:
1. choose a reference video;
2. run an image-to-image / controlnet pipeline on the first frame of that video, with `MJ` or any other platform;
3. feed the image generated in step 2 together with the reference video to MuseV to generate the video.

**pose2video**

In the `duffy` test case, the pose of the visual condition frame is not aligned with the first frame of the control video. `posealign` will solve this issue.

<table class="center">
  <tr style="font-weight: bolder;text-align:center;">
    <td width="25%">image</td>
    <td width="65%">video</td>
    <td width="10%">prompt</td>
  </tr>
  <tr>
    <td>
      <img src=./data/images/spark_girl.png width="200">
      <img src=./data/images/cyber_girl.png width="200">
    </td>
    <td><video src="https://github.com/TMElyralab/MuseV/assets/163980830/484cc69d-c316-4464-a55b-3df929780a8e" width="400" controls preload></video></td>
    <td>(masterpiece, best quality, highres:1)</td>
  </tr>
  <tr>
    <td><img src=./data/images/duffy.png width="400"></td>
    <td><video src="https://github.com/TMElyralab/MuseV/assets/163980830/c44682e6-aafc-4730-8fc1-72825c1bacf2" width="400" controls preload></video></td>
    <td>(masterpiece, best quality, highres:1)</td>
  </tr>
</table >

### MuseTalk
The character in `talk`, Sun Xinying (孙昕荧), is a well-known online influencer whom you can follow on [Douyin](https://www.douyin.com/user/MS4wLjABAAAAWDThbMPN_6Xmm_JgXexbOii1K-httbu2APdG8DvDyM8).

<table class="center">
  <tr style="font-weight: bolder;">
    <td width="35%">name</td>
    <td width="50%">video</td>
  </tr>
  <tr>
    <td>talk</td>
    <td><video src="https://github.com/TMElyralab/MuseV/assets/163980830/951188d1-4731-4e7f-bf40-03cacba17f2f" width="100" controls preload></video></td>
  </tr>
  <tr>
    <td>talk</td>
    <td><video src="https://github.com/TMElyralab/MuseV/assets/163980830/ba0396ab-8aba-4440-803c-18b078ae1dd9" width="100" controls preload></video></td>
  </tr>
  <tr>
    <td>sing</td>
    <td><video src="https://github.com/TMElyralab/MuseV/assets/163980830/50b8ffab-9307-4836-99e5-947e6ce7d112" width="100" controls preload></video></td>
  </tr>
</table >

# TODO
- [ ] technical report (coming soon).
- [ ] training code.
- [ ] diffusion + transformer generation framework.
- [ ] `posealign` module.

# Quickstart
Prepare the Python environment and install the extra packages such as `diffusers`, `controlnet_aux`, `mmcm`.

## Prepare the environment
You are recommended to use `docker` first to prepare the Python environment.

### Prepare the Python environment
**Attention**: we have only tested with Docker; using conda or another environment may run into problems. We will try our best to resolve them, but please prefer `docker`.

#### Method 1: Docker
1. Pull the Docker image
```bash
docker pull anchorxia/musev:latest
```
2. Run the Docker container
```bash
docker run --gpus all -it --entrypoint /bin/bash anchorxia/musev:latest
```
The default conda environment after the docker starts is `musev`.

#### Method 2: conda
Create the conda environment from environment.yaml
```
conda env create --name musev --file ./environment.yml
```
#### Method 3: pip requirements
```
pip install -r requirements.txt
```
#### Prepare the [openmmlab](https://openmmlab.com/) packages
If you do not use Docker, the mmlab packages also need to be installed.
```bash
pip install --no-cache-dir -U openmim
mim install mmengine
mim install "mmcv>=2.0.1"
mim install "mmdet>=3.1.0"
mim install "mmpose>=1.1.0"
```

### Prepare the packages we developed
#### Download
```bash
git clone --recursive https://github.com/TMElyralab/MuseV.git
```
#### Prepare PYTHONPATH
```bash
current_dir=$(pwd)
export PYTHONPATH=${PYTHONPATH}:${current_dir}/MuseV
export PYTHONPATH=${PYTHONPATH}:${current_dir}/MuseV/MMCM
export PYTHONPATH=${PYTHONPATH}:${current_dir}/MuseV/diffusers/src
export PYTHONPATH=${PYTHONPATH}:${current_dir}/MuseV/controlnet_aux/src
cd MuseV
```

1. `MMCM`: multi-media, cross-modal processing package.
1. `diffusers`: diffusers package modified from [diffusers](https://github.com/huggingface/diffusers).
1. `controlnet_aux`: package modified from [controlnet_aux](https://github.com/TMElyralab/controlnet_aux).


## Download models
```bash
git clone https://huggingface.co/TMElyralab/MuseV ./checkpoints
```
- `motion`: several versions of the video generation model, trained on the small `ucf101` dataset and a small `webvid` subset, about 60K video-text pairs. GPU memory consumption is measured at `resolution`$=512*512$, `time_size=12`. (A small path check for this layout is sketched after this list.)
  - `musev/unet`: this version only trains the `unet` motion module. Inference `GPU memory consumption` $\approx 8G$.
  - `musev_referencenet`: this version trains the `unet` motion module, `referencenet`, and `IPAdapter`. Inference `GPU memory consumption` $\approx 12G$.
    - `unet`: the `motion` module, with `to_k`, `to_v` in the `Attention` layers, referring to `IPAdapter`.
    - `referencenet`: similar to `AnimateAnyone`.
    - `ip_adapter_image_proj.bin`: the image feature projection layer, referring to `IPAdapter`.
  - `musev_referencenet_pose`: this version is based on `musev_referencenet`; it freezes `referencenet` and `controlnet_pose` and trains `unet motion` and `IPAdapter`. Inference `GPU memory consumption` $\approx 12G$.
- `t2i/sd1.5`: text2image model; its parameters are frozen while training the motion module.
  - majicmixRealv6Fp16: an example, can be replaced with other t2i bases. Download from [majicmixRealv6Fp16](https://civitai.com/models/43331/majicmix-realistic).
- `IP-Adapter/models`: download from [IPAdapter](https://huggingface.co/h94/IP-Adapter/tree/main).
  - `image_encoder`: the visual feature extraction model.
  - `ip-adapter_sd15.bin`: original IPAdapter pretrained weights.
  - `ip-adapter-faceid_sd15.bin`: original IPAdapter pretrained weights.
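A minimal, hedged sketch to check that the checkpoint layout described above is in place before running inference. The relative paths are assumptions derived from the directory list in this README and the `./checkpoints` clone target; adjust them if your checkpoint root differs.

```python
# Sketch: verify the checkpoint layout described above exists locally.
# The paths below are assumptions based on this README's directory list.
import os

CHECKPOINT_ROOT = "./checkpoints"
expected_dirs = [
    "motion/musev",
    "motion/musev_referencenet",
    "motion/musev_referencenet_pose",
    "t2i/sd1.5/majicmixRealv6Fp16",
    "IP-Adapter/models/image_encoder",
]
for rel_path in expected_dirs:
    full_path = os.path.join(CHECKPOINT_ROOT, rel_path)
    status = "ok" if os.path.isdir(full_path) else "missing"
    print(f"{status:8s} {full_path}")
```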
## Inference

### Prepare model paths
This step can be skipped when running the example tasks with the example inference commands.
It mainly sets the model paths and abbreviations in the config files, so that the inference scripts can use short abbreviations instead of full paths.
- T2I SD: see `musev/configs/model/T2I_all_model.py`
- motion unet: see `musev/configs/model/motion_model.py`
- task: see `musev/configs/tasks/example.yaml`

### musev_referencenet
#### Video generation from input text and image
```bash
python scripts/inference/text2video.py --sd_model_name majicmixRealv6Fp16 --unet_model_name musev_referencenet --referencenet_model_name musev_referencenet --ip_adapter_model_name musev_referencenet -test_data_path ./configs/tasks/example.yaml --output_dir ./output --n_batch 1 --target_datas yongen --vision_clip_extractor_class_name ImageClipVisionFeatureExtractor --vision_clip_model_path ./checkpoints/IP-Adapter/models/image_encoder --time_size 12 --fps 12
```
**Common parameters**:
- `test_data_path`: path of the test-case task file.
- `target_datas`: only the sub-tasks whose `name` in `test_data_path` is listed in `target_datas` are run; the separator is `,`.
- `sd_model_cfg_path`: T2I sd model path, either a model config path or a model path.
- `sd_model_name`: sd model name, used to select the full model path in `sd_model_cfg_path`. Multiple model names are separated by `,`, or use `all`.
- `unet_model_cfg_path`: motion unet model config path or model path.
- `unet_model_name`: unet model name, used to get the model path in `unet_model_cfg_path` and to initialize the unet class instance in `musev/models/unet_loader.py`. Multiple model names are separated by `,`, or use `all`. If `unet_model_cfg_path` is a model path, `unet_name` must be supported in `musev/models/unet_loader.py`.
- `time_size`: the diffusion model generates one clip at a time; this is the number of frames per clip. Default `12`.
- `n_batch`: number of clips generated in the shifted-window manner, $total\_frames=n\_batch * time\_size + n\_viscond$. Default `1`.
- `context_frames`: number of frames generated at once by a parallel-denoising sub-window. If `time_size` > `context_frame`, the parallel denoising logic is started and the `time_size` window is split into several sub-windows for parallel denoising. Default `12`.

There are two ways to generate long videos, and they can be combined (see the sketch below):
1. `visual conditioned parallel denoising`: set `n_batch=1` and `time_size` = the total number of frames you want.
2. `classic shifted windows`: set `time_size` = `context_frames` = number of frames per clip (`12`) and `context_overlap` = 0. `n_batch` clips are then generated head-to-tail; error accumulates across windows, so the larger `n_batch` is, the worse the final result becomes.
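As a rough illustration of the two options above, the frame bookkeeping can be reasoned about as follows. This is only a sketch: the actual windowing lives in the MuseV pipeline, and the exact overlap handling may differ.

```python
# Sketch of the frame bookkeeping described above; not the actual pipeline code.
def total_frames(n_batch: int, time_size: int, n_viscond: int = 1) -> int:
    # total_frames = n_batch * time_size + n_viscond (shifted-window generation)
    return n_batch * time_size + n_viscond


def context_windows(time_size: int, context_frames: int, context_overlap: int = 0):
    """Split a `time_size` window into parallel-denoising sub-windows."""
    stride = context_frames - context_overlap
    starts = range(0, max(time_size - context_frames, 0) + 1, stride)
    return [list(range(s, s + context_frames)) for s in starts]


# Option 1: one batch, long window, parallel denoising inside it.
print(total_frames(n_batch=1, time_size=48))            # 49 frames with one condition frame
print(context_windows(time_size=48, context_frames=12))  # four sub-windows of 12 frames

# Option 2: classic shifted windows; error accumulates as n_batch grows.
print(total_frames(n_batch=4, time_size=12))
```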
**Model parameters**:
`referencenet`, `IPAdapter`, `IPAdapterFaceID`, and `Facein` are supported.
- `referencenet_model_name`: `referencenet` model name.
- `ImageClipVisionFeatureExtractor`: `ImageEmbExtractor` name; extracts the visual features used in `IPAdapter`.
- `vision_clip_model_path`: `ImageClipVisionFeatureExtractor` model path.
- `ip_adapter_model_name`: from `IPAdapter`; it is the `ImagePromptEmbProj` used together with `ImageEmbExtractor`.
- `ip_adapter_face_model_name`: `IPAdapterFaceID`, from `IPAdapter`; `face_image_path` should be set.

**Some parameters that affect the motion range and the generation result**:
- `video_guidance_scale`: similar to text2image, controls the influence between cond and uncond; has a large effect. Default `3.5`.
- `guidance_scale`: the cond/uncond scale for the first-frame image; has little effect. Default `3.5`.
- `use_condition_image`: whether to use the given first frame for video generation.
- `redraw_condition_image`: whether to redraw the given first-frame image.
- `video_negative_prompt`: abbreviation of the full `negative_prompt` in the config file. Default `V2`.


#### Video generation from an input video
```bash
python scripts/inference/video2video.py --sd_model_name majicmixRealv6Fp16 --unet_model_name musev_referencenet --referencenet_model_name musev_referencenet --ip_adapter_model_name musev_referencenet -test_data_path ./configs/tasks/example.yaml --vision_clip_extractor_class_name ImageClipVisionFeatureExtractor --vision_clip_model_path ./checkpoints/IP-Adapter/models/image_encoder --output_dir ./output --n_batch 1 --controlnet_name dwpose_body_hand --which2video "video_middle" --target_datas dacne1 --fps 12 --time_size 12
```
**Some important parameters**

Most parameters are the same as for `musev_text2video`. The parameters specific to `video2video` are:
1. `video_path` needs to be set in `test_data`. Both `rgb video` and `controlnet_middle_video` are now supported.
- `which2video`: which part of the reference video takes part in guiding the generation. If `video_middle`, only the `video_middle` such as `pose` or `depth` is used; if `video`, the video itself also takes part in the video noise initialization, equivalent to `img2img`.
- `controlnet_name`: whether to use a `controlnet condition`, e.g. `dwpose,depth`; for pose, `dwpose_body_hand` is recommended.
- `video_is_middle`: whether `video_path` is an `rgb video` or a `controlnet_middle_video`. Can be set per `test_data` in `test_data_path`.
- `video_has_condition`: whether `condition_images` is aligned with the first frame of `video_path`. If not, `condition_images` is generated first and then aligned by concatenation. Set in `test_data`.

All `controlnet_names` are maintained in [mmcm](https://github.com/TMElyralab/MMCM/blob/main/mmcm/vision/feature_extractor/controlnet.py#L513)
```python
['pose', 'pose_body', 'pose_hand', 'pose_face', 'pose_hand_body', 'pose_hand_face', 'dwpose', 'dwpose_face', 'dwpose_hand', 'dwpose_body', 'dwpose_body_hand', 'canny', 'tile', 'hed', 'hed_scribble', 'depth', 'pidi', 'normal_bae', 'lineart', 'lineart_anime', 'zoe', 'sam', 'mobile_sam', 'leres', 'content', 'face_detector']
```
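A small sketch of how a `--controlnet_name` value could be validated against that list before launching a job. This is illustration only; the real lookup happens inside `mmcm`, and the helper name below is hypothetical.

```python
# Sketch: validate a comma-separated --controlnet_name value against the list above.
SUPPORTED_CONTROLNETS = {
    "pose", "pose_body", "pose_hand", "pose_face", "pose_hand_body", "pose_hand_face",
    "dwpose", "dwpose_face", "dwpose_hand", "dwpose_body", "dwpose_body_hand",
    "canny", "tile", "hed", "hed_scribble", "depth", "pidi", "normal_bae",
    "lineart", "lineart_anime", "zoe", "sam", "mobile_sam", "leres", "content",
    "face_detector",
}


def parse_controlnet_names(arg: str) -> list[str]:
    names = [name.strip() for name in arg.split(",") if name.strip()]
    unknown = [name for name in names if name not in SUPPORTED_CONTROLNETS]
    if unknown:
        raise ValueError(f"unsupported controlnet name(s): {unknown}")
    return names


print(parse_controlnet_names("dwpose_body_hand"))
```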

### musev_referencenet_pose
Only used for `pose2video`.
Trained based on `musev_referencenet`, freezing `referencenet`, `pose-controlnet`, and `T2I`, and training the `motion` module and `IPAdapter`.
```bash
python scripts/inference/video2video.py --sd_model_name majicmixRealv6Fp16 --unet_model_name musev_referencenet_pose --referencenet_model_name musev_referencenet --ip_adapter_model_name musev_referencenet_pose -test_data_path ./configs/tasks/example.yaml --vision_clip_extractor_class_name ImageClipVisionFeatureExtractor --vision_clip_model_path ./checkpoints/IP-Adapter/models/image_encoder --output_dir ./output --n_batch 1 --controlnet_name dwpose_body_hand --which2video "video_middle" --target_datas dacne1 --fps 12 --time_size 12
```

### musev
Only the motion module, without referencenet; requires less GPU memory.
#### text2video
```bash
python scripts/inference/text2video.py --sd_model_name majicmixRealv6Fp16 --unet_model_name musev -test_data_path ./configs/tasks/example.yaml --output_dir ./output --n_batch 1 --target_datas yongen --time_size 12 --fps 12
```
#### video2video
```bash
python scripts/inference/video2video.py --sd_model_name majicmixRealv6Fp16 --unet_model_name musev -test_data_path ./configs/tasks/example.yaml --output_dir ./output --n_batch 1 --controlnet_name dwpose_body_hand --which2video "video_middle" --target_datas dacne1 --fps 12 --time_size 12
```

### Gradio demo
MuseV provides a gradio script to generate a GUI on a local machine, which makes video generation convenient.

```bash
cd scripts/gradio
python app.py
```
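`scripts/gradio/app.py` is the actual entry point; the following is only a minimal, hypothetical sketch of the pattern such an app follows. The function, inputs, and outputs here are placeholders, not the real demo's interface.

```python
# Hypothetical minimal Gradio skeleton; NOT the real scripts/gradio/app.py.
import gradio as gr


def generate_video(prompt: str, image):
    # Placeholder: the real app would call the MuseV pipeline here
    # and return the path of the generated video file.
    return None


demo = gr.Interface(
    fn=generate_video,
    inputs=[gr.Textbox(label="prompt"), gr.Image(label="condition image")],
    outputs=gr.Video(label="generated video"),
)

if __name__ == "__main__":
    demo.launch()
```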
# Acknowledgements
1. During development, MuseV learned from many open-source works: [TuneAVideo](https://github.com/showlab/Tune-A-Video), [diffusers](https://github.com/huggingface/diffusers), [Moore-AnimateAnyone](https://github.com/MooreThreads/Moore-AnimateAnyone/tree/master/src/pipelines), [animatediff](https://github.com/guoyww/AnimateDiff), [IP-Adapter](https://github.com/tencent-ailab/IP-Adapter), [AnimateAnyone](https://arxiv.org/abs/2311.17117), [VideoFusion](https://arxiv.org/abs/2303.08320), and [insightface](https://github.com/deepinsight/insightface).
2. MuseV is built on the `ucf101` and `webvid` datasets.

Thanks to the open-source community for its contributions!

# Limitations

`MuseV` still has many items to optimize, including:

1. Limited generalization. It is sensitive to the visual condition frame: some visual condition images work well while others do not, and some pretrained t2i models work well while others do not.
1. Limited video generation types and limited motion range, partly because of the limited training data types. The released `MuseV` was trained on about 60K human text-video pairs at resolution `512*320`. `MuseV` has a larger motion range at lower resolution but lower video quality; at high resolution the image quality is good but the motion range is small. Training on a larger, higher-resolution, higher-quality text-video dataset could make `MuseV` better.
1. Watermark issues caused by training with `webvid`. A cleaner dataset without watermarks could solve this.
1. Limited types of long-video generation. Visual conditioned parallel denoising solves the accumulated error of video generation, but the current method is only suitable for relatively fixed-camera scenes.
1. referencenet and IP-Adapter are under-trained because of limited time and resources.
1. The code structure is not polished enough. `MuseV` supports rich and dynamic features, but the code is complex and not refactored; it takes time to get familiar with it.


<!-- # Contribution: no open-source co-construction organized for now -->
# Citation
```bib
@article{musev,
  title={MuseV: Infinite-length and High-fidelity Virtual Human Video Generation with Visual Conditioned Parallel Denoising},
  author={Xia, Zhiqiang and Chen, Zhaokang and Wu, Bin and Li, Chao and Hung, Kwok-Wai and Zhan, Chao and He, Yingjie and Zhou, Wenjiang},
  journal={arxiv},
  year={2024}
}
```
# Disclaimer / License
1. `code`: the code of `MuseV` is released under the `MIT` license; both academic and commercial use are allowed.
1. `model`: the trained models are available for non-commercial research purposes only.
1. `other open-source models`: other open-source models used must comply with their own licenses, e.g. `insightface`, `IP-Adapter`, `ft-mse-vae`, etc.
1. The test data are collected from the internet and are available for non-commercial research purposes only.
1. `AIGC`: this project aims to positively impact AI-based video generation. Users are granted the freedom to create videos with this tool, but they should comply with local laws and use it responsibly. The developers do not assume any responsibility for potential misuse by users.
configs/model/T2I_all_model.py
ADDED
@@ -0,0 +1,15 @@
import os


T2IDir = os.path.join(
    os.path.dirname(os.path.abspath(__file__)), "../../checkpoints", "t2i"
)

MODEL_CFG = {
    "majicmixRealv6Fp16": {
        "sd": os.path.join(T2IDir, "sd1.5/majicmixRealv6Fp16"),
    },
    "fantasticmix_v10": {
        "sd": os.path.join(T2IDir, "sd1.5/fantasticmix_v10"),
    },
}
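
This MODEL_CFG maps the `--sd_model_name` values used in the inference commands (e.g. `majicmixRealv6Fp16`) to local T2I checkpoint directories. Below is a minimal sketch of how such a mapping might be looked up; the loading mechanism is illustrative, not the project's actual loader:

```python
# Illustrative lookup of an sd_model_name against the T2I MODEL_CFG.
# How the inference scripts actually consume MODEL_CFG may differ.
import importlib.util

spec = importlib.util.spec_from_file_location(
    "t2i_cfg", "configs/model/T2I_all_model.py"
)
t2i_cfg = importlib.util.module_from_spec(spec)
spec.loader.exec_module(t2i_cfg)

sd_model_name = "majicmixRealv6Fp16"
if sd_model_name not in t2i_cfg.MODEL_CFG:
    raise KeyError(f"unknown sd_model_name: {sd_model_name}")
sd_model_path = t2i_cfg.MODEL_CFG[sd_model_name]["sd"]
print(sd_model_path)  # .../checkpoints/t2i/sd1.5/majicmixRealv6Fp16
```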
configs/model/ip_adapter.py
ADDED
@@ -0,0 +1,66 @@
import os

IPAdapterModelDir = os.path.join(
    os.path.dirname(os.path.abspath(__file__)), "../../checkpoints", "IP-Adapter"
)


MotionDir = os.path.join(
    os.path.dirname(os.path.abspath(__file__)), "../../checkpoints", "motion"
)


MODEL_CFG = {
    "IPAdapter": {
        "ip_image_encoder": os.path.join(IPAdapterModelDir, "models/image_encoder"),
        "ip_ckpt": os.path.join(IPAdapterModelDir, "ip-adapter_sd15.bin"),
        "ip_scale": 1.0,
        "clip_extra_context_tokens": 4,
        "clip_embeddings_dim": 1024,
        "desp": "",
    },
    "IPAdapterPlus": {
        "ip_image_encoder": os.path.join(IPAdapterModelDir, "image_encoder"),
        "ip_ckpt": os.path.join(IPAdapterModelDir, "ip-adapter-plus_sd15.bin"),
        "ip_scale": 1.0,
        "clip_extra_context_tokens": 16,
        "clip_embeddings_dim": 1024,
        "desp": "",
    },
    "IPAdapterPlus-face": {
        "ip_image_encoder": os.path.join(IPAdapterModelDir, "image_encoder"),
        "ip_ckpt": os.path.join(IPAdapterModelDir, "ip-adapter-plus-face_sd15.bin"),
        "ip_scale": 1.0,
        "clip_extra_context_tokens": 16,
        "clip_embeddings_dim": 1024,
        "desp": "",
    },
    "IPAdapterFaceID": {
        "ip_image_encoder": os.path.join(IPAdapterModelDir, "image_encoder"),
        "ip_ckpt": os.path.join(IPAdapterModelDir, "ip-adapter-faceid_sd15.bin"),
        "ip_scale": 1.0,
        "clip_extra_context_tokens": 4,
        "clip_embeddings_dim": 512,
        "desp": "",
    },
    "musev_referencenet": {
        "ip_image_encoder": os.path.join(IPAdapterModelDir, "image_encoder"),
        "ip_ckpt": os.path.join(
            MotionDir, "musev_referencenet/ip_adapter_image_proj.bin"
        ),
        "ip_scale": 1.0,
        "clip_extra_context_tokens": 4,
        "clip_embeddings_dim": 1024,
        "desp": "",
    },
    "musev_referencenet_pose": {
        "ip_image_encoder": os.path.join(IPAdapterModelDir, "image_encoder"),
        "ip_ckpt": os.path.join(
            MotionDir, "musev_referencenet_pose/ip_adapter_image_proj.bin"
        ),
        "ip_scale": 1.0,
        "clip_extra_context_tokens": 4,
        "clip_embeddings_dim": 1024,
        "desp": "",
    },
}
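
Each entry above pairs a CLIP image-encoder directory (`ip_image_encoder`) with an IP-Adapter projection checkpoint (`ip_ckpt`) and its `clip_extra_context_tokens` / `clip_embeddings_dim` shape. A hedged sketch of loading the image-encoder half with `transformers` (MuseV's own loader may wrap this differently):

```python
# Sketch: encode an ipadapter_image into CLIP image embeddings using the
# ip_image_encoder directory referenced by a MODEL_CFG entry.
import torch
from PIL import Image
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection

encoder_dir = "./checkpoints/IP-Adapter/models/image_encoder"  # from MODEL_CFG["IPAdapter"]
image_encoder = CLIPVisionModelWithProjection.from_pretrained(encoder_dir)
processor = CLIPImageProcessor()

image = Image.open("./data/images/yongen.jpeg").convert("RGB")
pixel_values = processor(images=image, return_tensors="pt").pixel_values
with torch.no_grad():
    image_embeds = image_encoder(pixel_values).image_embeds  # (1, clip_embeddings_dim)
print(image_embeds.shape)
```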
configs/model/lcm_model.py
ADDED
@@ -0,0 +1,17 @@
import os


LCMDir = os.path.join(
    os.path.dirname(os.path.abspath(__file__)), "../../checkpoints", "lcm"
)


MODEL_CFG = {
    "lcm": {
        os.path.join(LCMDir, "lcm-lora-sdv1-5/pytorch_lora_weights.safetensors"): {
            "strength": 1.0,
            "lora_block_weight": "ALL",
            "strength_offset": 0,
        },
    },
}
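
This config points at the LCM-LoRA weights for SD 1.5; `strength` and `lora_block_weight` control how the LoRA is applied. A hedged sketch of the same weights used with a plain `diffusers` SD 1.5 pipeline and the LCM scheduler (MuseV's pipeline applies them through its own LoRA-block-weight logic instead, and the checkpoint directory is assumed to be in diffusers format):

```python
# Sketch: LCM-LoRA + LCM scheduler on a vanilla SD1.5 pipeline for few-step
# sampling; illustrates what the "lcm" config entry refers to.
import torch
from diffusers import StableDiffusionPipeline, LCMScheduler

pipe = StableDiffusionPipeline.from_pretrained(
    "./checkpoints/t2i/sd1.5/majicmixRealv6Fp16", torch_dtype=torch.float16
).to("cuda")
pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)
pipe.load_lora_weights(
    "./checkpoints/lcm/lcm-lora-sdv1-5", weight_name="pytorch_lora_weights.safetensors"
)
pipe.fuse_lora(lora_scale=1.0)  # corresponds to "strength": 1.0

image = pipe("a girl, smiling", num_inference_steps=4, guidance_scale=1.0).images[0]
image.save("lcm_test.png")
```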
configs/model/motion_model.py
ADDED
@@ -0,0 +1,22 @@
import os


MotionDIr = os.path.join(
    os.path.dirname(os.path.abspath(__file__)), "../../checkpoints", "motion"
)


MODEL_CFG = {
    "musev": {
        "unet": os.path.join(MotionDIr, "musev"),
        "desp": "only train unet motion module, fix t2i",
    },
    "musev_referencenet": {
        "unet": os.path.join(MotionDIr, "musev_referencenet"),
        "desp": "train referencenet, IPAdapter and unet motion module, fix t2i",
    },
    "musev_referencenet_pose": {
        "unet": os.path.join(MotionDIr, "musev_referencenet_pose"),
        "desp": "train unet motion module and IPAdapter, fix t2i and referencenet",
    },
}
configs/model/negative_prompt.py
ADDED
@@ -0,0 +1,32 @@
Negative_Prompt_CFG = {
    "Empty": {
        "base_model": "",
        "prompt": "",
        "refer": "",
    },
    "V1": {
        "base_model": "",
        "prompt": "nsfw, lowres, bad anatomy, bad hands, text, error, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality, normal quality, jpeg artifacts, signature, watermark, username, blurry, tail, watermarks",
        "refer": "",
    },
    "V2": {
        "base_model": "",
        "prompt": "badhandv4, ng_deepnegative_v1_75t, (((multiple heads))), (((bad body))), (((two people))), ((extra arms)), ((deformed body)), (((sexy))), paintings,(((two heads))), ((big head)),sketches, (worst quality:2), (low quality:2), (normal quality:2), lowres, ((monochrome)), ((grayscale)), skin spots, acnes, skin blemishes, age spot, glans, (((nsfw))), nipples, extra fingers, (extra legs), (long neck), mutated hands, (fused fingers), (too many fingers)",
        "refer": "Weiban",
    },
    "V3": {
        "base_model": "",
        "prompt": "badhandv4, ng_deepnegative_v1_75t, bad quality",
        "refer": "",
    },
    "V4": {
        "base_model": "",
        "prompt": "badhandv4,ng_deepnegative_v1_75t,EasyNegativeV2,bad_prompt_version2-neg,bad quality",
        "refer": "",
    },
    "V5": {
        "base_model": "",
        "prompt": "(((multiple heads))), (((bad body))), (((two people))), ((extra arms)), ((deformed body)), (((sexy))), paintings,(((two heads))), ((big head)),sketches, (worst quality:2), (low quality:2), (normal quality:2), lowres, ((monochrome)), ((grayscale)), skin spots, acnes, skin blemishes, age spot, glans, (((nsfw))), nipples, extra fingers, (extra legs), (long neck), mutated hands, (fused fingers), (too many fingers)",
        "refer": "Weiban",
    },
}
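
These presets bundle commonly used negative-prompt strings (some reference embedding names such as `badhandv4` that only take effect if the corresponding textual-inversion embeddings are also loaded). A hedged sketch of passing one preset to an ordinary diffusers pipeline call; how MuseV itself injects them may differ:

```python
# Sketch: use the "V2" negative-prompt preset in a plain SD1.5 pipeline call.
# Assumes the repo root is on PYTHONPATH so `configs` is importable, and that
# the checkpoint directory is in diffusers format.
import torch
from diffusers import StableDiffusionPipeline

from configs.model.negative_prompt import Negative_Prompt_CFG

pipe = StableDiffusionPipeline.from_pretrained(
    "./checkpoints/t2i/sd1.5/majicmixRealv6Fp16", torch_dtype=torch.float16
).to("cuda")
image = pipe(
    prompt="(masterpiece, best quality, highres:1), (1girl, solo:1)",
    negative_prompt=Negative_Prompt_CFG["V2"]["prompt"],
    num_inference_steps=30,
).images[0]
image.save("neg_prompt_check.png")
```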
configs/model/referencenet.py
ADDED
@@ -0,0 +1,14 @@
import os


MotionDIr = os.path.join(
    os.path.dirname(os.path.abspath(__file__)), "../../checkpoints", "motion"
)


MODEL_CFG = {
    "musev_referencenet": {
        "net": os.path.join(MotionDIr, "musev_referencenet"),
        "desp": "",
    },
}
configs/tasks/example.yaml
ADDED
@@ -0,0 +1,215 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# - name: task_name
|
2 |
+
# condition_images: vision condition images path
|
3 |
+
# video_path: str, default null, used for video2video
|
4 |
+
# prompt: text to guide image generation
|
5 |
+
# ipadapter_image: image_path for IP-Adapter
|
6 |
+
# refer_image: image_path for referencenet, generally speaking, same as ipadapter_image
|
7 |
+
# height: int # The smaller the height/width, the larger the motion amplitude and the lower the video quality.
|
8 |
+
# width: int # The larger the height/width, the smaller the motion amplitude and the higher the video quality.
|
9 |
+
# img_length_ratio: float, generation video size is (height, width) * img_length_ratio
|
10 |
+
|
11 |
+
# text/image2video
|
12 |
+
- condition_images: ./data/images/yongen.jpeg
|
13 |
+
eye_blinks_factor: 1.8
|
14 |
+
height: 1308
|
15 |
+
img_length_ratio: 0.957
|
16 |
+
ipadapter_image: ${.condition_images}
|
17 |
+
name: yongen
|
18 |
+
prompt: (masterpiece, best quality, highres:1),(1girl, solo:1),(beautiful face,
|
19 |
+
soft skin, costume:1),(eye blinks:{eye_blinks_factor}),(head wave:1.3)
|
20 |
+
refer_image: ${.condition_images}
|
21 |
+
video_path: null
|
22 |
+
width: 736
|
23 |
+
- condition_images: ./data/images/jinkesi2.jpeg
|
24 |
+
eye_blinks_factor: 1.8
|
25 |
+
height: 714
|
26 |
+
img_length_ratio: 1.25
|
27 |
+
ipadapter_image: ${.condition_images}
|
28 |
+
name: jinkesi2
|
29 |
+
prompt: (masterpiece, best quality, highres:1),(1girl, solo:1),(beautiful face,
|
30 |
+
soft skin, costume:1),(eye blinks:{eye_blinks_factor}),(head wave:1.3)
|
31 |
+
refer_image: ${.condition_images}
|
32 |
+
video_path: null
|
33 |
+
width: 563
|
34 |
+
- condition_images: ./data/images/seaside4.jpeg
|
35 |
+
eye_blinks_factor: 1.8
|
36 |
+
height: 317
|
37 |
+
img_length_ratio: 2.221
|
38 |
+
ipadapter_image: ${.condition_images}
|
39 |
+
name: seaside4
|
40 |
+
prompt: (masterpiece, best quality, highres:1), peaceful beautiful sea scene
|
41 |
+
refer_image: ${.condition_images}
|
42 |
+
video_path: null
|
43 |
+
width: 564
|
44 |
+
- condition_images: ./data/images/seaside_girl.jpeg
|
45 |
+
eye_blinks_factor: 1.8
|
46 |
+
height: 736
|
47 |
+
img_length_ratio: 0.957
|
48 |
+
ipadapter_image: ${.condition_images}
|
49 |
+
name: seaside_girl
|
50 |
+
prompt: (masterpiece, best quality, highres:1), peaceful beautiful sea scene
|
51 |
+
refer_image: ${.condition_images}
|
52 |
+
video_path: null
|
53 |
+
width: 736
|
54 |
+
- condition_images: ./data/images/boy_play_guitar.jpeg
|
55 |
+
eye_blinks_factor: 1.8
|
56 |
+
height: 846
|
57 |
+
img_length_ratio: 1.248
|
58 |
+
ipadapter_image: ${.condition_images}
|
59 |
+
name: boy_play_guitar
|
60 |
+
prompt: (masterpiece, best quality, highres:1), playing guitar
|
61 |
+
refer_image: ${.condition_images}
|
62 |
+
video_path: null
|
63 |
+
width: 564
|
64 |
+
- condition_images: ./data/images/girl_play_guitar2.jpeg
|
65 |
+
eye_blinks_factor: 1.8
|
66 |
+
height: 1002
|
67 |
+
img_length_ratio: 1.248
|
68 |
+
ipadapter_image: ${.condition_images}
|
69 |
+
name: girl_play_guitar2
|
70 |
+
prompt: (masterpiece, best quality, highres:1), playing guitar
|
71 |
+
refer_image: ${.condition_images}
|
72 |
+
video_path: null
|
73 |
+
width: 564
|
74 |
+
- condition_images: ./data/images/boy_play_guitar2.jpeg
|
75 |
+
eye_blinks_factor: 1.8
|
76 |
+
height: 630
|
77 |
+
img_length_ratio: 1.676
|
78 |
+
ipadapter_image: ${.condition_images}
|
79 |
+
name: boy_play_guitar2
|
80 |
+
prompt: (masterpiece, best quality, highres:1), playing guitar
|
81 |
+
refer_image: ${.condition_images}
|
82 |
+
video_path: null
|
83 |
+
width: 420
|
84 |
+
- condition_images: ./data/images/girl_play_guitar4.jpeg
|
85 |
+
eye_blinks_factor: 1.8
|
86 |
+
height: 846
|
87 |
+
img_length_ratio: 1.248
|
88 |
+
ipadapter_image: ${.condition_images}
|
89 |
+
name: girl_play_guitar4
|
90 |
+
prompt: (masterpiece, best quality, highres:1), playing guitar
|
91 |
+
refer_image: ${.condition_images}
|
92 |
+
video_path: null
|
93 |
+
width: 564
|
94 |
+
- condition_images: ./data/images/dufu.jpeg
|
95 |
+
eye_blinks_factor: 1.8
|
96 |
+
height: 500
|
97 |
+
img_length_ratio: 1.495
|
98 |
+
ipadapter_image: ${.condition_images}
|
99 |
+
name: dufu
|
100 |
+
prompt: (masterpiece, best quality, highres:1),(1girl, solo:1),(beautiful face,
|
101 |
+
soft skin, costume:1),(eye blinks:{eye_blinks_factor}),(head wave:1.3)
|
102 |
+
refer_image: ${.condition_images}
|
103 |
+
video_path: null
|
104 |
+
width: 471
|
105 |
+
- condition_images: ./data/images/Mona_Lisa..jpg
|
106 |
+
eye_blinks_factor: 1.8
|
107 |
+
height: 894
|
108 |
+
img_length_ratio: 1.173
|
109 |
+
ipadapter_image: ${.condition_images}
|
110 |
+
name: Mona_Lisa.
|
111 |
+
prompt: (masterpiece, best quality, highres:1),(1girl, solo:1),(beautiful face,
|
112 |
+
soft skin, costume:1),(eye blinks:{eye_blinks_factor}),(head wave:1.3)
|
113 |
+
refer_image: ${.condition_images}
|
114 |
+
video_path: null
|
115 |
+
width: 600
|
116 |
+
- condition_images: ./data/images/Portrait-of-Dr.-Gachet.jpg
|
117 |
+
eye_blinks_factor: 1.8
|
118 |
+
height: 985
|
119 |
+
img_length_ratio: 0.88
|
120 |
+
ipadapter_image: ${.condition_images}
|
121 |
+
name: Portrait-of-Dr.-Gachet
|
122 |
+
prompt: (masterpiece, best quality, highres:1),(1girl, solo:1),(beautiful face,
|
123 |
+
soft skin, costume:1),(eye blinks:{eye_blinks_factor}),(head wave:1.3)
|
124 |
+
refer_image: ${.condition_images}
|
125 |
+
video_path: null
|
126 |
+
width: 800
|
127 |
+
- condition_images: ./data/images/Self-Portrait-with-Cropped-Hair.jpg
|
128 |
+
eye_blinks_factor: 1.8
|
129 |
+
height: 565
|
130 |
+
img_length_ratio: 1.246
|
131 |
+
ipadapter_image: ${.condition_images}
|
132 |
+
name: Self-Portrait-with-Cropped-Hair
|
133 |
+
prompt: (masterpiece, best quality, highres:1),(1girl, solo:1),(beautiful face,
|
134 |
+
soft skin, costume:1),(eye blinks:{eye_blinks_factor}),(head wave:1.3)
|
135 |
+
refer_image: ${.condition_images}
|
136 |
+
video_path: null
|
137 |
+
width: 848
|
138 |
+
- condition_images: ./data/images/The-Laughing-Cavalier.jpg
|
139 |
+
eye_blinks_factor: 1.8
|
140 |
+
height: 1462
|
141 |
+
img_length_ratio: 0.587
|
142 |
+
ipadapter_image: ${.condition_images}
|
143 |
+
name: The-Laughing-Cavalier
|
144 |
+
prompt: (masterpiece, best quality, highres:1),(1girl, solo:1),(beautiful face,
|
145 |
+
soft skin, costume:1),(eye blinks:{eye_blinks_factor}),(head wave:1.3)
|
146 |
+
refer_image: ${.condition_images}
|
147 |
+
video_path: null
|
148 |
+
width: 1200
|
149 |
+
|
150 |
+
# scene
|
151 |
+
- condition_images: ./data/images/waterfall4.jpeg
|
152 |
+
eye_blinks_factor: 1.8
|
153 |
+
height: 846
|
154 |
+
img_length_ratio: 1.248
|
155 |
+
ipadapter_image: ${.condition_images}
|
156 |
+
name: waterfall4
|
157 |
+
prompt: (masterpiece, best quality, highres:1), peaceful beautiful waterfall, an
|
158 |
+
endless waterfall
|
159 |
+
refer_image: ${.condition_images}
|
160 |
+
video_path: null
|
161 |
+
width: 564
|
162 |
+
- condition_images: ./data/images/river.jpeg
|
163 |
+
eye_blinks_factor: 1.8
|
164 |
+
height: 736
|
165 |
+
img_length_ratio: 0.957
|
166 |
+
ipadapter_image: ${.condition_images}
|
167 |
+
name: river
|
168 |
+
prompt: (masterpiece, best quality, highres:1), peaceful beautiful river
|
169 |
+
refer_image: ${.condition_images}
|
170 |
+
video_path: null
|
171 |
+
width: 736
|
172 |
+
- condition_images: ./data/images/seaside2.jpeg
|
173 |
+
eye_blinks_factor: 1.8
|
174 |
+
height: 1313
|
175 |
+
img_length_ratio: 0.957
|
176 |
+
ipadapter_image: ${.condition_images}
|
177 |
+
name: seaside2
|
178 |
+
prompt: (masterpiece, best quality, highres:1), peaceful beautiful sea scene
|
179 |
+
refer_image: ${.condition_images}
|
180 |
+
video_path: null
|
181 |
+
width: 736
|
182 |
+
|
183 |
+
# video2video
|
184 |
+
- name: "dance1"
|
185 |
+
prompt: "(masterpiece, best quality, highres:1) , a girl is dancing, wearing a dress made of stars, animation"
|
186 |
+
video_path: ./data/source_video/video1_girl_poseseq.mp4
|
187 |
+
condition_images: ./data/images/spark_girl.png
|
188 |
+
refer_image: ${.condition_images}
|
189 |
+
ipadapter_image: ${.condition_images}
|
190 |
+
height: 960
|
191 |
+
width: 512
|
192 |
+
img_length_ratio: 1.0
|
193 |
+
video_is_middle: True # if true, means video_path is controlnet condition, not natural rgb video
|
194 |
+
|
195 |
+
- name: "dance2"
|
196 |
+
prompt: "(best quality), ((masterpiece)), (highres), illustration, original, extremely detailed wallpaper"
|
197 |
+
video_path: ./data/source_video/video1_girl_poseseq.mp4
|
198 |
+
condition_images: ./data/images/cyber_girl.png
|
199 |
+
refer_image: ${.condition_images}
|
200 |
+
ipadapter_image: ${.condition_images}
|
201 |
+
height: 960
|
202 |
+
width: 512
|
203 |
+
img_length_ratio: 1.0
|
204 |
+
video_is_middle: True # if true, means video_path is controlnet condition, not natural rgb video
|
205 |
+
|
206 |
+
- name: "duffy"
|
207 |
+
prompt: "(best quality), ((masterpiece)), (highres), illustration, original, extremely detailed wallpaper"
|
208 |
+
video_path: ./data/source_video/pose-for-Duffy-4.mp4
|
209 |
+
condition_images: ./data/images/duffy.png
|
210 |
+
refer_image: ${.condition_images}
|
211 |
+
ipadapter_image: ${.condition_images}
|
212 |
+
height: 1280
|
213 |
+
width: 704
|
214 |
+
img_length_ratio: 1.0
|
215 |
+
video_is_middle: True # if true, means video_path is controlnet condition, not natural rgb video
|
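
The task entries above use OmegaConf-style relative interpolation (`${.condition_images}`) so that `refer_image` and `ipadapter_image` default to the condition image, and `--target_datas` in the inference commands selects entries by their `name`. A hedged sketch of loading and resolving such a file (assuming OmegaConf, which this syntax implies):

```python
# Sketch: load configs/tasks/example.yaml, resolve ${.condition_images}
# interpolations, and pick the tasks requested via --target_datas.
from omegaconf import OmegaConf

tasks = OmegaConf.load("configs/tasks/example.yaml")
target_datas = {"yongen", "dance1"}

for task in tasks:
    if task.get("name") in target_datas:
        resolved = OmegaConf.to_container(task, resolve=True)
        print(resolved["name"], resolved["refer_image"], resolved.get("video_path"))
```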
controlnet_aux/.gitignore
ADDED
@@ -0,0 +1,178 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Initially taken from Github's Python gitignore file
|
2 |
+
|
3 |
+
# Byte-compiled / optimized / DLL files
|
4 |
+
__pycache__/
|
5 |
+
*.py[cod]
|
6 |
+
*$py.class
|
7 |
+
|
8 |
+
# C extensions
|
9 |
+
*.so
|
10 |
+
|
11 |
+
# tests and logs
|
12 |
+
tests/fixtures/cached_*_text.txt
|
13 |
+
logs/
|
14 |
+
lightning_logs/
|
15 |
+
lang_code_data/
|
16 |
+
tests/outputs
|
17 |
+
|
18 |
+
# Distribution / packaging
|
19 |
+
.Python
|
20 |
+
build/
|
21 |
+
develop-eggs/
|
22 |
+
dist/
|
23 |
+
downloads/
|
24 |
+
eggs/
|
25 |
+
.eggs/
|
26 |
+
lib/
|
27 |
+
lib64/
|
28 |
+
parts/
|
29 |
+
sdist/
|
30 |
+
var/
|
31 |
+
wheels/
|
32 |
+
*.egg-info/
|
33 |
+
.installed.cfg
|
34 |
+
*.egg
|
35 |
+
MANIFEST
|
36 |
+
|
37 |
+
# PyInstaller
|
38 |
+
# Usually these files are written by a python script from a template
|
39 |
+
# before PyInstaller builds the exe, so as to inject date/other infos into it.
|
40 |
+
*.manifest
|
41 |
+
*.spec
|
42 |
+
|
43 |
+
# Installer logs
|
44 |
+
pip-log.txt
|
45 |
+
pip-delete-this-directory.txt
|
46 |
+
|
47 |
+
# Unit test / coverage reports
|
48 |
+
htmlcov/
|
49 |
+
.tox/
|
50 |
+
.nox/
|
51 |
+
.coverage
|
52 |
+
.coverage.*
|
53 |
+
.cache
|
54 |
+
nosetests.xml
|
55 |
+
coverage.xml
|
56 |
+
*.cover
|
57 |
+
.hypothesis/
|
58 |
+
.pytest_cache/
|
59 |
+
|
60 |
+
# Translations
|
61 |
+
*.mo
|
62 |
+
*.pot
|
63 |
+
|
64 |
+
# Django stuff:
|
65 |
+
*.log
|
66 |
+
local_settings.py
|
67 |
+
db.sqlite3
|
68 |
+
|
69 |
+
# Flask stuff:
|
70 |
+
instance/
|
71 |
+
.webassets-cache
|
72 |
+
|
73 |
+
# Scrapy stuff:
|
74 |
+
.scrapy
|
75 |
+
|
76 |
+
# Sphinx documentation
|
77 |
+
docs/_build/
|
78 |
+
|
79 |
+
# PyBuilder
|
80 |
+
target/
|
81 |
+
|
82 |
+
# Jupyter Notebook
|
83 |
+
.ipynb_checkpoints
|
84 |
+
|
85 |
+
# IPython
|
86 |
+
profile_default/
|
87 |
+
ipython_config.py
|
88 |
+
|
89 |
+
# pyenv
|
90 |
+
.python-version
|
91 |
+
|
92 |
+
# celery beat schedule file
|
93 |
+
celerybeat-schedule
|
94 |
+
|
95 |
+
# SageMath parsed files
|
96 |
+
*.sage.py
|
97 |
+
|
98 |
+
# Environments
|
99 |
+
.env
|
100 |
+
.venv
|
101 |
+
env/
|
102 |
+
venv/
|
103 |
+
ENV/
|
104 |
+
env.bak/
|
105 |
+
venv.bak/
|
106 |
+
|
107 |
+
# Spyder project settings
|
108 |
+
.spyderproject
|
109 |
+
.spyproject
|
110 |
+
|
111 |
+
# Rope project settings
|
112 |
+
.ropeproject
|
113 |
+
|
114 |
+
# mkdocs documentation
|
115 |
+
/site
|
116 |
+
|
117 |
+
# mypy
|
118 |
+
.mypy_cache/
|
119 |
+
.dmypy.json
|
120 |
+
dmypy.json
|
121 |
+
|
122 |
+
# Pyre type checker
|
123 |
+
.pyre/
|
124 |
+
|
125 |
+
# vscode
|
126 |
+
.vs
|
127 |
+
.vscode
|
128 |
+
|
129 |
+
# Pycharm
|
130 |
+
.idea
|
131 |
+
|
132 |
+
# TF code
|
133 |
+
tensorflow_code
|
134 |
+
|
135 |
+
# Models
|
136 |
+
proc_data
|
137 |
+
|
138 |
+
# examples
|
139 |
+
runs
|
140 |
+
/runs_old
|
141 |
+
/wandb
|
142 |
+
/examples/runs
|
143 |
+
/examples/**/*.args
|
144 |
+
/examples/rag/sweep
|
145 |
+
|
146 |
+
# data
|
147 |
+
/data
|
148 |
+
serialization_dir
|
149 |
+
|
150 |
+
# emacs
|
151 |
+
*.*~
|
152 |
+
debug.env
|
153 |
+
|
154 |
+
# vim
|
155 |
+
.*.swp
|
156 |
+
|
157 |
+
#ctags
|
158 |
+
tags
|
159 |
+
|
160 |
+
# pre-commit
|
161 |
+
.pre-commit*
|
162 |
+
|
163 |
+
# .lock
|
164 |
+
*.lock
|
165 |
+
|
166 |
+
# DS_Store (MacOS)
|
167 |
+
.DS_Store
|
168 |
+
# RL pipelines may produce mp4 outputs
|
169 |
+
*.mp4
|
170 |
+
|
171 |
+
# dependencies
|
172 |
+
/transformers
|
173 |
+
|
174 |
+
# ruff
|
175 |
+
.ruff_cache
|
176 |
+
|
177 |
+
wandb
|
178 |
+
|
controlnet_aux/LICENSE.txt
ADDED
@@ -0,0 +1,201 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
Apache License
|
2 |
+
Version 2.0, January 2004
|
3 |
+
http://www.apache.org/licenses/
|
4 |
+
|
5 |
+
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
6 |
+
|
7 |
+
1. Definitions.
|
8 |
+
|
9 |
+
"License" shall mean the terms and conditions for use, reproduction,
|
10 |
+
and distribution as defined by Sections 1 through 9 of this document.
|
11 |
+
|
12 |
+
"Licensor" shall mean the copyright owner or entity authorized by
|
13 |
+
the copyright owner that is granting the License.
|
14 |
+
|
15 |
+
"Legal Entity" shall mean the union of the acting entity and all
|
16 |
+
other entities that control, are controlled by, or are under common
|
17 |
+
control with that entity. For the purposes of this definition,
|
18 |
+
"control" means (i) the power, direct or indirect, to cause the
|
19 |
+
direction or management of such entity, whether by contract or
|
20 |
+
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
21 |
+
outstanding shares, or (iii) beneficial ownership of such entity.
|
22 |
+
|
23 |
+
"You" (or "Your") shall mean an individual or Legal Entity
|
24 |
+
exercising permissions granted by this License.
|
25 |
+
|
26 |
+
"Source" form shall mean the preferred form for making modifications,
|
27 |
+
including but not limited to software source code, documentation
|
28 |
+
source, and configuration files.
|
29 |
+
|
30 |
+
"Object" form shall mean any form resulting from mechanical
|
31 |
+
transformation or translation of a Source form, including but
|
32 |
+
not limited to compiled object code, generated documentation,
|
33 |
+
and conversions to other media types.
|
34 |
+
|
35 |
+
"Work" shall mean the work of authorship, whether in Source or
|
36 |
+
Object form, made available under the License, as indicated by a
|
37 |
+
copyright notice that is included in or attached to the work
|
38 |
+
(an example is provided in the Appendix below).
|
39 |
+
|
40 |
+
"Derivative Works" shall mean any work, whether in Source or Object
|
41 |
+
form, that is based on (or derived from) the Work and for which the
|
42 |
+
editorial revisions, annotations, elaborations, or other modifications
|
43 |
+
represent, as a whole, an original work of authorship. For the purposes
|
44 |
+
of this License, Derivative Works shall not include works that remain
|
45 |
+
separable from, or merely link (or bind by name) to the interfaces of,
|
46 |
+
the Work and Derivative Works thereof.
|
47 |
+
|
48 |
+
"Contribution" shall mean any work of authorship, including
|
49 |
+
the original version of the Work and any modifications or additions
|
50 |
+
to that Work or Derivative Works thereof, that is intentionally
|
51 |
+
submitted to Licensor for inclusion in the Work by the copyright owner
|
52 |
+
or by an individual or Legal Entity authorized to submit on behalf of
|
53 |
+
the copyright owner. For the purposes of this definition, "submitted"
|
54 |
+
means any form of electronic, verbal, or written communication sent
|
55 |
+
to the Licensor or its representatives, including but not limited to
|
56 |
+
communication on electronic mailing lists, source code control systems,
|
57 |
+
and issue tracking systems that are managed by, or on behalf of, the
|
58 |
+
Licensor for the purpose of discussing and improving the Work, but
|
59 |
+
excluding communication that is conspicuously marked or otherwise
|
60 |
+
designated in writing by the copyright owner as "Not a Contribution."
|
61 |
+
|
62 |
+
"Contributor" shall mean Licensor and any individual or Legal Entity
|
63 |
+
on behalf of whom a Contribution has been received by Licensor and
|
64 |
+
subsequently incorporated within the Work.
|
65 |
+
|
66 |
+
2. Grant of Copyright License. Subject to the terms and conditions of
|
67 |
+
this License, each Contributor hereby grants to You a perpetual,
|
68 |
+
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
69 |
+
copyright license to reproduce, prepare Derivative Works of,
|
70 |
+
publicly display, publicly perform, sublicense, and distribute the
|
71 |
+
Work and such Derivative Works in Source or Object form.
|
72 |
+
|
73 |
+
3. Grant of Patent License. Subject to the terms and conditions of
|
74 |
+
this License, each Contributor hereby grants to You a perpetual,
|
75 |
+
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
76 |
+
(except as stated in this section) patent license to make, have made,
|
77 |
+
use, offer to sell, sell, import, and otherwise transfer the Work,
|
78 |
+
where such license applies only to those patent claims licensable
|
79 |
+
by such Contributor that are necessarily infringed by their
|
80 |
+
Contribution(s) alone or by combination of their Contribution(s)
|
81 |
+
with the Work to which such Contribution(s) was submitted. If You
|
82 |
+
institute patent litigation against any entity (including a
|
83 |
+
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
84 |
+
or a Contribution incorporated within the Work constitutes direct
|
85 |
+
or contributory patent infringement, then any patent licenses
|
86 |
+
granted to You under this License for that Work shall terminate
|
87 |
+
as of the date such litigation is filed.
|
88 |
+
|
89 |
+
4. Redistribution. You may reproduce and distribute copies of the
|
90 |
+
Work or Derivative Works thereof in any medium, with or without
|
91 |
+
modifications, and in Source or Object form, provided that You
|
92 |
+
meet the following conditions:
|
93 |
+
|
94 |
+
(a) You must give any other recipients of the Work or
|
95 |
+
Derivative Works a copy of this License; and
|
96 |
+
|
97 |
+
(b) You must cause any modified files to carry prominent notices
|
98 |
+
stating that You changed the files; and
|
99 |
+
|
100 |
+
(c) You must retain, in the Source form of any Derivative Works
|
101 |
+
that You distribute, all copyright, patent, trademark, and
|
102 |
+
attribution notices from the Source form of the Work,
|
103 |
+
excluding those notices that do not pertain to any part of
|
104 |
+
the Derivative Works; and
|
105 |
+
|
106 |
+
(d) If the Work includes a "NOTICE" text file as part of its
|
107 |
+
distribution, then any Derivative Works that You distribute must
|
108 |
+
include a readable copy of the attribution notices contained
|
109 |
+
within such NOTICE file, excluding those notices that do not
|
110 |
+
pertain to any part of the Derivative Works, in at least one
|
111 |
+
of the following places: within a NOTICE text file distributed
|
112 |
+
as part of the Derivative Works; within the Source form or
|
113 |
+
documentation, if provided along with the Derivative Works; or,
|
114 |
+
within a display generated by the Derivative Works, if and
|
115 |
+
wherever such third-party notices normally appear. The contents
|
116 |
+
of the NOTICE file are for informational purposes only and
|
117 |
+
do not modify the License. You may add Your own attribution
|
118 |
+
notices within Derivative Works that You distribute, alongside
|
119 |
+
or as an addendum to the NOTICE text from the Work, provided
|
120 |
+
that such additional attribution notices cannot be construed
|
121 |
+
as modifying the License.
|
122 |
+
|
123 |
+
You may add Your own copyright statement to Your modifications and
|
124 |
+
may provide additional or different license terms and conditions
|
125 |
+
for use, reproduction, or distribution of Your modifications, or
|
126 |
+
for any such Derivative Works as a whole, provided Your use,
|
127 |
+
reproduction, and distribution of the Work otherwise complies with
|
128 |
+
the conditions stated in this License.
|
129 |
+
|
130 |
+
5. Submission of Contributions. Unless You explicitly state otherwise,
|
131 |
+
any Contribution intentionally submitted for inclusion in the Work
|
132 |
+
by You to the Licensor shall be under the terms and conditions of
|
133 |
+
this License, without any additional terms or conditions.
|
134 |
+
Notwithstanding the above, nothing herein shall supersede or modify
|
135 |
+
the terms of any separate license agreement you may have executed
|
136 |
+
with Licensor regarding such Contributions.
|
137 |
+
|
138 |
+
6. Trademarks. This License does not grant permission to use the trade
|
139 |
+
names, trademarks, service marks, or product names of the Licensor,
|
140 |
+
except as required for reasonable and customary use in describing the
|
141 |
+
origin of the Work and reproducing the content of the NOTICE file.
|
142 |
+
|
143 |
+
7. Disclaimer of Warranty. Unless required by applicable law or
|
144 |
+
agreed to in writing, Licensor provides the Work (and each
|
145 |
+
Contributor provides its Contributions) on an "AS IS" BASIS,
|
146 |
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
147 |
+
implied, including, without limitation, any warranties or conditions
|
148 |
+
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
149 |
+
PARTICULAR PURPOSE. You are solely responsible for determining the
|
150 |
+
appropriateness of using or redistributing the Work and assume any
|
151 |
+
risks associated with Your exercise of permissions under this License.
|
152 |
+
|
153 |
+
8. Limitation of Liability. In no event and under no legal theory,
|
154 |
+
whether in tort (including negligence), contract, or otherwise,
|
155 |
+
unless required by applicable law (such as deliberate and grossly
|
156 |
+
negligent acts) or agreed to in writing, shall any Contributor be
|
157 |
+
liable to You for damages, including any direct, indirect, special,
|
158 |
+
incidental, or consequential damages of any character arising as a
|
159 |
+
result of this License or out of the use or inability to use the
|
160 |
+
Work (including but not limited to damages for loss of goodwill,
|
161 |
+
work stoppage, computer failure or malfunction, or any and all
|
162 |
+
other commercial damages or losses), even if such Contributor
|
163 |
+
has been advised of the possibility of such damages.
|
164 |
+
|
165 |
+
9. Accepting Warranty or Additional Liability. While redistributing
|
166 |
+
the Work or Derivative Works thereof, You may choose to offer,
|
167 |
+
and charge a fee for, acceptance of support, warranty, indemnity,
|
168 |
+
or other liability obligations and/or rights consistent with this
|
169 |
+
License. However, in accepting such obligations, You may act only
|
170 |
+
on Your own behalf and on Your sole responsibility, not on behalf
|
171 |
+
of any other Contributor, and only if You agree to indemnify,
|
172 |
+
defend, and hold each Contributor harmless for any liability
|
173 |
+
incurred by, or claims asserted against, such Contributor by reason
|
174 |
+
of your accepting any such warranty or additional liability.
|
175 |
+
|
176 |
+
END OF TERMS AND CONDITIONS
|
177 |
+
|
178 |
+
APPENDIX: How to apply the Apache License to your work.
|
179 |
+
|
180 |
+
To apply the Apache License to your work, attach the following
|
181 |
+
boilerplate notice, with the fields enclosed by brackets "[]"
|
182 |
+
replaced with your own identifying information. (Don't include
|
183 |
+
the brackets!) The text should be enclosed in the appropriate
|
184 |
+
comment syntax for the file format. We also recommend that a
|
185 |
+
file or class name and description of purpose be included on the
|
186 |
+
same "printed page" as the copyright notice for easier
|
187 |
+
identification within third-party archives.
|
188 |
+
|
189 |
+
Copyright [yyyy] [name of copyright owner]
|
190 |
+
|
191 |
+
Licensed under the Apache License, Version 2.0 (the "License");
|
192 |
+
you may not use this file except in compliance with the License.
|
193 |
+
You may obtain a copy of the License at
|
194 |
+
|
195 |
+
http://www.apache.org/licenses/LICENSE-2.0
|
196 |
+
|
197 |
+
Unless required by applicable law or agreed to in writing, software
|
198 |
+
distributed under the License is distributed on an "AS IS" BASIS,
|
199 |
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
200 |
+
See the License for the specific language governing permissions and
|
201 |
+
limitations under the License.
|
controlnet_aux/README.md
ADDED
@@ -0,0 +1,112 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# ControlNet auxiliary models
|
2 |
+
|
3 |
+
This is a PyPi installable package of [lllyasviel's ControlNet Annotators](https://github.com/lllyasviel/ControlNet/tree/main/annotator)
|
4 |
+
|
5 |
+
The code is copy-pasted from the respective folders in https://github.com/lllyasviel/ControlNet/tree/main/annotator and connected to [the 🤗 Hub](https://huggingface.co/lllyasviel/Annotators).
|
6 |
+
|
7 |
+
All credit & copyright goes to https://github.com/lllyasviel .
|
8 |
+
|
9 |
+
## Install
|
10 |
+
|
11 |
+
```
|
12 |
+
pip install controlnet-aux==0.0.7
|
13 |
+
```
|
14 |
+
|
15 |
+
To support DWPose which is dependent on MMDetection, MMCV and MMPose
|
16 |
+
```
|
17 |
+
pip install -U openmim
|
18 |
+
mim install mmengine
|
19 |
+
mim install "mmcv>=2.0.1"
|
20 |
+
mim install "mmdet>=3.1.0"
|
21 |
+
mim install "mmpose>=1.1.0"
|
22 |
+
```
|
23 |
+
## Usage
|
24 |
+
|
25 |
+
|
26 |
+
You can use the processor class, which can load each of the auxiliary models with the following code
|
27 |
+
```python
|
28 |
+
import requests
|
29 |
+
from PIL import Image
|
30 |
+
from io import BytesIO
|
31 |
+
|
32 |
+
from controlnet_aux.processor import Processor
|
33 |
+
|
34 |
+
# load image
|
35 |
+
url = "https://huggingface.co/lllyasviel/sd-controlnet-openpose/resolve/main/images/pose.png"
|
36 |
+
|
37 |
+
response = requests.get(url)
|
38 |
+
img = Image.open(BytesIO(response.content)).convert("RGB").resize((512, 512))
|
39 |
+
|
40 |
+
# load processor from processor_id
|
41 |
+
# options are:
|
42 |
+
# ["canny", "depth_leres", "depth_leres++", "depth_midas", "depth_zoe", "lineart_anime",
|
43 |
+
# "lineart_coarse", "lineart_realistic", "mediapipe_face", "mlsd", "normal_bae", "normal_midas",
|
44 |
+
# "openpose", "openpose_face", "openpose_faceonly", "openpose_full", "openpose_hand",
|
45 |
+
# "scribble_hed, "scribble_pidinet", "shuffle", "softedge_hed", "softedge_hedsafe",
|
46 |
+
# "softedge_pidinet", "softedge_pidsafe", "dwpose"]
|
47 |
+
processor_id = 'scribble_hed'
|
48 |
+
processor = Processor(processor_id)
|
49 |
+
|
50 |
+
processed_image = processor(img, to_pil=True)
|
51 |
+
```
|
52 |
+
|
53 |
+
Each model can be loaded individually by importing and instantiating them as follows
|
54 |
+
```python
|
55 |
+
from PIL import Image
|
56 |
+
import requests
|
57 |
+
from io import BytesIO
|
58 |
+
from controlnet_aux import HEDdetector, MidasDetector, MLSDdetector, OpenposeDetector, PidiNetDetector, NormalBaeDetector, LineartDetector, LineartAnimeDetector, CannyDetector, ContentShuffleDetector, ZoeDetector, MediapipeFaceDetector, SamDetector, LeresDetector, DWposeDetector
|
59 |
+
|
60 |
+
# load image
|
61 |
+
url = "https://huggingface.co/lllyasviel/sd-controlnet-openpose/resolve/main/images/pose.png"
|
62 |
+
|
63 |
+
response = requests.get(url)
|
64 |
+
img = Image.open(BytesIO(response.content)).convert("RGB").resize((512, 512))
|
65 |
+
|
66 |
+
# load checkpoints
|
67 |
+
hed = HEDdetector.from_pretrained("lllyasviel/Annotators")
|
68 |
+
midas = MidasDetector.from_pretrained("lllyasviel/Annotators")
|
69 |
+
mlsd = MLSDdetector.from_pretrained("lllyasviel/Annotators")
|
70 |
+
open_pose = OpenposeDetector.from_pretrained("lllyasviel/Annotators")
|
71 |
+
pidi = PidiNetDetector.from_pretrained("lllyasviel/Annotators")
|
72 |
+
normal_bae = NormalBaeDetector.from_pretrained("lllyasviel/Annotators")
|
73 |
+
lineart = LineartDetector.from_pretrained("lllyasviel/Annotators")
|
74 |
+
lineart_anime = LineartAnimeDetector.from_pretrained("lllyasviel/Annotators")
|
75 |
+
zoe = ZoeDetector.from_pretrained("lllyasviel/Annotators")
|
76 |
+
sam = SamDetector.from_pretrained("ybelkada/segment-anything", subfolder="checkpoints")
|
77 |
+
mobile_sam = SamDetector.from_pretrained("dhkim2810/MobileSAM", model_type="vit_t", filename="mobile_sam.pt")
|
78 |
+
leres = LeresDetector.from_pretrained("lllyasviel/Annotators")
|
79 |
+
|
80 |
+
# specify configs, ckpts and device, or it will be downloaded automatically and use cpu by default
|
81 |
+
# det_config: ./src/controlnet_aux/dwpose/yolox_config/yolox_l_8xb8-300e_coco.py
|
82 |
+
# det_ckpt: https://download.openmmlab.com/mmdetection/v2.0/yolox/yolox_l_8x8_300e_coco/yolox_l_8x8_300e_coco_20211126_140236-d3bd2b23.pth
|
83 |
+
# pose_config: ./src/controlnet_aux/dwpose/dwpose_config/dwpose-l_384x288.py
|
84 |
+
# pose_ckpt: https://huggingface.co/wanghaofan/dw-ll_ucoco_384/resolve/main/dw-ll_ucoco_384.pth
|
85 |
+
import torch
|
86 |
+
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
|
87 |
+
dwpose = DWposeDetector(det_config=det_config, det_ckpt=det_ckpt, pose_config=pose_config, pose_ckpt=pose_ckpt, device=device)
|
88 |
+
|
89 |
+
# instantiate
|
90 |
+
canny = CannyDetector()
|
91 |
+
content = ContentShuffleDetector()
|
92 |
+
face_detector = MediapipeFaceDetector()
|
93 |
+
|
94 |
+
|
95 |
+
# process
|
96 |
+
processed_image_hed = hed(img)
|
97 |
+
processed_image_midas = midas(img)
|
98 |
+
processed_image_mlsd = mlsd(img)
|
99 |
+
processed_image_open_pose = open_pose(img, hand_and_face=True)
|
100 |
+
processed_image_pidi = pidi(img, safe=True)
|
101 |
+
processed_image_normal_bae = normal_bae(img)
|
102 |
+
processed_image_lineart = lineart(img, coarse=True)
|
103 |
+
processed_image_lineart_anime = lineart_anime(img)
|
104 |
+
processed_image_zoe = zoe(img)
|
105 |
+
processed_image_sam = sam(img)
|
106 |
+
processed_image_leres = leres(img)
|
107 |
+
|
108 |
+
processed_image_canny = canny(img)
|
109 |
+
processed_image_content = content(img)
|
110 |
+
processed_image_mediapipe_face = face_detector(img)
|
111 |
+
processed_image_dwpose = dwpose(img)
|
112 |
+
```
|
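
One caveat about the DWpose part of the usage snippet above: `det_config`, `det_ckpt`, `pose_config` and `pose_ckpt` are only given as comments, so the call as written would raise `NameError`. A hedged completion that assigns them from exactly those commented values before constructing the detector:

```python
# Assign the DWpose config/checkpoint locations referenced in the comments
# (paths are relative to the controlnet_aux repo), then build the detector.
import torch
from controlnet_aux import DWposeDetector

det_config = "./src/controlnet_aux/dwpose/yolox_config/yolox_l_8xb8-300e_coco.py"
det_ckpt = "https://download.openmmlab.com/mmdetection/v2.0/yolox/yolox_l_8x8_300e_coco/yolox_l_8x8_300e_coco_20211126_140236-d3bd2b23.pth"
pose_config = "./src/controlnet_aux/dwpose/dwpose_config/dwpose-l_384x288.py"
pose_ckpt = "https://huggingface.co/wanghaofan/dw-ll_ucoco_384/resolve/main/dw-ll_ucoco_384.pth"

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
dwpose = DWposeDetector(
    det_config=det_config,
    det_ckpt=det_ckpt,
    pose_config=pose_config,
    pose_ckpt=pose_ckpt,
    device=device,
)
```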
controlnet_aux/setup.py
ADDED
@@ -0,0 +1,233 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Copyright 2023 The HuggingFace Team. All rights reserved.
|
2 |
+
#
|
3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4 |
+
# you may not use this file except in compliance with the License.
|
5 |
+
# You may obtain a copy of the License at
|
6 |
+
#
|
7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8 |
+
#
|
9 |
+
# Unless required by applicable law or agreed to in writing, software
|
10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12 |
+
# See the License for the specific language governing permissions and
|
13 |
+
# limitations under the License.
|
14 |
+
|
15 |
+
"""
|
16 |
+
Simple check list from AllenNLP repo: https://github.com/allenai/allennlp/blob/main/setup.py
|
17 |
+
|
18 |
+
To create the package for pypi.
|
19 |
+
|
20 |
+
1. Run `make pre-release` (or `make pre-patch` for a patch release) then run `make fix-copies` to fix the index of the
|
21 |
+
documentation.
|
22 |
+
|
23 |
+
If releasing on a special branch, copy the updated README.md on the main branch for your the commit you will make
|
24 |
+
for the post-release and run `make fix-copies` on the main branch as well.
|
25 |
+
|
26 |
+
2. Run Tests for Amazon Sagemaker. The documentation is located in `./tests/sagemaker/README.md`, otherwise @philschmid.
|
27 |
+
|
28 |
+
3. Unpin specific versions from setup.py that use a git install.
|
29 |
+
|
30 |
+
4. Checkout the release branch (v<RELEASE>-release, for example v4.19-release), and commit these changes with the
|
31 |
+
message: "Release: <RELEASE>" and push.
|
32 |
+
|
33 |
+
5. Wait for the tests on main to be completed and be green (otherwise revert and fix bugs)
|
34 |
+
|
35 |
+
6. Add a tag in git to mark the release: "git tag v<RELEASE> -m 'Adds tag v<RELEASE> for pypi' "
|
36 |
+
Push the tag to git: git push --tags origin v<RELEASE>-release
|
37 |
+
|
38 |
+
7. Build both the sources and the wheel. Do not change anything in setup.py between
|
39 |
+
creating the wheel and the source distribution (obviously).
|
40 |
+
|
41 |
+
For the wheel, run: "python setup.py bdist_wheel" in the top level directory.
|
42 |
+
(this will build a wheel for the python version you use to build it).
|
43 |
+
|
44 |
+
For the sources, run: "python setup.py sdist"
|
45 |
+
You should now have a /dist directory with both .whl and .tar.gz source versions.
|
46 |
+
|
47 |
+
8. Check that everything looks correct by uploading the package to the pypi test server:
|
48 |
+
|
49 |
+
twine upload dist/* -r pypitest
|
50 |
+
(pypi suggest using twine as other methods upload files via plaintext.)
|
51 |
+
You may have to specify the repository url, use the following command then:
|
52 |
+
twine upload dist/* -r pypitest --repository-url=https://test.pypi.org/legacy/
|
53 |
+
|
54 |
+
Check that you can install it in a virtualenv by running:
|
55 |
+
pip install -i https://testpypi.python.org/pypi diffusers
|
56 |
+
|
57 |
+
Check you can run the following commands:
|
58 |
+
python -c "from diffusers import pipeline; classifier = pipeline('text-classification'); print(classifier('What a nice release'))"
|
59 |
+
python -c "from diffusers import *"
|
60 |
+
|
61 |
+
9. Upload the final version to actual pypi:
|
62 |
+
twine upload dist/* -r pypi
|
63 |
+
|
64 |
+
10. Copy the release notes from RELEASE.md to the tag in github once everything is looking hunky-dory.
|
65 |
+
|
66 |
+
11. Run `make post-release` (or, for a patch release, `make post-patch`). If you were on a branch for the release,
|
67 |
+
you need to go back to main before executing this.
|
68 |
+
"""
|
69 |
+
|
70 |
+
import os
|
71 |
+
import re
|
72 |
+
from distutils.core import Command
|
73 |
+
|
74 |
+
from setuptools import find_packages, setup
|
75 |
+
|
76 |
+
# IMPORTANT:
|
77 |
+
# 1. all dependencies should be listed here with their version requirements if any
|
78 |
+
# 2. once modified, run: `make deps_table_update` to update src/diffusers/dependency_versions_table.py
|
79 |
+
_deps = [
|
80 |
+
"Pillow",
|
81 |
+
"torch",
|
82 |
+
"numpy",
|
83 |
+
"filelock",
|
84 |
+
"importlib_metadata",
|
85 |
+
"opencv-python-headless",
|
86 |
+
"scipy",
|
87 |
+
"huggingface_hub",
|
88 |
+
"einops",
|
89 |
+
"timm",
|
90 |
+
"torchvision",
|
91 |
+
"scikit-image"
|
92 |
+
]
|
93 |
+
|
94 |
+
# this is a lookup table with items like:
|
95 |
+
#
|
96 |
+
# tokenizers: "huggingface-hub==0.8.0"
|
97 |
+
# packaging: "packaging"
|
98 |
+
#
|
99 |
+
# some of the values are versioned whereas others aren't.
|
100 |
+
deps = {
|
101 |
+
b: a for a, b in (re.findall(r"^(([^!=<>~]+)(?:[!=<>~].*)?$)", x)[0] for x in _deps)
|
102 |
+
}
|
103 |
+
|
104 |
+
# since we save this data in src/diffusers/dependency_versions_table.py it can be easily accessed from
|
105 |
+
# anywhere. If you need to quickly access the data from this table in a shell, you can do so easily with:
|
106 |
+
#
|
107 |
+
# python -c 'import sys; from diffusers.dependency_versions_table import deps; \
|
108 |
+
# print(" ".join([ deps[x] for x in sys.argv[1:]]))' tokenizers datasets
|
109 |
+
#
|
110 |
+
# Just pass the desired package names to that script as it's shown with 2 packages above.
|
111 |
+
#
|
112 |
+
# If diffusers is not yet installed and the work is done from the cloned repo remember to add `PYTHONPATH=src` to the script above
|
113 |
+
#
|
114 |
+
# You can then feed this for example to `pip`:
|
115 |
+
#
|
116 |
+
# pip install -U $(python -c 'import sys; from diffusers.dependency_versions_table import deps; \
|
117 |
+
# print(" ".join([ deps[x] for x in sys.argv[1:]]))' tokenizers datasets)
|
118 |
+
#
|
119 |
+
|
120 |
+
|
121 |
+
def deps_list(*pkgs):
|
122 |
+
return [deps[pkg] for pkg in pkgs]
|
123 |
+
|
124 |
+
|
125 |
+
class DepsTableUpdateCommand(Command):
|
126 |
+
"""
|
127 |
+
A custom distutils command that updates the dependency table.
|
128 |
+
usage: python setup.py deps_table_update
|
129 |
+
"""
|
130 |
+
|
131 |
+
description = "build runtime dependency table"
|
132 |
+
user_options = [
|
133 |
+
# format: (long option, short option, description).
|
134 |
+
(
|
135 |
+
"dep-table-update",
|
136 |
+
None,
|
137 |
+
"updates src/diffusers/dependency_versions_table.py",
|
138 |
+
),
|
139 |
+
]
|
140 |
+
|
141 |
+
def initialize_options(self):
|
142 |
+
pass
|
143 |
+
|
144 |
+
def finalize_options(self):
|
145 |
+
pass
|
146 |
+
|
147 |
+
def run(self):
|
148 |
+
entries = "\n".join([f' "{k}": "{v}",' for k, v in deps.items()])
|
149 |
+
content = [
|
150 |
+
"# THIS FILE HAS BEEN AUTOGENERATED. To update:",
|
151 |
+
"# 1. modify the `_deps` dict in setup.py",
|
152 |
+
"# 2. run `make deps_table_update``",
|
153 |
+
"deps = {",
|
154 |
+
entries,
|
155 |
+
"}",
|
156 |
+
"",
|
157 |
+
]
|
158 |
+
target = "src/controlnet_aux/dependency_versions_table.py"
|
159 |
+
print(f"updating {target}")
|
160 |
+
with open(target, "w", encoding="utf-8", newline="\n") as f:
|
161 |
+
f.write("\n".join(content))
|
162 |
+
|
163 |
+
|
164 |
+
extras = {}
|
165 |
+
|
166 |
+
install_requires = [
|
167 |
+
deps["torch"],
|
168 |
+
deps["importlib_metadata"],
|
169 |
+
deps["huggingface_hub"],
|
170 |
+
deps["scipy"],
|
171 |
+
deps["opencv-python-headless"],
|
172 |
+
deps["filelock"],
|
173 |
+
deps["numpy"],
|
174 |
+
deps["Pillow"],
|
175 |
+
deps["einops"],
|
176 |
+
deps["torchvision"],
|
177 |
+
deps["timm"],
|
178 |
+
deps["scikit-image"],
|
179 |
+
]
|
180 |
+
|
181 |
+
setup(
|
182 |
+
name="controlnet_aux",
|
183 |
+
version="0.0.6", # expected format is one of x.y.z.dev0, or x.y.z.rc1 or x.y.z (no to dashes, yes to dots)
|
184 |
+
description="Auxillary models for controlnet",
|
185 |
+
long_description=open("README.md", "r", encoding="utf-8").read(),
|
186 |
+
long_description_content_type="text/markdown",
|
187 |
+
keywords="deep learning",
|
188 |
+
license="Apache",
|
189 |
+
author="The HuggingFace team",
|
190 |
+
author_email="patrick@huggingface.co",
|
191 |
+
url="https://github.com/patrickvonplaten/controlnet_aux",
|
192 |
+
package_dir={"": "src"},
|
193 |
+
packages=find_packages("src"),
|
194 |
+
include_package_data=True,
|
195 |
+
python_requires=">=3.7.0",
|
196 |
+
install_requires=install_requires,
|
197 |
+
extras_require=extras,
|
198 |
+
classifiers=[
|
199 |
+
"Development Status :: 5 - Production/Stable",
|
200 |
+
"Intended Audience :: Developers",
|
201 |
+
"Intended Audience :: Education",
|
202 |
+
"Intended Audience :: Science/Research",
|
203 |
+
"License :: OSI Approved :: Apache Software License",
|
204 |
+
"Operating System :: OS Independent",
|
205 |
+
"Programming Language :: Python :: 3",
|
206 |
+
"Programming Language :: Python :: 3.7",
|
207 |
+
"Programming Language :: Python :: 3.8",
|
208 |
+
"Programming Language :: Python :: 3.9",
|
209 |
+
"Topic :: Scientific/Engineering :: Artificial Intelligence",
|
210 |
+
],
|
211 |
+
cmdclass={"deps_table_update": DepsTableUpdateCommand},
|
212 |
+
package_data={'controlnet_aux' : ['zoe/zoedepth/models/zoedepth/*.json', 'zoe/zoedepth/models/zoedepth_nk/*.json']}
|
213 |
+
)
|
214 |
+
|
215 |
+
# Release checklist
|
216 |
+
# 1. Change the version in __init__.py and setup.py.
|
217 |
+
# 2. Commit these changes with the message: "Release: Release"
|
218 |
+
# 3. Add a tag in git to mark the release: "git tag RELEASE -m 'Adds tag RELEASE for pypi' "
|
219 |
+
# Push the tag to git: git push --tags origin main
|
220 |
+
# 4. Run the following commands in the top-level directory:
|
221 |
+
# python setup.py bdist_wheel
|
222 |
+
# python setup.py sdist
|
223 |
+
# 5. Upload the package to the pypi test server first:
|
224 |
+
# twine upload dist/* -r pypitest
|
225 |
+
# twine upload dist/* -r pypitest --repository-url=https://test.pypi.org/legacy/
|
226 |
+
# 6. Check that you can install it in a virtualenv by running:
|
227 |
+
# pip install -i https://testpypi.python.org/pypi diffusers
|
228 |
+
# diffusers env
|
229 |
+
# diffusers test
|
230 |
+
# 7. Upload the final version to actual pypi:
|
231 |
+
# twine upload dist/* -r pypi
|
232 |
+
# 8. Add release notes to the tag in github once everything is looking hunky-dory.
|
233 |
+
# 9. Update the version in __init__.py, setup.py to the new version "-dev" and push to master
|
controlnet_aux/src/controlnet_aux/__init__.py
ADDED
@@ -0,0 +1,18 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
__version__ = "0.0.6"
|
2 |
+
|
3 |
+
from .hed import HEDdetector
|
4 |
+
from .leres import LeresDetector
|
5 |
+
from .lineart import LineartDetector
|
6 |
+
from .lineart_anime import LineartAnimeDetector
|
7 |
+
from .midas import MidasDetector
|
8 |
+
from .mlsd import MLSDdetector
|
9 |
+
from .normalbae import NormalBaeDetector
|
10 |
+
from .open_pose import OpenposeDetector
|
11 |
+
from .pidi import PidiNetDetector
|
12 |
+
from .zoe import ZoeDetector
|
13 |
+
|
14 |
+
from .canny import CannyDetector
|
15 |
+
from .mediapipe_face import MediapipeFaceDetector
|
16 |
+
from .segment_anything import SamDetector
|
17 |
+
from .shuffle import ContentShuffleDetector
|
18 |
+
from .dwpose import DWposeDetector
|
controlnet_aux/src/controlnet_aux/canny/__init__.py
ADDED
@@ -0,0 +1,36 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import warnings
|
2 |
+
import cv2
|
3 |
+
import numpy as np
|
4 |
+
from PIL import Image
|
5 |
+
from ..util import HWC3, resize_image
|
6 |
+
|
7 |
+
class CannyDetector:
|
8 |
+
def __call__(self, input_image=None, low_threshold=100, high_threshold=200, detect_resolution=512, image_resolution=512, output_type=None, **kwargs):
|
9 |
+
if "img" in kwargs:
|
10 |
+
warnings.warn("img is deprecated, please use `input_image=...` instead.", DeprecationWarning)
|
11 |
+
input_image = kwargs.pop("img")
|
12 |
+
|
13 |
+
if input_image is None:
|
14 |
+
raise ValueError("input_image must be defined.")
|
15 |
+
|
16 |
+
if not isinstance(input_image, np.ndarray):
|
17 |
+
input_image = np.array(input_image, dtype=np.uint8)
|
18 |
+
output_type = output_type or "pil"
|
19 |
+
else:
|
20 |
+
output_type = output_type or "np"
|
21 |
+
|
22 |
+
input_image = HWC3(input_image)
|
23 |
+
input_image = resize_image(input_image, detect_resolution)
|
24 |
+
|
25 |
+
detected_map = cv2.Canny(input_image, low_threshold, high_threshold)
|
26 |
+
detected_map = HWC3(detected_map)
|
27 |
+
|
28 |
+
img = resize_image(input_image, image_resolution)
|
29 |
+
H, W, C = img.shape
|
30 |
+
|
31 |
+
detected_map = cv2.resize(detected_map, (W, H), interpolation=cv2.INTER_LINEAR)
|
32 |
+
|
33 |
+
if output_type == "pil":
|
34 |
+
detected_map = Image.fromarray(detected_map)
|
35 |
+
|
36 |
+
return detected_map
|
controlnet_aux/src/controlnet_aux/dwpose/__init__.py
ADDED
@@ -0,0 +1,235 @@
# Openpose
# Original from CMU https://github.com/CMU-Perceptual-Computing-Lab/openpose
# 2nd Edited by https://github.com/Hzzone/pytorch-openpose
# 3rd Edited by ControlNet
# 4th Edited by ControlNet (added face and correct hands)

import os

os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"

import cv2
import torch
import numpy as np
from PIL import Image

from ..util import HWC3, resize_image
from . import util
from pprint import pprint


def draw_pose(pose, H, W):
    bodies = pose["bodies"]
    faces = pose["faces"]
    hands = pose["hands"]
    candidate = bodies["candidate"]
    subset = bodies["subset"]

    canvas = np.zeros(shape=(H, W, 3), dtype=np.uint8)
    canvas = util.draw_bodypose(canvas, candidate, subset)
    canvas = util.draw_handpose(canvas, hands)
    canvas = util.draw_facepose(canvas, faces)

    return canvas


def draw_pose_on_canvas(pose, canvas):
    bodies = pose["bodies"]
    faces = pose["faces"]
    hands = pose["hands"]
    candidate = bodies["candidate"]
    subset = bodies["subset"]

    canvas = util.draw_bodypose(canvas, candidate, subset)
    canvas = util.draw_handpose(canvas, hands)
    canvas = util.draw_facepose(canvas, faces)

    return canvas


def candidate2pose(
    candidate,
    subset,
    include_body: bool = True,
    include_face: bool = False,
    hand_and_face: bool = None,
    include_hand: bool = True,
):
    if hand_and_face is not None:
        include_face = True
        include_hand = True

    nums, keys, locs = candidate.shape
    body = candidate[:, :18].copy()
    body = body.reshape(nums * 18, locs)
    score = subset[:, :18]

    for i in range(len(score)):
        for j in range(len(score[i])):
            if score[i][j] > 0.3:
                score[i][j] = int(18 * i + j)
            else:
                score[i][j] = -1

    un_visible = subset < 0.3
    candidate[un_visible] = -1

    foot = candidate[:, 18:24]

    faces = candidate[:, 24:92]

    hands = candidate[:, 92:113]
    hands = np.vstack([hands, candidate[:, 113:]])

    bodies = dict(candidate=body, subset=score)
    if not include_body:
        bodies = []
    if not include_face:
        faces = []
    if not include_hand:
        hands = []
    pose = dict(bodies=bodies, hands=hands, faces=faces)
    return pose


def size_calculate(H, W, resolution):
    H = float(H)
    W = float(W)
    k = float(resolution) / min(H, W)
    H *= k
    W *= k
    H = int(np.round(H / 64.0)) * 64
    W = int(np.round(W / 64.0)) * 64
    return H, W


def pose2map(pose, H_in, W_in, detect_resolution, image_resolution):
    H, W = size_calculate(H_in, W_in, detect_resolution)
    detected_map = draw_pose(pose, H, W)
    detected_map = HWC3(detected_map)

    H, W = size_calculate(H, W, image_resolution)

    detected_map = cv2.resize(detected_map, (W, H), interpolation=cv2.INTER_LINEAR)

    return detected_map


def pose2map_on_canvas(pose, H_in, W_in, detect_resolution, image_resolution, canvas):
    H, W = size_calculate(H_in, W_in, detect_resolution)
    detected_map = draw_pose_on_canvas(pose, canvas)
    detected_map = HWC3(detected_map)

    H, W = size_calculate(H, W, image_resolution)

    detected_map = cv2.resize(detected_map, (W, H), interpolation=cv2.INTER_LINEAR)

    return detected_map


class DWposeDetector:
    def __init__(
        self,
        det_config=None,
        det_ckpt=None,
        pose_config=None,
        pose_ckpt=None,
        device="cpu",
    ):
        from .wholebody import Wholebody

        self.pose_estimation = Wholebody(
            det_config, det_ckpt, pose_config, pose_ckpt, device
        )

    def to(self, device):
        self.pose_estimation.to(device)
        return self

    def __call__(
        self,
        input_image,
        detect_resolution=512,
        image_resolution=512,
        output_type="pil",
        return_pose_dict=False,
        return_pose_only=False,
        include_body: bool = True,
        include_hand: bool = True,
        include_face: bool = True,
        hand_hand_face: bool = None,
        **kwargs
    ):
        if hand_hand_face:
            include_face = True
            include_hand = True
        input_image = cv2.cvtColor(
            np.array(input_image, dtype=np.uint8), cv2.COLOR_RGB2BGR
        )

        input_image = HWC3(input_image)
        input_image = resize_image(input_image, detect_resolution)
        H, W, C = input_image.shape

        with torch.no_grad():
            candidate, subset = self.pose_estimation(input_image)
            # candidate shape (1, 134, 2)
            # subset shape (1, 134)
            nums, keys, locs = candidate.shape
            candidate[..., 0] /= float(W)
            candidate[..., 1] /= float(H)

            if return_pose_only:
                return (candidate, subset)

            body = candidate[:, :18].copy()
            body = body.reshape(nums * 18, locs)
            score = subset[:, :18]

            for i in range(len(score)):
                for j in range(len(score[i])):
                    if score[i][j] > 0.3:
                        score[i][j] = int(18 * i + j)
                    else:
                        score[i][j] = -1

            un_visible = subset < 0.3
            candidate[un_visible] = -1

            foot = candidate[:, 18:24]

            faces = candidate[:, 24:92]

            hands = candidate[:, 92:113]
            hands = np.vstack([hands, candidate[:, 113:]])

            bodies = dict(candidate=body, subset=score)

            if not include_face:
                faces = []
            if not include_body:
                bodies = []
            if not include_hand:
                hands = []
            pose = dict(bodies=bodies, hands=hands, faces=faces)

            detected_map = draw_pose(pose, H, W)
            detected_map = HWC3(detected_map)

            img = resize_image(input_image, image_resolution)
            H, W, C = img.shape

            detected_map = cv2.resize(
                detected_map, (W, H), interpolation=cv2.INTER_LINEAR
            )

            if output_type == "pil":
                detected_map = Image.fromarray(detected_map)

            if return_pose_dict:
                return detected_map, pose
            else:
                return detected_map
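A minimal usage sketch for the DWposeDetector above (added for illustration only; it assumes mmcv, mmdet, and mmpose are installed so the default detection and pose checkpoints can be downloaded, and the image path is hypothetical):

# Hypothetical example: extract a whole-body pose map from one image.
from PIL import Image
from controlnet_aux.dwpose import DWposeDetector

dwpose = DWposeDetector(device="cpu")   # move with .to("cuda") if a GPU is available
image = Image.open("person.png")        # hypothetical input image
pose_map, pose = dwpose(image, output_type="pil", return_pose_dict=True)
pose_map.save("person_pose.png")        # `pose` holds the raw bodies/hands/faces keypoints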
controlnet_aux/src/controlnet_aux/dwpose/dwpose_config/dwpose-l_384x288.py
ADDED
@@ -0,0 +1,257 @@
# runtime
max_epochs = 270
stage2_num_epochs = 30
base_lr = 4e-3

train_cfg = dict(max_epochs=max_epochs, val_interval=10)
randomness = dict(seed=21)

# optimizer
optim_wrapper = dict(
    type='OptimWrapper',
    optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05),
    paramwise_cfg=dict(
        norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True))

# learning rate
param_scheduler = [
    dict(
        type='LinearLR',
        start_factor=1.0e-5,
        by_epoch=False,
        begin=0,
        end=1000),
    dict(
        # use cosine lr from 150 to 300 epoch
        type='CosineAnnealingLR',
        eta_min=base_lr * 0.05,
        begin=max_epochs // 2,
        end=max_epochs,
        T_max=max_epochs // 2,
        by_epoch=True,
        convert_to_iter_based=True),
]

# automatically scaling LR based on the actual training batch size
auto_scale_lr = dict(base_batch_size=512)

# codec settings
codec = dict(
    type='SimCCLabel',
    input_size=(288, 384),
    sigma=(6., 6.93),
    simcc_split_ratio=2.0,
    normalize=False,
    use_dark=False)

# model settings
model = dict(
    type='TopdownPoseEstimator',
    data_preprocessor=dict(
        type='PoseDataPreprocessor',
        mean=[123.675, 116.28, 103.53],
        std=[58.395, 57.12, 57.375],
        bgr_to_rgb=True),
    backbone=dict(
        _scope_='mmdet',
        type='CSPNeXt',
        arch='P5',
        expand_ratio=0.5,
        deepen_factor=1.,
        widen_factor=1.,
        out_indices=(4, ),
        channel_attention=True,
        norm_cfg=dict(type='SyncBN'),
        act_cfg=dict(type='SiLU'),
        init_cfg=dict(
            type='Pretrained',
            prefix='backbone.',
            checkpoint='https://download.openmmlab.com/mmpose/v1/projects/'
            'rtmpose/cspnext-l_udp-aic-coco_210e-256x192-273b7631_20230130.pth'  # noqa
        )),
    head=dict(
        type='RTMCCHead',
        in_channels=1024,
        out_channels=133,
        input_size=codec['input_size'],
        in_featuremap_size=(9, 12),
        simcc_split_ratio=codec['simcc_split_ratio'],
        final_layer_kernel_size=7,
        gau_cfg=dict(
            hidden_dims=256,
            s=128,
            expansion_factor=2,
            dropout_rate=0.,
            drop_path=0.,
            act_fn='SiLU',
            use_rel_bias=False,
            pos_enc=False),
        loss=dict(
            type='KLDiscretLoss',
            use_target_weight=True,
            beta=10.,
            label_softmax=True),
        decoder=codec),
    test_cfg=dict(flip_test=True, ))

# base dataset settings
dataset_type = 'CocoWholeBodyDataset'
data_mode = 'topdown'
data_root = '/data/'

backend_args = dict(backend='local')
# backend_args = dict(
#     backend='petrel',
#     path_mapping=dict({
#         f'{data_root}': 's3://openmmlab/datasets/detection/coco/',
#         f'{data_root}': 's3://openmmlab/datasets/detection/coco/'
#     }))

# pipelines
train_pipeline = [
    dict(type='LoadImage', backend_args=backend_args),
    dict(type='GetBBoxCenterScale'),
    dict(type='RandomFlip', direction='horizontal'),
    dict(type='RandomHalfBody'),
    dict(
        type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80),
    dict(type='TopdownAffine', input_size=codec['input_size']),
    dict(type='mmdet.YOLOXHSVRandomAug'),
    dict(
        type='Albumentation',
        transforms=[
            dict(type='Blur', p=0.1),
            dict(type='MedianBlur', p=0.1),
            dict(
                type='CoarseDropout',
                max_holes=1,
                max_height=0.4,
                max_width=0.4,
                min_holes=1,
                min_height=0.2,
                min_width=0.2,
                p=1.0),
        ]),
    dict(type='GenerateTarget', encoder=codec),
    dict(type='PackPoseInputs')
]
val_pipeline = [
    dict(type='LoadImage', backend_args=backend_args),
    dict(type='GetBBoxCenterScale'),
    dict(type='TopdownAffine', input_size=codec['input_size']),
    dict(type='PackPoseInputs')
]

train_pipeline_stage2 = [
    dict(type='LoadImage', backend_args=backend_args),
    dict(type='GetBBoxCenterScale'),
    dict(type='RandomFlip', direction='horizontal'),
    dict(type='RandomHalfBody'),
    dict(
        type='RandomBBoxTransform',
        shift_factor=0.,
        scale_factor=[0.75, 1.25],
        rotate_factor=60),
    dict(type='TopdownAffine', input_size=codec['input_size']),
    dict(type='mmdet.YOLOXHSVRandomAug'),
    dict(
        type='Albumentation',
        transforms=[
            dict(type='Blur', p=0.1),
            dict(type='MedianBlur', p=0.1),
            dict(
                type='CoarseDropout',
                max_holes=1,
                max_height=0.4,
                max_width=0.4,
                min_holes=1,
                min_height=0.2,
                min_width=0.2,
                p=0.5),
        ]),
    dict(type='GenerateTarget', encoder=codec),
    dict(type='PackPoseInputs')
]

datasets = []
dataset_coco = dict(
    type=dataset_type,
    data_root=data_root,
    data_mode=data_mode,
    ann_file='coco/annotations/coco_wholebody_train_v1.0.json',
    data_prefix=dict(img='coco/train2017/'),
    pipeline=[],
)
datasets.append(dataset_coco)

scene = ['Magic_show', 'Entertainment', 'ConductMusic', 'Online_class',
         'TalkShow', 'Speech', 'Fitness', 'Interview', 'Olympic', 'TVShow',
         'Singing', 'SignLanguage', 'Movie', 'LiveVlog', 'VideoConference']

for i in range(len(scene)):
    datasets.append(
        dict(
            type=dataset_type,
            data_root=data_root,
            data_mode=data_mode,
            ann_file='UBody/annotations/' + scene[i] + '/keypoint_annotation.json',
            data_prefix=dict(img='UBody/images/' + scene[i] + '/'),
            pipeline=[],
        )
    )

# data loaders
train_dataloader = dict(
    batch_size=32,
    num_workers=10,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=True),
    dataset=dict(
        type='CombinedDataset',
        metainfo=dict(from_file='configs/_base_/datasets/coco_wholebody.py'),
        datasets=datasets,
        pipeline=train_pipeline,
        test_mode=False,
    ))
val_dataloader = dict(
    batch_size=32,
    num_workers=10,
    persistent_workers=True,
    drop_last=False,
    sampler=dict(type='DefaultSampler', shuffle=False, round_up=False),
    dataset=dict(
        type=dataset_type,
        data_root=data_root,
        data_mode=data_mode,
        ann_file='coco/annotations/coco_wholebody_val_v1.0.json',
        bbox_file=f'{data_root}coco/person_detection_results/'
        'COCO_val2017_detections_AP_H_56_person.json',
        data_prefix=dict(img='coco/val2017/'),
        test_mode=True,
        pipeline=val_pipeline,
    ))
test_dataloader = val_dataloader

# hooks
default_hooks = dict(
    checkpoint=dict(
        save_best='coco-wholebody/AP', rule='greater', max_keep_ckpts=1))

custom_hooks = [
    dict(
        type='EMAHook',
        ema_type='ExpMomentumEMA',
        momentum=0.0002,
        update_buffers=True,
        priority=49),
    dict(
        type='mmdet.PipelineSwitchHook',
        switch_epoch=max_epochs - stage2_num_epochs,
        switch_pipeline=train_pipeline_stage2)
]

# evaluators
val_evaluator = dict(
    type='CocoWholeBodyMetric',
    ann_file=data_root + 'coco/annotations/coco_wholebody_val_v1.0.json')
test_evaluator = val_evaluator
controlnet_aux/src/controlnet_aux/dwpose/util.py
ADDED
@@ -0,0 +1,303 @@
import math
import numpy as np
import cv2


eps = 0.01


def smart_resize(x, s):
    Ht, Wt = s
    if x.ndim == 2:
        Ho, Wo = x.shape
        Co = 1
    else:
        Ho, Wo, Co = x.shape
    if Co == 3 or Co == 1:
        k = float(Ht + Wt) / float(Ho + Wo)
        return cv2.resize(x, (int(Wt), int(Ht)), interpolation=cv2.INTER_AREA if k < 1 else cv2.INTER_LANCZOS4)
    else:
        return np.stack([smart_resize(x[:, :, i], s) for i in range(Co)], axis=2)


def smart_resize_k(x, fx, fy):
    if x.ndim == 2:
        Ho, Wo = x.shape
        Co = 1
    else:
        Ho, Wo, Co = x.shape
    Ht, Wt = Ho * fy, Wo * fx
    if Co == 3 or Co == 1:
        k = float(Ht + Wt) / float(Ho + Wo)
        return cv2.resize(x, (int(Wt), int(Ht)), interpolation=cv2.INTER_AREA if k < 1 else cv2.INTER_LANCZOS4)
    else:
        return np.stack([smart_resize_k(x[:, :, i], fx, fy) for i in range(Co)], axis=2)


def padRightDownCorner(img, stride, padValue):
    h = img.shape[0]
    w = img.shape[1]

    pad = 4 * [None]
    pad[0] = 0  # up
    pad[1] = 0  # left
    pad[2] = 0 if (h % stride == 0) else stride - (h % stride)  # down
    pad[3] = 0 if (w % stride == 0) else stride - (w % stride)  # right

    img_padded = img
    pad_up = np.tile(img_padded[0:1, :, :] * 0 + padValue, (pad[0], 1, 1))
    img_padded = np.concatenate((pad_up, img_padded), axis=0)
    pad_left = np.tile(img_padded[:, 0:1, :] * 0 + padValue, (1, pad[1], 1))
    img_padded = np.concatenate((pad_left, img_padded), axis=1)
    pad_down = np.tile(img_padded[-2:-1, :, :] * 0 + padValue, (pad[2], 1, 1))
    img_padded = np.concatenate((img_padded, pad_down), axis=0)
    pad_right = np.tile(img_padded[:, -2:-1, :] * 0 + padValue, (1, pad[3], 1))
    img_padded = np.concatenate((img_padded, pad_right), axis=1)

    return img_padded, pad


def transfer(model, model_weights):
    transfered_model_weights = {}
    for weights_name in model.state_dict().keys():
        transfered_model_weights[weights_name] = model_weights['.'.join(weights_name.split('.')[1:])]
    return transfered_model_weights


def draw_bodypose(canvas, candidate, subset):
    H, W, C = canvas.shape
    candidate = np.array(candidate)
    subset = np.array(subset)

    stickwidth = 4

    limbSeq = [[2, 3], [2, 6], [3, 4], [4, 5], [6, 7], [7, 8], [2, 9], [9, 10], \
               [10, 11], [2, 12], [12, 13], [13, 14], [2, 1], [1, 15], [15, 17], \
               [1, 16], [16, 18], [3, 17], [6, 18]]

    colors = [[255, 0, 0], [255, 85, 0], [255, 170, 0], [255, 255, 0], [170, 255, 0], [85, 255, 0], [0, 255, 0], \
              [0, 255, 85], [0, 255, 170], [0, 255, 255], [0, 170, 255], [0, 85, 255], [0, 0, 255], [85, 0, 255], \
              [170, 0, 255], [255, 0, 255], [255, 0, 170], [255, 0, 85]]

    for i in range(17):
        for n in range(len(subset)):
            index = subset[n][np.array(limbSeq[i]) - 1]
            if -1 in index:
                continue
            Y = candidate[index.astype(int), 0] * float(W)
            X = candidate[index.astype(int), 1] * float(H)
            mX = np.mean(X)
            mY = np.mean(Y)
            length = ((X[0] - X[1]) ** 2 + (Y[0] - Y[1]) ** 2) ** 0.5
            angle = math.degrees(math.atan2(X[0] - X[1], Y[0] - Y[1]))
            polygon = cv2.ellipse2Poly((int(mY), int(mX)), (int(length / 2), stickwidth), int(angle), 0, 360, 1)
            cv2.fillConvexPoly(canvas, polygon, colors[i])

    canvas = (canvas * 0.6).astype(np.uint8)

    for i in range(18):
        for n in range(len(subset)):
            index = int(subset[n][i])
            if index == -1:
                continue
            x, y = candidate[index][0:2]
            x = int(x * W)
            y = int(y * H)
            cv2.circle(canvas, (int(x), int(y)), 4, colors[i], thickness=-1)

    return canvas


def draw_handpose(canvas, all_hand_peaks):
    import matplotlib

    H, W, C = canvas.shape

    edges = [[0, 1], [1, 2], [2, 3], [3, 4], [0, 5], [5, 6], [6, 7], [7, 8], [0, 9], [9, 10], \
             [10, 11], [11, 12], [0, 13], [13, 14], [14, 15], [15, 16], [0, 17], [17, 18], [18, 19], [19, 20]]

    # (person_number*2, 21, 2)
    for i in range(len(all_hand_peaks)):
        peaks = all_hand_peaks[i]
        peaks = np.array(peaks)

        for ie, e in enumerate(edges):

            x1, y1 = peaks[e[0]]
            x2, y2 = peaks[e[1]]

            x1 = int(x1 * W)
            y1 = int(y1 * H)
            x2 = int(x2 * W)
            y2 = int(y2 * H)
            if x1 > eps and y1 > eps and x2 > eps and y2 > eps:
                cv2.line(canvas, (x1, y1), (x2, y2), matplotlib.colors.hsv_to_rgb([ie / float(len(edges)), 1.0, 1.0]) * 255, thickness=2)

        for _, keyponit in enumerate(peaks):
            x, y = keyponit

            x = int(x * W)
            y = int(y * H)
            if x > eps and y > eps:
                cv2.circle(canvas, (x, y), 4, (0, 0, 255), thickness=-1)
    return canvas


def draw_facepose(canvas, all_lmks):
    H, W, C = canvas.shape
    for lmks in all_lmks:
        lmks = np.array(lmks)
        for lmk in lmks:
            x, y = lmk
            x = int(x * W)
            y = int(y * H)
            if x > eps and y > eps:
                cv2.circle(canvas, (x, y), 3, (255, 255, 255), thickness=-1)
    return canvas


# detect hand according to body pose keypoints
# please refer to https://github.com/CMU-Perceptual-Computing-Lab/openpose/blob/master/src/openpose/hand/handDetector.cpp
def handDetect(candidate, subset, oriImg):
    # right hand: wrist 4, elbow 3, shoulder 2
    # left hand: wrist 7, elbow 6, shoulder 5
    ratioWristElbow = 0.33
    detect_result = []
    image_height, image_width = oriImg.shape[0:2]
    for person in subset.astype(int):
        # if any of three not detected
        has_left = np.sum(person[[5, 6, 7]] == -1) == 0
        has_right = np.sum(person[[2, 3, 4]] == -1) == 0
        if not (has_left or has_right):
            continue
        hands = []
        # left hand
        if has_left:
            left_shoulder_index, left_elbow_index, left_wrist_index = person[[5, 6, 7]]
            x1, y1 = candidate[left_shoulder_index][:2]
            x2, y2 = candidate[left_elbow_index][:2]
            x3, y3 = candidate[left_wrist_index][:2]
            hands.append([x1, y1, x2, y2, x3, y3, True])
        # right hand
        if has_right:
            right_shoulder_index, right_elbow_index, right_wrist_index = person[[2, 3, 4]]
            x1, y1 = candidate[right_shoulder_index][:2]
            x2, y2 = candidate[right_elbow_index][:2]
            x3, y3 = candidate[right_wrist_index][:2]
            hands.append([x1, y1, x2, y2, x3, y3, False])

        for x1, y1, x2, y2, x3, y3, is_left in hands:
            # pos_hand = pos_wrist + ratio * (pos_wrist - pos_elbox) = (1 + ratio) * pos_wrist - ratio * pos_elbox
            # handRectangle.x = posePtr[wrist*3] + ratioWristElbow * (posePtr[wrist*3] - posePtr[elbow*3]);
            # handRectangle.y = posePtr[wrist*3+1] + ratioWristElbow * (posePtr[wrist*3+1] - posePtr[elbow*3+1]);
            # const auto distanceWristElbow = getDistance(poseKeypoints, person, wrist, elbow);
            # const auto distanceElbowShoulder = getDistance(poseKeypoints, person, elbow, shoulder);
            # handRectangle.width = 1.5f * fastMax(distanceWristElbow, 0.9f * distanceElbowShoulder);
            x = x3 + ratioWristElbow * (x3 - x2)
            y = y3 + ratioWristElbow * (y3 - y2)
            distanceWristElbow = math.sqrt((x3 - x2) ** 2 + (y3 - y2) ** 2)
            distanceElbowShoulder = math.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)
            width = 1.5 * max(distanceWristElbow, 0.9 * distanceElbowShoulder)
            # x-y refers to the center --> offset to topLeft point
            # handRectangle.x -= handRectangle.width / 2.f;
            # handRectangle.y -= handRectangle.height / 2.f;
            x -= width / 2
            y -= width / 2  # width = height
            # overflow the image
            if x < 0: x = 0
            if y < 0: y = 0
            width1 = width
            width2 = width
            if x + width > image_width: width1 = image_width - x
            if y + width > image_height: width2 = image_height - y
            width = min(width1, width2)
            # the max hand box value is 20 pixels
            if width >= 20:
                detect_result.append([int(x), int(y), int(width), is_left])

    '''
    return value: [[x, y, w, True if left hand else False]].
    width=height since the network require squared input.
    x, y is the coordinate of top left
    '''
    return detect_result


# Written by Lvmin
def faceDetect(candidate, subset, oriImg):
    # left right eye ear 14 15 16 17
    detect_result = []
    image_height, image_width = oriImg.shape[0:2]
    for person in subset.astype(int):
        has_head = person[0] > -1
        if not has_head:
            continue

        has_left_eye = person[14] > -1
        has_right_eye = person[15] > -1
        has_left_ear = person[16] > -1
        has_right_ear = person[17] > -1

        if not (has_left_eye or has_right_eye or has_left_ear or has_right_ear):
            continue

        head, left_eye, right_eye, left_ear, right_ear = person[[0, 14, 15, 16, 17]]

        width = 0.0
        x0, y0 = candidate[head][:2]

        if has_left_eye:
            x1, y1 = candidate[left_eye][:2]
            d = max(abs(x0 - x1), abs(y0 - y1))
            width = max(width, d * 3.0)

        if has_right_eye:
            x1, y1 = candidate[right_eye][:2]
            d = max(abs(x0 - x1), abs(y0 - y1))
            width = max(width, d * 3.0)

        if has_left_ear:
            x1, y1 = candidate[left_ear][:2]
            d = max(abs(x0 - x1), abs(y0 - y1))
            width = max(width, d * 1.5)

        if has_right_ear:
            x1, y1 = candidate[right_ear][:2]
            d = max(abs(x0 - x1), abs(y0 - y1))
            width = max(width, d * 1.5)

        x, y = x0, y0

        x -= width
        y -= width

        if x < 0:
            x = 0

        if y < 0:
            y = 0

        width1 = width * 2
        width2 = width * 2

        if x + width > image_width:
            width1 = image_width - x

        if y + width > image_height:
            width2 = image_height - y

        width = min(width1, width2)

        if width >= 20:
            detect_result.append([int(x), int(y), int(width)])

    return detect_result


# get max index of 2d array
def npmax(array):
    arrayindex = array.argmax(1)
    arrayvalue = array.max(1)
    i = arrayvalue.argmax()
    j = arrayindex[i]
    return i, j
controlnet_aux/src/controlnet_aux/dwpose/wholebody.py
ADDED
@@ -0,0 +1,123 @@
# Copyright (c) OpenMMLab. All rights reserved.
import os
import numpy as np
import warnings

try:
    import mmcv
except ImportError:
    warnings.warn(
        "The module 'mmcv' is not installed. The package will have limited functionality. Please install it using the command: mim install 'mmcv>=2.0.1'"
    )

try:
    from mmpose.apis import inference_topdown
    from mmpose.apis import init_model as init_pose_estimator
    from mmpose.evaluation.functional import nms
    from mmpose.utils import adapt_mmdet_pipeline
    from mmpose.structures import merge_data_samples
except ImportError:
    warnings.warn(
        "The module 'mmpose' is not installed. The package will have limited functionality. Please install it using the command: mim install 'mmpose>=1.1.0'"
    )

try:
    from mmdet.apis import inference_detector, init_detector
except ImportError:
    warnings.warn(
        "The module 'mmdet' is not installed. The package will have limited functionality. Please install it using the command: mim install 'mmdet>=3.1.0'"
    )


class Wholebody:
    def __init__(self,
                 det_config=None, det_ckpt=None,
                 pose_config=None, pose_ckpt=None,
                 device="cpu"):

        if det_config is None:
            det_config = os.path.join(os.path.dirname(__file__), "yolox_config/yolox_l_8xb8-300e_coco.py")

        if pose_config is None:
            pose_config = os.path.join(os.path.dirname(__file__), "dwpose_config/dwpose-l_384x288.py")

        if det_ckpt is None:
            det_ckpt = 'https://download.openmmlab.com/mmdetection/v2.0/yolox/yolox_l_8x8_300e_coco/yolox_l_8x8_300e_coco_20211126_140236-d3bd2b23.pth'
            # det_ckpt = '~/.cache/torch/hub/checkpoints/yolox_l_8x8_300e_coco_20211126_140236-d3bd2b23.pth'

        if pose_ckpt is None:
            pose_ckpt = "https://huggingface.co/wanghaofan/dw-ll_ucoco_384/resolve/main/dw-ll_ucoco_384.pth"
            # pose_ckpt = "~/.cache/torch/hub/checkpoints/dw-ll_ucoco_384.pth"

        # build detector
        self.detector = init_detector(det_config, det_ckpt, device=device)
        self.detector.cfg = adapt_mmdet_pipeline(self.detector.cfg)

        # build pose estimator
        self.pose_estimator = init_pose_estimator(
            pose_config,
            pose_ckpt,
            device=device)

    def to(self, device):
        self.detector.to(device)
        self.pose_estimator.to(device)
        return self

    def __call__(self, oriImg):
        # predict bbox
        det_result = inference_detector(self.detector, oriImg)
        pred_instance = det_result.pred_instances.cpu().numpy()
        bboxes = np.concatenate(
            (pred_instance.bboxes, pred_instance.scores[:, None]), axis=1)
        bboxes = bboxes[np.logical_and(pred_instance.labels == 0,
                                       pred_instance.scores > 0.5)]

        # set NMS threshold
        bboxes = bboxes[nms(bboxes, 0.7), :4]

        # predict keypoints
        if len(bboxes) == 0:
            pose_results = inference_topdown(self.pose_estimator, oriImg)
        else:
            pose_results = inference_topdown(self.pose_estimator, oriImg, bboxes)
        preds = merge_data_samples(pose_results)
        preds = preds.pred_instances

        # preds = pose_results[0].pred_instances
        keypoints = preds.get('transformed_keypoints',
                              preds.keypoints)
        if 'keypoint_scores' in preds:
            scores = preds.keypoint_scores
        else:
            scores = np.ones(keypoints.shape[:-1])

        if 'keypoints_visible' in preds:
            visible = preds.keypoints_visible
        else:
            visible = np.ones(keypoints.shape[:-1])
        keypoints_info = np.concatenate(
            (keypoints, scores[..., None], visible[..., None]),
            axis=-1)
        # compute neck joint
        neck = np.mean(keypoints_info[:, [5, 6]], axis=1)
        # neck score when visualizing pred
        neck[:, 2:4] = np.logical_and(
            keypoints_info[:, 5, 2:4] > 0.3,
            keypoints_info[:, 6, 2:4] > 0.3).astype(int)
        new_keypoints_info = np.insert(
            keypoints_info, 17, neck, axis=1)
        mmpose_idx = [
            17, 6, 8, 10, 7, 9, 12, 14, 16, 13, 15, 2, 1, 4, 3
        ]
        openpose_idx = [
            1, 2, 3, 4, 6, 7, 8, 9, 10, 12, 13, 14, 15, 16, 17
        ]
        new_keypoints_info[:, openpose_idx] = \
            new_keypoints_info[:, mmpose_idx]
        keypoints_info = new_keypoints_info

        keypoints, scores, visible = keypoints_info[
            ..., :2], keypoints_info[..., 2], keypoints_info[..., 3]

        return keypoints, scores
controlnet_aux/src/controlnet_aux/dwpose/yolox_config/yolox_l_8xb8-300e_coco.py
ADDED
@@ -0,0 +1,245 @@
img_scale = (640, 640)  # width, height

# model settings
model = dict(
    type='YOLOX',
    data_preprocessor=dict(
        type='DetDataPreprocessor',
        pad_size_divisor=32,
        batch_augments=[
            dict(
                type='BatchSyncRandomResize',
                random_size_range=(480, 800),
                size_divisor=32,
                interval=10)
        ]),
    backbone=dict(
        type='CSPDarknet',
        deepen_factor=1.0,
        widen_factor=1.0,
        out_indices=(2, 3, 4),
        use_depthwise=False,
        spp_kernal_sizes=(5, 9, 13),
        norm_cfg=dict(type='BN', momentum=0.03, eps=0.001),
        act_cfg=dict(type='Swish'),
    ),
    neck=dict(
        type='YOLOXPAFPN',
        in_channels=[256, 512, 1024],
        out_channels=256,
        num_csp_blocks=3,
        use_depthwise=False,
        upsample_cfg=dict(scale_factor=2, mode='nearest'),
        norm_cfg=dict(type='BN', momentum=0.03, eps=0.001),
        act_cfg=dict(type='Swish')),
    bbox_head=dict(
        type='YOLOXHead',
        num_classes=80,
        in_channels=256,
        feat_channels=256,
        stacked_convs=2,
        strides=(8, 16, 32),
        use_depthwise=False,
        norm_cfg=dict(type='BN', momentum=0.03, eps=0.001),
        act_cfg=dict(type='Swish'),
        loss_cls=dict(
            type='CrossEntropyLoss',
            use_sigmoid=True,
            reduction='sum',
            loss_weight=1.0),
        loss_bbox=dict(
            type='IoULoss',
            mode='square',
            eps=1e-16,
            reduction='sum',
            loss_weight=5.0),
        loss_obj=dict(
            type='CrossEntropyLoss',
            use_sigmoid=True,
            reduction='sum',
            loss_weight=1.0),
        loss_l1=dict(type='L1Loss', reduction='sum', loss_weight=1.0)),
    train_cfg=dict(assigner=dict(type='SimOTAAssigner', center_radius=2.5)),
    # In order to align the source code, the threshold of the val phase is
    # 0.01, and the threshold of the test phase is 0.001.
    test_cfg=dict(score_thr=0.01, nms=dict(type='nms', iou_threshold=0.65)))

# dataset settings
data_root = 'data/coco/'
dataset_type = 'CocoDataset'

# Example to use different file client
# Method 1: simply set the data root and let the file I/O module
# automatically infer from prefix (not support LMDB and Memcache yet)

# data_root = 's3://openmmlab/datasets/detection/coco/'

# Method 2: Use `backend_args`, `file_client_args` in versions before 3.0.0rc6
# backend_args = dict(
#     backend='petrel',
#     path_mapping=dict({
#         './data/': 's3://openmmlab/datasets/detection/',
#         'data/': 's3://openmmlab/datasets/detection/'
#     }))
backend_args = None

train_pipeline = [
    dict(type='Mosaic', img_scale=img_scale, pad_val=114.0),
    dict(
        type='RandomAffine',
        scaling_ratio_range=(0.1, 2),
        # img_scale is (width, height)
        border=(-img_scale[0] // 2, -img_scale[1] // 2)),
    dict(
        type='MixUp',
        img_scale=img_scale,
        ratio_range=(0.8, 1.6),
        pad_val=114.0),
    dict(type='YOLOXHSVRandomAug'),
    dict(type='RandomFlip', prob=0.5),
    # According to the official implementation, multi-scale
    # training is not considered here but in the
    # 'mmdet/models/detectors/yolox.py'.
    # Resize and Pad are for the last 15 epochs when Mosaic,
    # RandomAffine, and MixUp are closed by YOLOXModeSwitchHook.
    dict(type='Resize', scale=img_scale, keep_ratio=True),
    dict(
        type='Pad',
        pad_to_square=True,
        # If the image is three-channel, the pad value needs
        # to be set separately for each channel.
        pad_val=dict(img=(114.0, 114.0, 114.0))),
    dict(type='FilterAnnotations', min_gt_bbox_wh=(1, 1), keep_empty=False),
    dict(type='PackDetInputs')
]

train_dataset = dict(
    # use MultiImageMixDataset wrapper to support mosaic and mixup
    type='MultiImageMixDataset',
    dataset=dict(
        type=dataset_type,
        data_root=data_root,
        ann_file='annotations/instances_train2017.json',
        data_prefix=dict(img='train2017/'),
        pipeline=[
            dict(type='LoadImageFromFile', backend_args=backend_args),
            dict(type='LoadAnnotations', with_bbox=True)
        ],
        filter_cfg=dict(filter_empty_gt=False, min_size=32),
        backend_args=backend_args),
    pipeline=train_pipeline)

test_pipeline = [
    dict(type='LoadImageFromFile', backend_args=backend_args),
    dict(type='Resize', scale=img_scale, keep_ratio=True),
    dict(
        type='Pad',
        pad_to_square=True,
        pad_val=dict(img=(114.0, 114.0, 114.0))),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(
        type='PackDetInputs',
        meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
                   'scale_factor'))
]

train_dataloader = dict(
    batch_size=8,
    num_workers=4,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=True),
    dataset=train_dataset)
val_dataloader = dict(
    batch_size=8,
    num_workers=4,
    persistent_workers=True,
    drop_last=False,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type=dataset_type,
        data_root=data_root,
        ann_file='annotations/instances_val2017.json',
        data_prefix=dict(img='val2017/'),
        test_mode=True,
        pipeline=test_pipeline,
        backend_args=backend_args))
test_dataloader = val_dataloader

val_evaluator = dict(
    type='CocoMetric',
    ann_file=data_root + 'annotations/instances_val2017.json',
    metric='bbox',
    backend_args=backend_args)
test_evaluator = val_evaluator

# training settings
max_epochs = 300
num_last_epochs = 15
interval = 10

train_cfg = dict(max_epochs=max_epochs, val_interval=interval)

# optimizer
# default 8 gpu
base_lr = 0.01
optim_wrapper = dict(
    type='OptimWrapper',
    optimizer=dict(
        type='SGD', lr=base_lr, momentum=0.9, weight_decay=5e-4,
        nesterov=True),
    paramwise_cfg=dict(norm_decay_mult=0., bias_decay_mult=0.))

# learning rate
param_scheduler = [
    dict(
        # use quadratic formula to warm up 5 epochs
        # and lr is updated by iteration
        # TODO: fix default scope in get function
        type='mmdet.QuadraticWarmupLR',
        by_epoch=True,
        begin=0,
        end=5,
        convert_to_iter_based=True),
    dict(
        # use cosine lr from 5 to 285 epoch
        type='CosineAnnealingLR',
        eta_min=base_lr * 0.05,
        begin=5,
        T_max=max_epochs - num_last_epochs,
        end=max_epochs - num_last_epochs,
        by_epoch=True,
        convert_to_iter_based=True),
    dict(
        # use fixed lr during last 15 epochs
        type='ConstantLR',
        by_epoch=True,
        factor=1,
        begin=max_epochs - num_last_epochs,
        end=max_epochs,
    )
]

default_hooks = dict(
    checkpoint=dict(
        interval=interval,
        max_keep_ckpts=3  # only keep latest 3 checkpoints
    ))

custom_hooks = [
    dict(
        type='YOLOXModeSwitchHook',
        num_last_epochs=num_last_epochs,
        priority=48),
    dict(type='SyncNormHook', priority=48),
    dict(
        type='EMAHook',
        ema_type='ExpMomentumEMA',
        momentum=0.0001,
        update_buffers=True,
        priority=49)
]

# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (8 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
controlnet_aux/src/controlnet_aux/hed/__init__.py
ADDED
@@ -0,0 +1,129 @@
# This is an improved version and model of HED edge detection with Apache License, Version 2.0.
# Please use this implementation in your products
# This implementation may produce slightly different results from Saining Xie's official implementations,
# but it generates smoother edges and is more suitable for ControlNet as well as other image-to-image translations.
# Different from official models and other implementations, this is an RGB-input model (rather than BGR)
# and in this way it works better for gradio's RGB protocol

import os
import warnings

import cv2
import numpy as np
import torch
from einops import rearrange
from huggingface_hub import hf_hub_download
from PIL import Image

from ..util import HWC3, nms, resize_image, safe_step


class DoubleConvBlock(torch.nn.Module):
    def __init__(self, input_channel, output_channel, layer_number):
        super().__init__()
        self.convs = torch.nn.Sequential()
        self.convs.append(torch.nn.Conv2d(in_channels=input_channel, out_channels=output_channel, kernel_size=(3, 3), stride=(1, 1), padding=1))
        for i in range(1, layer_number):
            self.convs.append(torch.nn.Conv2d(in_channels=output_channel, out_channels=output_channel, kernel_size=(3, 3), stride=(1, 1), padding=1))
        self.projection = torch.nn.Conv2d(in_channels=output_channel, out_channels=1, kernel_size=(1, 1), stride=(1, 1), padding=0)

    def __call__(self, x, down_sampling=False):
        h = x
        if down_sampling:
            h = torch.nn.functional.max_pool2d(h, kernel_size=(2, 2), stride=(2, 2))
        for conv in self.convs:
            h = conv(h)
            h = torch.nn.functional.relu(h)
        return h, self.projection(h)


class ControlNetHED_Apache2(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.norm = torch.nn.Parameter(torch.zeros(size=(1, 3, 1, 1)))
        self.block1 = DoubleConvBlock(input_channel=3, output_channel=64, layer_number=2)
        self.block2 = DoubleConvBlock(input_channel=64, output_channel=128, layer_number=2)
        self.block3 = DoubleConvBlock(input_channel=128, output_channel=256, layer_number=3)
        self.block4 = DoubleConvBlock(input_channel=256, output_channel=512, layer_number=3)
        self.block5 = DoubleConvBlock(input_channel=512, output_channel=512, layer_number=3)

    def __call__(self, x):
        h = x - self.norm
        h, projection1 = self.block1(h)
        h, projection2 = self.block2(h, down_sampling=True)
        h, projection3 = self.block3(h, down_sampling=True)
        h, projection4 = self.block4(h, down_sampling=True)
        h, projection5 = self.block5(h, down_sampling=True)
        return projection1, projection2, projection3, projection4, projection5


class HEDdetector:
    def __init__(self, netNetwork):
        self.netNetwork = netNetwork

    @classmethod
    def from_pretrained(cls, pretrained_model_or_path, filename=None, cache_dir=None):
        filename = filename or "ControlNetHED.pth"

        if os.path.isdir(pretrained_model_or_path):
            model_path = os.path.join(pretrained_model_or_path, filename)
        else:
            model_path = hf_hub_download(pretrained_model_or_path, filename, cache_dir=cache_dir)

        netNetwork = ControlNetHED_Apache2()
        netNetwork.load_state_dict(torch.load(model_path, map_location='cpu'))
        netNetwork.float().eval()

        return cls(netNetwork)

    def to(self, device):
        self.netNetwork.to(device)
        return self

    def __call__(self, input_image, detect_resolution=512, image_resolution=512, safe=False, output_type="pil", scribble=False, **kwargs):
        if "return_pil" in kwargs:
            warnings.warn("return_pil is deprecated. Use output_type instead.", DeprecationWarning)
            output_type = "pil" if kwargs["return_pil"] else "np"
        if type(output_type) is bool:
            warnings.warn("Passing `True` or `False` to `output_type` is deprecated and will raise an error in future versions")
            if output_type:
                output_type = "pil"

        device = next(iter(self.netNetwork.parameters())).device
        if not isinstance(input_image, np.ndarray):
            input_image = np.array(input_image, dtype=np.uint8)

        input_image = HWC3(input_image)
        input_image = resize_image(input_image, detect_resolution)

        assert input_image.ndim == 3
        H, W, C = input_image.shape
        with torch.no_grad():
            image_hed = torch.from_numpy(input_image.copy()).float().to(device)
            image_hed = rearrange(image_hed, 'h w c -> 1 c h w')
            edges = self.netNetwork(image_hed)
            edges = [e.detach().cpu().numpy().astype(np.float32)[0, 0] for e in edges]
            edges = [cv2.resize(e, (W, H), interpolation=cv2.INTER_LINEAR) for e in edges]
            edges = np.stack(edges, axis=2)
            edge = 1 / (1 + np.exp(-np.mean(edges, axis=2).astype(np.float64)))
            if safe:
                edge = safe_step(edge)
            edge = (edge * 255.0).clip(0, 255).astype(np.uint8)

        detected_map = edge
        detected_map = HWC3(detected_map)

        img = resize_image(input_image, image_resolution)
        H, W, C = img.shape

        detected_map = cv2.resize(detected_map, (W, H), interpolation=cv2.INTER_LINEAR)

        if scribble:
            detected_map = nms(detected_map, 127, 3.0)
            detected_map = cv2.GaussianBlur(detected_map, (0, 0), 3.0)
            detected_map[detected_map > 4] = 255
            detected_map[detected_map < 255] = 0

        if output_type == "pil":
            detected_map = Image.fromarray(detected_map)

        return detected_map
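A minimal usage sketch for the HEDdetector above (added for illustration only; "lllyasviel/Annotators" is assumed here as the Hugging Face repo hosting ControlNetHED.pth, and the image path is hypothetical):

# Hypothetical example: soft-edge (HED) preprocessing for ControlNet-style conditioning.
from PIL import Image
from controlnet_aux import HEDdetector

hed = HEDdetector.from_pretrained("lllyasviel/Annotators")  # downloads ControlNetHED.pth (assumed repo)
image = Image.open("example.png")                           # hypothetical input image
soft_edge = hed(image, scribble=False)                      # scribble=True yields a binarized scribble map
soft_edge.save("example_hed.png")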
controlnet_aux/src/controlnet_aux/leres/__init__.py
ADDED
@@ -0,0 +1,118 @@
import os

import cv2
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from ..util import HWC3, resize_image
from .leres.depthmap import estimateboost, estimateleres
from .leres.multi_depth_model_woauxi import RelDepthModel
from .leres.net_tools import strip_prefix_if_present
from .pix2pix.models.pix2pix4depth_model import Pix2Pix4DepthModel
from .pix2pix.options.test_options import TestOptions


class LeresDetector:
    def __init__(self, model, pix2pixmodel):
        self.model = model
        self.pix2pixmodel = pix2pixmodel

    @classmethod
    def from_pretrained(cls, pretrained_model_or_path, filename=None, pix2pix_filename=None, cache_dir=None):
        filename = filename or "res101.pth"
        pix2pix_filename = pix2pix_filename or "latest_net_G.pth"

        if os.path.isdir(pretrained_model_or_path):
            model_path = os.path.join(pretrained_model_or_path, filename)
        else:
            model_path = hf_hub_download(pretrained_model_or_path, filename, cache_dir=cache_dir)

        checkpoint = torch.load(model_path, map_location=torch.device('cpu'))

        model = RelDepthModel(backbone='resnext101')
        model.load_state_dict(strip_prefix_if_present(checkpoint['depth_model'], "module."), strict=True)
        del checkpoint

        if os.path.isdir(pretrained_model_or_path):
            model_path = os.path.join(pretrained_model_or_path, pix2pix_filename)
        else:
            model_path = hf_hub_download(pretrained_model_or_path, pix2pix_filename, cache_dir=cache_dir)

        opt = TestOptions().parse()
        if not torch.cuda.is_available():
            opt.gpu_ids = []  # cpu mode
        pix2pixmodel = Pix2Pix4DepthModel(opt)
        pix2pixmodel.save_dir = os.path.dirname(model_path)
        pix2pixmodel.load_networks('latest')
        pix2pixmodel.eval()

        return cls(model, pix2pixmodel)

    def to(self, device):
        self.model.to(device)
        # TODO - refactor pix2pix implementation to support device migration
        # self.pix2pixmodel.to(device)
        return self

    def __call__(self, input_image, thr_a=0, thr_b=0, boost=False, detect_resolution=512, image_resolution=512, output_type="pil"):
        device = next(iter(self.model.parameters())).device
        if not isinstance(input_image, np.ndarray):
            input_image = np.array(input_image, dtype=np.uint8)

        input_image = HWC3(input_image)
        input_image = resize_image(input_image, detect_resolution)

        assert input_image.ndim == 3
        height, width, dim = input_image.shape

        with torch.no_grad():

            if boost:
                depth = estimateboost(input_image, self.model, 0, self.pix2pixmodel, max(width, height))
            else:
                depth = estimateleres(input_image, self.model, width, height)

            numbytes = 2
            depth_min = depth.min()
            depth_max = depth.max()
            max_val = (2 ** (8 * numbytes)) - 1

            # check output before normalizing and mapping to 16 bit
            if depth_max - depth_min > np.finfo("float").eps:
                out = max_val * (depth - depth_min) / (depth_max - depth_min)
            else:
                out = np.zeros(depth.shape)

            # single channel, 16 bit image
            depth_image = out.astype("uint16")

            # convert to uint8
            depth_image = cv2.convertScaleAbs(depth_image, alpha=(255.0 / 65535.0))

            # remove near
            if thr_a != 0:
                thr_a = ((thr_a / 100) * 255)
                depth_image = cv2.threshold(depth_image, thr_a, 255, cv2.THRESH_TOZERO)[1]

            # invert image
            depth_image = cv2.bitwise_not(depth_image)

            # remove bg
            if thr_b != 0:
                thr_b = ((thr_b / 100) * 255)
                depth_image = cv2.threshold(depth_image, thr_b, 255, cv2.THRESH_TOZERO)[1]

        detected_map = depth_image
        detected_map = HWC3(detected_map)

        img = resize_image(input_image, image_resolution)
        H, W, C = img.shape

        detected_map = cv2.resize(detected_map, (W, H), interpolation=cv2.INTER_LINEAR)

        if output_type == "pil":
            detected_map = Image.fromarray(detected_map)

        return detected_map
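A minimal usage sketch for the LeresDetector above (added for illustration only; "lllyasviel/Annotators" is assumed here as the repo hosting res101.pth and latest_net_G.pth, and the image path is hypothetical):

# Hypothetical example: LeReS depth estimation, optionally boosted with pix2pix merging.
from PIL import Image
from controlnet_aux import LeresDetector

leres = LeresDetector.from_pretrained("lllyasviel/Annotators")  # assumed weight repo
image = Image.open("room.png")        # hypothetical input image
depth = leres(image, boost=False)     # boost=True runs the slower estimateboost path
depth.save("room_depth.png")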
controlnet_aux/src/controlnet_aux/leres/leres/LICENSE
ADDED
@@ -0,0 +1,23 @@
https://github.com/thygate/stable-diffusion-webui-depthmap-script

MIT License

Copyright (c) 2023 Bob Thiry

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
controlnet_aux/src/controlnet_aux/leres/leres/Resnet.py
ADDED
@@ -0,0 +1,199 @@
import torch.nn as nn
import torch.nn as NN

__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
           'resnet152']


model_urls = {
    'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
    'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
    'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
    'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
    'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}


def conv3x3(in_planes, out_planes, stride=1):
    """3x3 convolution with padding"""
    return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
                     padding=1, bias=False)


class BasicBlock(nn.Module):
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = NN.BatchNorm2d(planes) #NN.BatchNorm2d
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = NN.BatchNorm2d(planes) #NN.BatchNorm2d
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        residual = x

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn2(out)

        if self.downsample is not None:
            residual = self.downsample(x)

        out += residual
        out = self.relu(out)

        return out


class Bottleneck(nn.Module):
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = NN.BatchNorm2d(planes) #NN.BatchNorm2d
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = NN.BatchNorm2d(planes) #NN.BatchNorm2d
        self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1, bias=False)
        self.bn3 = NN.BatchNorm2d(planes * self.expansion) #NN.BatchNorm2d
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        residual = x

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)

        out = self.conv3(out)
        out = self.bn3(out)

        if self.downsample is not None:
            residual = self.downsample(x)

        out += residual
        out = self.relu(out)

        return out


class ResNet(nn.Module):

    def __init__(self, block, layers, num_classes=1000):
        self.inplanes = 64
        super(ResNet, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = NN.BatchNorm2d(64) #NN.BatchNorm2d
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        #self.avgpool = nn.AvgPool2d(7, stride=1)
        #self.fc = nn.Linear(512 * block.expansion, num_classes)

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

    def _make_layer(self, block, planes, blocks, stride=1):
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                NN.BatchNorm2d(planes * block.expansion), #NN.BatchNorm2d
            )

        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))

        return nn.Sequential(*layers)

    def forward(self, x):
        features = []

        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)

        x = self.layer1(x)
        features.append(x)
        x = self.layer2(x)
        features.append(x)
        x = self.layer3(x)
        features.append(x)
        x = self.layer4(x)
        features.append(x)

        return features


def resnet18(pretrained=True, **kwargs):
    """Constructs a ResNet-18 model.
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
    return model


def resnet34(pretrained=True, **kwargs):
    """Constructs a ResNet-34 model.
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
    return model


def resnet50(pretrained=True, **kwargs):
    """Constructs a ResNet-50 model.
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)

    return model


def resnet101(pretrained=True, **kwargs):
    """Constructs a ResNet-101 model.
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)

    return model


def resnet152(pretrained=True, **kwargs):
    """Constructs a ResNet-152 model.
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
    return model
controlnet_aux/src/controlnet_aux/leres/leres/Resnext_torch.py
ADDED
@@ -0,0 +1,237 @@
#!/usr/bin/env python
# coding: utf-8
import torch.nn as nn

try:
    from urllib import urlretrieve
except ImportError:
    from urllib.request import urlretrieve

__all__ = ['resnext101_32x8d']


model_urls = {
    'resnext50_32x4d': 'https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth',
    'resnext101_32x8d': 'https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth',
}


def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
    """3x3 convolution with padding"""
    return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
                     padding=dilation, groups=groups, bias=False, dilation=dilation)


def conv1x1(in_planes, out_planes, stride=1):
    """1x1 convolution"""
    return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)


class BasicBlock(nn.Module):
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
                 base_width=64, dilation=1, norm_layer=None):
        super(BasicBlock, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        if groups != 1 or base_width != 64:
            raise ValueError('BasicBlock only supports groups=1 and base_width=64')
        if dilation > 1:
            raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
        # Both self.conv1 and self.downsample layers downsample the input when stride != 1
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = norm_layer(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = norm_layer(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        identity = x

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn2(out)

        if self.downsample is not None:
            identity = self.downsample(x)

        out += identity
        out = self.relu(out)

        return out


class Bottleneck(nn.Module):
    # Bottleneck in torchvision places the stride for downsampling at 3x3 convolution(self.conv2)
    # while original implementation places the stride at the first 1x1 convolution(self.conv1)
    # according to "Deep residual learning for image recognition"https://arxiv.org/abs/1512.03385.
    # This variant is also known as ResNet V1.5 and improves accuracy according to
    # https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch.

    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
                 base_width=64, dilation=1, norm_layer=None):
        super(Bottleneck, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        width = int(planes * (base_width / 64.)) * groups
        # Both self.conv2 and self.downsample layers downsample the input when stride != 1
        self.conv1 = conv1x1(inplanes, width)
        self.bn1 = norm_layer(width)
        self.conv2 = conv3x3(width, width, stride, groups, dilation)
        self.bn2 = norm_layer(width)
        self.conv3 = conv1x1(width, planes * self.expansion)
        self.bn3 = norm_layer(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        identity = x

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)

        out = self.conv3(out)
        out = self.bn3(out)

        if self.downsample is not None:
            identity = self.downsample(x)

        out += identity
        out = self.relu(out)

        return out


class ResNet(nn.Module):

    def __init__(self, block, layers, num_classes=1000, zero_init_residual=False,
                 groups=1, width_per_group=64, replace_stride_with_dilation=None,
                 norm_layer=None):
        super(ResNet, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        self._norm_layer = norm_layer

        self.inplanes = 64
        self.dilation = 1
        if replace_stride_with_dilation is None:
            # each element in the tuple indicates if we should replace
            # the 2x2 stride with a dilated convolution instead
            replace_stride_with_dilation = [False, False, False]
        if len(replace_stride_with_dilation) != 3:
            raise ValueError("replace_stride_with_dilation should be None "
                             "or a 3-element tuple, got {}".format(replace_stride_with_dilation))
        self.groups = groups
        self.base_width = width_per_group
        self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = norm_layer(self.inplanes)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2,
                                       dilate=replace_stride_with_dilation[0])
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2,
                                       dilate=replace_stride_with_dilation[1])
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2,
                                       dilate=replace_stride_with_dilation[2])
        #self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        #self.fc = nn.Linear(512 * block.expansion, num_classes)

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

        # Zero-initialize the last BN in each residual branch,
        # so that the residual branch starts with zeros, and each residual block behaves like an identity.
        # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
        if zero_init_residual:
            for m in self.modules():
                if isinstance(m, Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0)
                elif isinstance(m, BasicBlock):
                    nn.init.constant_(m.bn2.weight, 0)

    def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
        norm_layer = self._norm_layer
        downsample = None
        previous_dilation = self.dilation
        if dilate:
            self.dilation *= stride
            stride = 1
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                conv1x1(self.inplanes, planes * block.expansion, stride),
                norm_layer(planes * block.expansion),
            )

        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
                            self.base_width, previous_dilation, norm_layer))
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes, groups=self.groups,
                                base_width=self.base_width, dilation=self.dilation,
                                norm_layer=norm_layer))

        return nn.Sequential(*layers)

    def _forward_impl(self, x):
        # See note [TorchScript super()]
        features = []
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)

        x = self.layer1(x)
        features.append(x)

        x = self.layer2(x)
        features.append(x)

        x = self.layer3(x)
        features.append(x)

        x = self.layer4(x)
        features.append(x)

        #x = self.avgpool(x)
        #x = torch.flatten(x, 1)
        #x = self.fc(x)

        return features

    def forward(self, x):
        return self._forward_impl(x)


def resnext101_32x8d(pretrained=True, **kwargs):
    """Constructs a ResNeXt-101 32x8d model.
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    kwargs['groups'] = 32
    kwargs['width_per_group'] = 8

    model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
    return model
controlnet_aux/src/controlnet_aux/leres/leres/__init__.py
ADDED
File without changes
controlnet_aux/src/controlnet_aux/leres/leres/depthmap.py
ADDED
@@ -0,0 +1,548 @@
# Author: thygate
# https://github.com/thygate/stable-diffusion-webui-depthmap-script

import gc
from operator import getitem

import cv2
import numpy as np
import skimage.measure
import torch
from torchvision.transforms import transforms

from ...util import torch_gc

whole_size_threshold = 1600  # R_max from the paper
pix2pixsize = 1024

def scale_torch(img):
    """
    Scale the image and output it in torch.tensor.
    :param img: input rgb is in shape [H, W, C], input depth/disp is in shape [H, W]
    :param scale: the scale factor. float
    :return: img. [C, H, W]
    """
    if len(img.shape) == 2:
        img = img[np.newaxis, :, :]
    if img.shape[2] == 3:
        transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))])
        img = transform(img.astype(np.float32))
    else:
        img = img.astype(np.float32)
        img = torch.from_numpy(img)
    return img

def estimateleres(img, model, w, h):
    device = next(iter(model.parameters())).device
    # leres transform input
    rgb_c = img[:, :, ::-1].copy()
    A_resize = cv2.resize(rgb_c, (w, h))
    img_torch = scale_torch(A_resize)[None, :, :, :]

    # compute
    with torch.no_grad():
        img_torch = img_torch.to(device)
        prediction = model.depth_model(img_torch)

    prediction = prediction.squeeze().cpu().numpy()
    prediction = cv2.resize(prediction, (img.shape[1], img.shape[0]), interpolation=cv2.INTER_CUBIC)

    return prediction

def generatemask(size):
    # Generates a Gaussian mask
    mask = np.zeros(size, dtype=np.float32)
    sigma = int(size[0]/16)
    k_size = int(2 * np.ceil(2 * int(size[0]/16)) + 1)
    mask[int(0.15*size[0]):size[0] - int(0.15*size[0]), int(0.15*size[1]): size[1] - int(0.15*size[1])] = 1
    mask = cv2.GaussianBlur(mask, (int(k_size), int(k_size)), sigma)
    mask = (mask - mask.min()) / (mask.max() - mask.min())
    mask = mask.astype(np.float32)
    return mask

def resizewithpool(img, size):
    i_size = img.shape[0]
    n = int(np.floor(i_size/size))

    out = skimage.measure.block_reduce(img, (n, n), np.max)
    return out

def rgb2gray(rgb):
    # Converts rgb to gray
    return np.dot(rgb[..., :3], [0.2989, 0.5870, 0.1140])

def calculateprocessingres(img, basesize, confidence=0.1, scale_threshold=3, whole_size_threshold=3000):
    # Returns the R_x resolution described in section 5 of the main paper.

    # Parameters:
    #    img: input rgb image
    #    basesize: size of the dilation kernel, which is equal to the receptive field of the network.
    #    confidence: value of x in R_x; allowed percentage of pixels that are not getting any contextual cue.
    #    scale_threshold: maximum allowed upscaling on the input image; it has been set to 3.
    #    whole_size_threshold: maximum allowed resolution. (R_max from section 6 of the main paper)

    # Returns:
    #    outputsize_scale*speed_scale: the computed R_x resolution
    #    patch_scale: K parameter from section 6 of the paper

    # speed scale parameter is to process every image in a smaller size to accelerate the R_x resolution search
    speed_scale = 32
    image_dim = int(min(img.shape[0:2]))

    gray = rgb2gray(img)
    grad = np.abs(cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=3)) + np.abs(cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=3))
    grad = cv2.resize(grad, (image_dim, image_dim), cv2.INTER_AREA)

    # thresholding the gradient map to generate the edge-map as a proxy of the contextual cues
    m = grad.min()
    M = grad.max()
    middle = m + (0.4 * (M - m))
    grad[grad < middle] = 0
    grad[grad >= middle] = 1

    # dilation kernel with size of the receptive field
    kernel = np.ones((int(basesize/speed_scale), int(basesize/speed_scale)), float)
    # dilation kernel with size of a quarter of the receptive field used to compute k
    # as described in section 6 of main paper
    kernel2 = np.ones((int(basesize / (4*speed_scale)), int(basesize / (4*speed_scale))), float)

    # Output resolution limit set by the whole_size_threshold and scale_threshold.
    threshold = min(whole_size_threshold, scale_threshold * max(img.shape[:2]))

    outputsize_scale = basesize / speed_scale
    for p_size in range(int(basesize/speed_scale), int(threshold/speed_scale), int(basesize / (2*speed_scale))):
        grad_resized = resizewithpool(grad, p_size)
        grad_resized = cv2.resize(grad_resized, (p_size, p_size), cv2.INTER_NEAREST)
        grad_resized[grad_resized >= 0.5] = 1
        grad_resized[grad_resized < 0.5] = 0

        dilated = cv2.dilate(grad_resized, kernel, iterations=1)
        meanvalue = (1-dilated).mean()
        if meanvalue > confidence:
            break
        else:
            outputsize_scale = p_size

    grad_region = cv2.dilate(grad_resized, kernel2, iterations=1)
    patch_scale = grad_region.mean()

    return int(outputsize_scale*speed_scale), patch_scale

# Generate a double-input depth estimation
def doubleestimate(img, size1, size2, pix2pixsize, model, net_type, pix2pixmodel):
    # Generate the low resolution estimation
    estimate1 = singleestimate(img, size1, model, net_type)
    # Resize to the inference size of merge network.
    estimate1 = cv2.resize(estimate1, (pix2pixsize, pix2pixsize), interpolation=cv2.INTER_CUBIC)

    # Generate the high resolution estimation
    estimate2 = singleestimate(img, size2, model, net_type)
    # Resize to the inference size of merge network.
    estimate2 = cv2.resize(estimate2, (pix2pixsize, pix2pixsize), interpolation=cv2.INTER_CUBIC)

    # Inference on the merge model
    pix2pixmodel.set_input(estimate1, estimate2)
    pix2pixmodel.test()
    visuals = pix2pixmodel.get_current_visuals()
    prediction_mapped = visuals['fake_B']
    prediction_mapped = (prediction_mapped+1)/2
    prediction_mapped = (prediction_mapped - torch.min(prediction_mapped)) / (
                torch.max(prediction_mapped) - torch.min(prediction_mapped))
    prediction_mapped = prediction_mapped.squeeze().cpu().numpy()

    return prediction_mapped

# Generate a single-input depth estimation
def singleestimate(img, msize, model, net_type):
    # if net_type == 0:
    return estimateleres(img, model, msize, msize)
    # else:
    #     return estimatemidasBoost(img, model, msize, msize)

def applyGridpatch(blsize, stride, img, box):
    # Extract a simple grid patch.
    counter1 = 0
    patch_bound_list = {}
    for k in range(blsize, img.shape[1] - blsize, stride):
        for j in range(blsize, img.shape[0] - blsize, stride):
            patch_bound_list[str(counter1)] = {}
            patchbounds = [j - blsize, k - blsize, j - blsize + 2 * blsize, k - blsize + 2 * blsize]
            patch_bound = [box[0] + patchbounds[1], box[1] + patchbounds[0], patchbounds[3] - patchbounds[1],
                           patchbounds[2] - patchbounds[0]]
            patch_bound_list[str(counter1)]['rect'] = patch_bound
            patch_bound_list[str(counter1)]['size'] = patch_bound[2]
            counter1 = counter1 + 1
    return patch_bound_list

# Generating local patches to perform the local refinement described in section 6 of the main paper.
def generatepatchs(img, base_size):

    # Compute the gradients as a proxy of the contextual cues.
    img_gray = rgb2gray(img)
    whole_grad = np.abs(cv2.Sobel(img_gray, cv2.CV_64F, 0, 1, ksize=3)) +\
        np.abs(cv2.Sobel(img_gray, cv2.CV_64F, 1, 0, ksize=3))

    threshold = whole_grad[whole_grad > 0].mean()
    whole_grad[whole_grad < threshold] = 0

    # We use the integral image to speed-up the evaluation of the amount of gradients for each patch.
    gf = whole_grad.sum()/len(whole_grad.reshape(-1))
    grad_integral_image = cv2.integral(whole_grad)

    # Variables are selected such that the initial patch size would be the receptive field size
    # and the stride is set to 1/3 of the receptive field size.
    blsize = int(round(base_size/2))
    stride = int(round(blsize*0.75))

    # Get initial Grid
    patch_bound_list = applyGridpatch(blsize, stride, img, [0, 0, 0, 0])

    # Refine initial Grid of patches by discarding the flat (in terms of gradients of the rgb image) ones. Refine
    # each patch size to ensure that there will be enough depth cues for the network to generate a consistent depth map.
    print("Selecting patches ...")
    patch_bound_list = adaptiveselection(grad_integral_image, patch_bound_list, gf)

    # Sort the patch list to make sure the merging operation will be done with the correct order: starting from biggest
    # patch
    patchset = sorted(patch_bound_list.items(), key=lambda x: getitem(x[1], 'size'), reverse=True)
    return patchset

def getGF_fromintegral(integralimage, rect):
    # Computes the gradient density of a given patch from the gradient integral image.
    x1 = rect[1]
    x2 = rect[1]+rect[3]
    y1 = rect[0]
    y2 = rect[0]+rect[2]
    value = integralimage[x2, y2]-integralimage[x1, y2]-integralimage[x2, y1]+integralimage[x1, y1]
    return value

# Adaptively select patches
def adaptiveselection(integral_grad, patch_bound_list, gf):
    patchlist = {}
    count = 0
    height, width = integral_grad.shape

    search_step = int(32/factor)

    # Go through all patches
    for c in range(len(patch_bound_list)):
        # Get patch
        bbox = patch_bound_list[str(c)]['rect']

        # Compute the amount of gradients present in the patch from the integral image.
        cgf = getGF_fromintegral(integral_grad, bbox)/(bbox[2]*bbox[3])

        # Check if patching is beneficial by comparing the gradient density of the patch to
        # the gradient density of the whole image
        if cgf >= gf:
            bbox_test = bbox.copy()
            patchlist[str(count)] = {}

            # Enlarge each patch until the gradient density of the patch is equal
            # to the whole image gradient density
            while True:

                bbox_test[0] = bbox_test[0] - int(search_step/2)
                bbox_test[1] = bbox_test[1] - int(search_step/2)

                bbox_test[2] = bbox_test[2] + search_step
                bbox_test[3] = bbox_test[3] + search_step

                # Check if we are still within the image
                if bbox_test[0] < 0 or bbox_test[1] < 0 or bbox_test[1] + bbox_test[3] >= height \
                        or bbox_test[0] + bbox_test[2] >= width:
                    break

                # Compare gradient density
                cgf = getGF_fromintegral(integral_grad, bbox_test)/(bbox_test[2]*bbox_test[3])
                if cgf < gf:
                    break
                bbox = bbox_test.copy()

            # Add patch to selected patches
            patchlist[str(count)]['rect'] = bbox
            patchlist[str(count)]['size'] = bbox[2]
            count = count + 1

    # Return selected patches
    return patchlist

def impatch(image, rect):
    # Extract the given patch pixels from a given image.
    w1 = rect[0]
    h1 = rect[1]
    w2 = w1 + rect[2]
    h2 = h1 + rect[3]
    image_patch = image[h1:h2, w1:w2]
    return image_patch

class ImageandPatchs:
    def __init__(self, root_dir, name, patchsinfo, rgb_image, scale=1):
        self.root_dir = root_dir
        self.patchsinfo = patchsinfo
        self.name = name
        self.patchs = patchsinfo
        self.scale = scale

        self.rgb_image = cv2.resize(rgb_image, (round(rgb_image.shape[1]*scale), round(rgb_image.shape[0]*scale)),
                                    interpolation=cv2.INTER_CUBIC)

        self.do_have_estimate = False
        self.estimation_updated_image = None
        self.estimation_base_image = None

    def __len__(self):
        return len(self.patchs)

    def set_base_estimate(self, est):
        self.estimation_base_image = est
        if self.estimation_updated_image is not None:
            self.do_have_estimate = True

    def set_updated_estimate(self, est):
        self.estimation_updated_image = est
        if self.estimation_base_image is not None:
            self.do_have_estimate = True

    def __getitem__(self, index):
        patch_id = int(self.patchs[index][0])
        rect = np.array(self.patchs[index][1]['rect'])
        msize = self.patchs[index][1]['size']

        ## applying scale to rect:
        rect = np.round(rect * self.scale)
        rect = rect.astype('int')
        msize = round(msize * self.scale)

        patch_rgb = impatch(self.rgb_image, rect)
        if self.do_have_estimate:
            patch_whole_estimate_base = impatch(self.estimation_base_image, rect)
            patch_whole_estimate_updated = impatch(self.estimation_updated_image, rect)
            return {'patch_rgb': patch_rgb, 'patch_whole_estimate_base': patch_whole_estimate_base,
                    'patch_whole_estimate_updated': patch_whole_estimate_updated, 'rect': rect,
                    'size': msize, 'id': patch_id}
        else:
            return {'patch_rgb': patch_rgb, 'rect': rect, 'size': msize, 'id': patch_id}

    def print_options(self, opt):
        """Print and save options

        It will print both current options and default values(if different).
        It will save options into a text file / [checkpoints_dir] / opt.txt
        """
        message = ''
        message += '----------------- Options ---------------\n'
        for k, v in sorted(vars(opt).items()):
            comment = ''
            default = self.parser.get_default(k)
            if v != default:
                comment = '\t[default: %s]' % str(default)
            message += '{:>25}: {:<30}{}\n'.format(str(k), str(v), comment)
        message += '----------------- End -------------------'
        print(message)

        # save to the disk
        """
        expr_dir = os.path.join(opt.checkpoints_dir, opt.name)
        util.mkdirs(expr_dir)
        file_name = os.path.join(expr_dir, '{}_opt.txt'.format(opt.phase))
        with open(file_name, 'wt') as opt_file:
            opt_file.write(message)
            opt_file.write('\n')
        """

    def parse(self):
        """Parse our options, create checkpoints directory suffix, and set up gpu device."""
        opt = self.gather_options()
        opt.isTrain = self.isTrain  # train or test

        # process opt.suffix
        if opt.suffix:
            suffix = ('_' + opt.suffix.format(**vars(opt))) if opt.suffix != '' else ''
            opt.name = opt.name + suffix

        #self.print_options(opt)

        # set gpu ids
        str_ids = opt.gpu_ids.split(',')
        opt.gpu_ids = []
        for str_id in str_ids:
            id = int(str_id)
            if id >= 0:
                opt.gpu_ids.append(id)
        #if len(opt.gpu_ids) > 0:
        #    torch.cuda.set_device(opt.gpu_ids[0])

        self.opt = opt
        return self.opt


def estimateboost(img, model, model_type, pix2pixmodel, max_res=512, depthmap_script_boost_rmax=None):
    global whole_size_threshold

    # get settings
    if depthmap_script_boost_rmax:
        whole_size_threshold = depthmap_script_boost_rmax

    if model_type == 0: #leres
        net_receptive_field_size = 448
        patch_netsize = 2 * net_receptive_field_size
    elif model_type == 1: #dpt_beit_large_512
        net_receptive_field_size = 512
        patch_netsize = 2 * net_receptive_field_size
    else: #other midas
        net_receptive_field_size = 384
        patch_netsize = 2 * net_receptive_field_size

    gc.collect()
    torch_gc()

    # Generate mask used to smoothly blend the local patch estimations to the base estimate.
    # It is arbitrarily large to avoid artifacts during rescaling for each crop.
    mask_org = generatemask((3000, 3000))
    mask = mask_org.copy()

    # Value x of R_x defined in the section 5 of the main paper.
    r_threshold_value = 0.2
    #if R0:
    #    r_threshold_value = 0

    input_resolution = img.shape
    scale_threshold = 3  # Allows up-scaling with a scale up to 3

    # Find the best input resolution R-x. The resolution search described in section 5-double estimation of the main paper and section B of the
    # supplementary material.
    whole_image_optimal_size, patch_scale = calculateprocessingres(img, net_receptive_field_size, r_threshold_value, scale_threshold, whole_size_threshold)

    # print('wholeImage being processed in :', whole_image_optimal_size)

    # Generate the base estimate using the double estimation.
    whole_estimate = doubleestimate(img, net_receptive_field_size, whole_image_optimal_size, pix2pixsize, model, model_type, pix2pixmodel)

    # Compute the multiplier described in section 6 of the main paper to make sure our initial patch can select
    # small high-density regions of the image.
    global factor
    factor = max(min(1, 4 * patch_scale * whole_image_optimal_size / whole_size_threshold), 0.2)
    # print('Adjust factor is:', 1/factor)

    # Check if Local boosting is beneficial.
    if max_res < whole_image_optimal_size:
        # print("No Local boosting. Specified Max Res is smaller than R20, Returning doubleestimate result")
        return cv2.resize(whole_estimate, (input_resolution[1], input_resolution[0]), interpolation=cv2.INTER_CUBIC)

    # Compute the default target resolution.
    if img.shape[0] > img.shape[1]:
        a = 2 * whole_image_optimal_size
        b = round(2 * whole_image_optimal_size * img.shape[1] / img.shape[0])
    else:
        a = round(2 * whole_image_optimal_size * img.shape[0] / img.shape[1])
        b = 2 * whole_image_optimal_size
    b = int(round(b / factor))
    a = int(round(a / factor))

    """
    # recompute a, b and saturate to max res.
    if max(a,b) > max_res:
        print('Default Res is higher than max-res: Reducing final resolution')
        if img.shape[0] > img.shape[1]:
            a = max_res
            b = round(max_res * img.shape[1] / img.shape[0])
        else:
            a = round(max_res * img.shape[0] / img.shape[1])
            b = max_res
        b = int(b)
        a = int(a)
    """

    img = cv2.resize(img, (b, a), interpolation=cv2.INTER_CUBIC)

    # Extract selected patches for local refinement
    base_size = net_receptive_field_size * 2
    patchset = generatepatchs(img, base_size)

    # print('Target resolution: ', img.shape)

    # Computing a scale in case the user prompted to generate the results at the same resolution as the input.
    # Notice that our method output resolution is independent of the input resolution and this parameter will only
    # enable a scaling operation during the local patch merge implementation to generate results with the same resolution
    # as the input.
    """
    if output_resolution == 1:
        mergein_scale = input_resolution[0] / img.shape[0]
        print('Dynamicly change merged-in resolution; scale:', mergein_scale)
    else:
        mergein_scale = 1
    """
    # always rescale to input res for now
    mergein_scale = input_resolution[0] / img.shape[0]

    imageandpatchs = ImageandPatchs('', '', patchset, img, mergein_scale)
    whole_estimate_resized = cv2.resize(whole_estimate, (round(img.shape[1]*mergein_scale),
                                        round(img.shape[0]*mergein_scale)), interpolation=cv2.INTER_CUBIC)
    imageandpatchs.set_base_estimate(whole_estimate_resized.copy())
    imageandpatchs.set_updated_estimate(whole_estimate_resized.copy())

    print('Resulting depthmap resolution will be :', whole_estimate_resized.shape[:2])
    print('Patches to process: '+str(len(imageandpatchs)))

    # Enumerate through all patches, generate their estimations and refining the base estimate.
    for patch_ind in range(len(imageandpatchs)):

        # Get patch information
        patch = imageandpatchs[patch_ind]  # patch object
        patch_rgb = patch['patch_rgb']  # rgb patch
        patch_whole_estimate_base = patch['patch_whole_estimate_base']  # corresponding patch from base
        rect = patch['rect']  # patch size and location
        patch_id = patch['id']  # patch ID
        org_size = patch_whole_estimate_base.shape  # the original size from the unscaled input
        print('\t Processing patch', patch_ind, '/', len(imageandpatchs)-1, '|', rect)

        # We apply double estimation for patches. The high resolution value is fixed to twice the receptive
        # field size of the network for patches to accelerate the process.
        patch_estimation = doubleestimate(patch_rgb, net_receptive_field_size, patch_netsize, pix2pixsize, model, model_type, pix2pixmodel)
        patch_estimation = cv2.resize(patch_estimation, (pix2pixsize, pix2pixsize), interpolation=cv2.INTER_CUBIC)
        patch_whole_estimate_base = cv2.resize(patch_whole_estimate_base, (pix2pixsize, pix2pixsize), interpolation=cv2.INTER_CUBIC)

        # Merging the patch estimation into the base estimate using our merge network:
        # We feed the patch estimation and the same region from the updated base estimate to the merge network
        # to generate the target estimate for the corresponding region.
        pix2pixmodel.set_input(patch_whole_estimate_base, patch_estimation)

        # Run merging network
        pix2pixmodel.test()
        visuals = pix2pixmodel.get_current_visuals()

        prediction_mapped = visuals['fake_B']
        prediction_mapped = (prediction_mapped+1)/2
        prediction_mapped = prediction_mapped.squeeze().cpu().numpy()

        mapped = prediction_mapped

        # We use a simple linear polynomial to make sure the result of the merge network would match the values of
        # base estimate
        p_coef = np.polyfit(mapped.reshape(-1), patch_whole_estimate_base.reshape(-1), deg=1)
        merged = np.polyval(p_coef, mapped.reshape(-1)).reshape(mapped.shape)

        merged = cv2.resize(merged, (org_size[1], org_size[0]), interpolation=cv2.INTER_CUBIC)

        # Get patch size and location
        w1 = rect[0]
        h1 = rect[1]
        w2 = w1 + rect[2]
        h2 = h1 + rect[3]

        # To speed up the implementation, we only generate the Gaussian mask once with a sufficiently large size
        # and resize it to our needed size while merging the patches.
        if mask.shape != org_size:
            mask = cv2.resize(mask_org, (org_size[1], org_size[0]), interpolation=cv2.INTER_LINEAR)

        tobemergedto = imageandpatchs.estimation_updated_image

        # Update the whole estimation:
        # We use a simple Gaussian mask to blend the merged patch region with the base estimate to ensure seamless
        # blending at the boundaries of the patch region.
        tobemergedto[h1:h2, w1:w2] = np.multiply(tobemergedto[h1:h2, w1:w2], 1 - mask) + np.multiply(merged, mask)
        imageandpatchs.set_updated_estimate(tobemergedto)

    # output
    return cv2.resize(imageandpatchs.estimation_updated_image, (input_resolution[1], input_resolution[0]), interpolation=cv2.INTER_CUBIC)
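As a rough guide to the two entry points above: `estimateleres` runs a single fixed-size forward pass, while `estimateboost` searches for the R_x working resolution, builds a base estimate with `doubleestimate`, then refines gradient-rich patches and blends them back under a Gaussian mask. The sketch below is illustrative only; the input array, loaded `model` (a RelDepthModel) and `pix2pixmodel` (the merge network) are assumed to come from the detector code earlier in this diff:

    import numpy as np

    # img: HxWx3 uint8 RGB array; model and pix2pixmodel loaded as in LeresDetector.from_pretrained
    depth_fast = estimateleres(img, model, 448, 448)          # single-pass estimate at the network size
    depth_boost = estimateboost(img, model, 0, pix2pixmodel,  # model_type 0 selects the LeReS branch
                                max_res=max(img.shape[:2]))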
controlnet_aux/src/controlnet_aux/leres/leres/multi_depth_model_woauxi.py
ADDED
@@ -0,0 +1,35 @@
import torch
import torch.nn as nn

from . import network_auxi as network
from .net_tools import get_func


class RelDepthModel(nn.Module):
    def __init__(self, backbone='resnet50'):
        super(RelDepthModel, self).__init__()
        if backbone == 'resnet50':
            encoder = 'resnet50_stride32'
        elif backbone == 'resnext101':
            encoder = 'resnext101_stride32x8d'
        self.depth_model = DepthModel(encoder)

    def inference(self, rgb):
        with torch.no_grad():
            input = rgb.to(self.depth_model.device)
            depth = self.depth_model(input)
            #pred_depth_out = depth - depth.min() + 0.01
            return depth #pred_depth_out


class DepthModel(nn.Module):
    def __init__(self, encoder):
        super(DepthModel, self).__init__()
        backbone = network.__name__.split('.')[-1] + '.' + encoder
        self.encoder_modules = get_func(backbone)()
        self.decoder_modules = network.Decoder()

    def forward(self, x):
        lateral_out = self.encoder_modules(x)
        out_logit = self.decoder_modules(lateral_out)
        return out_logit
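For reference, a minimal shape check of the model assembled above; the input size is an arbitrary choice for the example, and the expected output shape assumes the decoder modules later in this diff upsample back to the input resolution with a single depth channel, as their names suggest:

    import torch

    net = RelDepthModel(backbone='resnet50')
    with torch.no_grad():
        out = net.depth_model(torch.zeros(1, 3, 448, 448))
    print(out.shape)  # expected: torch.Size([1, 1, 448, 448]) – one-channel depth logits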
controlnet_aux/src/controlnet_aux/leres/leres/net_tools.py
ADDED
@@ -0,0 +1,54 @@
import importlib
import torch
import os
from collections import OrderedDict


def get_func(func_name):
    """Helper to return a function object by name. func_name must identify a
    function in this module or the path to a function relative to the base
    'modeling' module.
    """
    if func_name == '':
        return None
    try:
        parts = func_name.split('.')
        # Refers to a function in this module
        if len(parts) == 1:
            return globals()[parts[0]]
        # Otherwise, assume we're referencing a module under modeling
        module_name = 'controlnet_aux.leres.leres.' + '.'.join(parts[:-1])
        module = importlib.import_module(module_name)
        return getattr(module, parts[-1])
    except Exception:
        print('Failed to find function: %s', func_name)
        raise

def load_ckpt(args, depth_model, shift_model, focal_model):
    """
    Load checkpoint.
    """
    if os.path.isfile(args.load_ckpt):
        print("loading checkpoint %s" % args.load_ckpt)
        checkpoint = torch.load(args.load_ckpt)
        if shift_model is not None:
            shift_model.load_state_dict(strip_prefix_if_present(checkpoint['shift_model'], 'module.'),
                                        strict=True)
        if focal_model is not None:
            focal_model.load_state_dict(strip_prefix_if_present(checkpoint['focal_model'], 'module.'),
                                        strict=True)
        depth_model.load_state_dict(strip_prefix_if_present(checkpoint['depth_model'], "module."),
                                    strict=True)
        del checkpoint
        if torch.cuda.is_available():
            torch.cuda.empty_cache()


def strip_prefix_if_present(state_dict, prefix):
    keys = sorted(state_dict.keys())
    if not all(key.startswith(prefix) for key in keys):
        return state_dict
    stripped_state_dict = OrderedDict()
    for key, value in state_dict.items():
        stripped_state_dict[key.replace(prefix, "")] = value
    return stripped_state_dict
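A small illustration of `strip_prefix_if_present`, which the loader above relies on to drop the `module.` prefix that `DataParallel` checkpoints carry; the key name and tensor value are placeholders:

    from collections import OrderedDict
    import torch

    sd = OrderedDict({"module.conv1.weight": torch.zeros(1)})
    print(list(strip_prefix_if_present(sd, "module.").keys()))  # ['conv1.weight']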
controlnet_aux/src/controlnet_aux/leres/leres/network_auxi.py
ADDED
@@ -0,0 +1,417 @@
import torch
import torch.nn as nn
import torch.nn.init as init

from . import Resnet, Resnext_torch


def resnet50_stride32():
    return DepthNet(backbone='resnet', depth=50, upfactors=[2, 2, 2, 2])

def resnext101_stride32x8d():
    return DepthNet(backbone='resnext101_32x8d', depth=101, upfactors=[2, 2, 2, 2])


class Decoder(nn.Module):
    def __init__(self):
        super(Decoder, self).__init__()
        self.inchannels = [256, 512, 1024, 2048]
        self.midchannels = [256, 256, 256, 512]
        self.upfactors = [2, 2, 2, 2]
        self.outchannels = 1

        self.conv = FTB(inchannels=self.inchannels[3], midchannels=self.midchannels[3])
        self.conv1 = nn.Conv2d(in_channels=self.midchannels[3], out_channels=self.midchannels[2], kernel_size=3, padding=1, stride=1, bias=True)
        self.upsample = nn.Upsample(scale_factor=self.upfactors[3], mode='bilinear', align_corners=True)

        self.ffm2 = FFM(inchannels=self.inchannels[2], midchannels=self.midchannels[2], outchannels=self.midchannels[2], upfactor=self.upfactors[2])
        self.ffm1 = FFM(inchannels=self.inchannels[1], midchannels=self.midchannels[1], outchannels=self.midchannels[1], upfactor=self.upfactors[1])
        self.ffm0 = FFM(inchannels=self.inchannels[0], midchannels=self.midchannels[0], outchannels=self.midchannels[0], upfactor=self.upfactors[0])

        self.outconv = AO(inchannels=self.midchannels[0], outchannels=self.outchannels, upfactor=2)
        self._init_params()

    def _init_params(self):
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                init.normal_(m.weight, std=0.01)
                if m.bias is not None:
                    init.constant_(m.bias, 0)
            elif isinstance(m, nn.ConvTranspose2d):
                init.normal_(m.weight, std=0.01)
                if m.bias is not None:
                    init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d): #NN.BatchNorm2d
                init.constant_(m.weight, 1)
                init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                init.normal_(m.weight, std=0.01)
                if m.bias is not None:
                    init.constant_(m.bias, 0)

    def forward(self, features):
        x_32x = self.conv(features[3])  # 1/32
        x_32 = self.conv1(x_32x)
        x_16 = self.upsample(x_32)  # 1/16

        x_8 = self.ffm2(features[2], x_16)  # 1/8
        x_4 = self.ffm1(features[1], x_8)  # 1/4
        x_2 = self.ffm0(features[0], x_4)  # 1/2
        #-----------------------------------------
        x = self.outconv(x_2)  # original size
        return x

class DepthNet(nn.Module):
    __factory = {
        18: Resnet.resnet18,
        34: Resnet.resnet34,
        50: Resnet.resnet50,
        101: Resnet.resnet101,
        152: Resnet.resnet152
    }
    def __init__(self,
                 backbone='resnet',
                 depth=50,
                 upfactors=[2, 2, 2, 2]):
        super(DepthNet, self).__init__()
        self.backbone = backbone
        self.depth = depth
        self.pretrained = False
        self.inchannels = [256, 512, 1024, 2048]
        self.midchannels = [256, 256, 256, 512]
        self.upfactors = upfactors
        self.outchannels = 1

        # Build model
        if self.backbone == 'resnet':
            if self.depth not in DepthNet.__factory:
                raise KeyError("Unsupported depth:", self.depth)
            self.encoder = DepthNet.__factory[depth](pretrained=self.pretrained)
        elif self.backbone == 'resnext101_32x8d':
            self.encoder = Resnext_torch.resnext101_32x8d(pretrained=self.pretrained)
        else:
            self.encoder = Resnext_torch.resnext101(pretrained=self.pretrained)

    def forward(self, x):
        x = self.encoder(x)  # 1/32, 1/16, 1/8, 1/4
        return x


class FTB(nn.Module):
    def __init__(self, inchannels, midchannels=512):
        super(FTB, self).__init__()
        self.in1 = inchannels
        self.mid = midchannels
        self.conv1 = nn.Conv2d(in_channels=self.in1, out_channels=self.mid, kernel_size=3, padding=1, stride=1,
                               bias=True)
        # NN.BatchNorm2d
        self.conv_branch = nn.Sequential(nn.ReLU(inplace=True), \
                                         nn.Conv2d(in_channels=self.mid, out_channels=self.mid, kernel_size=3,
                                                   padding=1, stride=1, bias=True), \
                                         nn.BatchNorm2d(num_features=self.mid), \
                                         nn.ReLU(inplace=True), \
                                         nn.Conv2d(in_channels=self.mid, out_channels=self.mid, kernel_size=3,
                                                   padding=1, stride=1, bias=True))
        self.relu = nn.ReLU(inplace=True)

        self.init_params()

    def forward(self, x):
        x = self.conv1(x)
        x = x + self.conv_branch(x)
        x = self.relu(x)

        return x

    def init_params(self):
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                init.normal_(m.weight, std=0.01)
                if m.bias is not None:
                    init.constant_(m.bias, 0)
            elif isinstance(m, nn.ConvTranspose2d):
                # init.kaiming_normal_(m.weight, mode='fan_out')
                init.normal_(m.weight, std=0.01)
                # init.xavier_normal_(m.weight)
                if m.bias is not None:
                    init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d): # NN.BatchNorm2d
                init.constant_(m.weight, 1)
                init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                init.normal_(m.weight, std=0.01)
                if m.bias is not None:
                    init.constant_(m.bias, 0)


class ATA(nn.Module):
    def __init__(self, inchannels, reduction=8):
        super(ATA, self).__init__()
        self.inchannels = inchannels
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.fc = nn.Sequential(nn.Linear(self.inchannels * 2, self.inchannels // reduction),
                                nn.ReLU(inplace=True),
                                nn.Linear(self.inchannels // reduction, self.inchannels),
                                nn.Sigmoid())
        self.init_params()

    def forward(self, low_x, high_x):
        n, c, _, _ = low_x.size()
        x = torch.cat([low_x, high_x], 1)
        x = self.avg_pool(x)
        x = x.view(n, -1)
        x = self.fc(x).view(n, c, 1, 1)
        x = low_x * x + high_x

        return x

    def init_params(self):
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                # init.kaiming_normal_(m.weight, mode='fan_out')
                # init.normal(m.weight, std=0.01)
                init.xavier_normal_(m.weight)
                if m.bias is not None:
                    init.constant_(m.bias, 0)
            elif isinstance(m, nn.ConvTranspose2d):
                # init.kaiming_normal_(m.weight, mode='fan_out')
                # init.normal_(m.weight, std=0.01)
                init.xavier_normal_(m.weight)
                if m.bias is not None:
                    init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d): # NN.BatchNorm2d
                init.constant_(m.weight, 1)
                init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                init.normal_(m.weight, std=0.01)
                if m.bias is not None:
                    init.constant_(m.bias, 0)


class FFM(nn.Module):
    def __init__(self, inchannels, midchannels, outchannels, upfactor=2):
        super(FFM, self).__init__()
        self.inchannels = inchannels
        self.midchannels = midchannels
        self.outchannels = outchannels
        self.upfactor = upfactor

        self.ftb1 = FTB(inchannels=self.inchannels, midchannels=self.midchannels)
        # self.ata = ATA(inchannels = self.midchannels)
        self.ftb2 = FTB(inchannels=self.midchannels, midchannels=self.outchannels)

        self.upsample = nn.Upsample(scale_factor=self.upfactor, mode='bilinear', align_corners=True)

        self.init_params()

    def forward(self, low_x, high_x):
        x = self.ftb1(low_x)
        x = x + high_x
        x = self.ftb2(x)
        x = self.upsample(x)

        return x

    def init_params(self):
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                # init.kaiming_normal_(m.weight, mode='fan_out')
                init.normal_(m.weight, std=0.01)
                # init.xavier_normal_(m.weight)
                if m.bias is not None:
                    init.constant_(m.bias, 0)
            elif isinstance(m, nn.ConvTranspose2d):
|
224 |
+
# init.kaiming_normal_(m.weight, mode='fan_out')
|
225 |
+
init.normal_(m.weight, std=0.01)
|
226 |
+
# init.xavier_normal_(m.weight)
|
227 |
+
if m.bias is not None:
|
228 |
+
init.constant_(m.bias, 0)
|
229 |
+
elif isinstance(m, nn.BatchNorm2d): # NN.Batchnorm2d
|
230 |
+
init.constant_(m.weight, 1)
|
231 |
+
init.constant_(m.bias, 0)
|
232 |
+
elif isinstance(m, nn.Linear):
|
233 |
+
init.normal_(m.weight, std=0.01)
|
234 |
+
if m.bias is not None:
|
235 |
+
init.constant_(m.bias, 0)
|
236 |
+
|
237 |
+
|
238 |
+
class AO(nn.Module):
|
239 |
+
# Adaptive output module
|
240 |
+
def __init__(self, inchannels, outchannels, upfactor=2):
|
241 |
+
super(AO, self).__init__()
|
242 |
+
self.inchannels = inchannels
|
243 |
+
self.outchannels = outchannels
|
244 |
+
self.upfactor = upfactor
|
245 |
+
|
246 |
+
self.adapt_conv = nn.Sequential(
|
247 |
+
nn.Conv2d(in_channels=self.inchannels, out_channels=self.inchannels // 2, kernel_size=3, padding=1,
|
248 |
+
stride=1, bias=True), \
|
249 |
+
nn.BatchNorm2d(num_features=self.inchannels // 2), \
|
250 |
+
nn.ReLU(inplace=True), \
|
251 |
+
nn.Conv2d(in_channels=self.inchannels // 2, out_channels=self.outchannels, kernel_size=3, padding=1,
|
252 |
+
stride=1, bias=True), \
|
253 |
+
nn.Upsample(scale_factor=self.upfactor, mode='bilinear', align_corners=True))
|
254 |
+
|
255 |
+
self.init_params()
|
256 |
+
|
257 |
+
def forward(self, x):
|
258 |
+
x = self.adapt_conv(x)
|
259 |
+
return x
|
260 |
+
|
261 |
+
def init_params(self):
|
262 |
+
for m in self.modules():
|
263 |
+
if isinstance(m, nn.Conv2d):
|
264 |
+
# init.kaiming_normal_(m.weight, mode='fan_out')
|
265 |
+
init.normal_(m.weight, std=0.01)
|
266 |
+
# init.xavier_normal_(m.weight)
|
267 |
+
if m.bias is not None:
|
268 |
+
init.constant_(m.bias, 0)
|
269 |
+
elif isinstance(m, nn.ConvTranspose2d):
|
270 |
+
# init.kaiming_normal_(m.weight, mode='fan_out')
|
271 |
+
init.normal_(m.weight, std=0.01)
|
272 |
+
# init.xavier_normal_(m.weight)
|
273 |
+
if m.bias is not None:
|
274 |
+
init.constant_(m.bias, 0)
|
275 |
+
elif isinstance(m, nn.BatchNorm2d): # NN.Batchnorm2d
|
276 |
+
init.constant_(m.weight, 1)
|
277 |
+
init.constant_(m.bias, 0)
|
278 |
+
elif isinstance(m, nn.Linear):
|
279 |
+
init.normal_(m.weight, std=0.01)
|
280 |
+
if m.bias is not None:
|
281 |
+
init.constant_(m.bias, 0)
|
282 |
+
|
283 |
+
|
284 |
+
|
285 |
+
# ==============================================================================================================
|
286 |
+
|
287 |
+
|
288 |
+
class ResidualConv(nn.Module):
|
289 |
+
def __init__(self, inchannels):
|
290 |
+
super(ResidualConv, self).__init__()
|
291 |
+
# NN.BatchNorm2d
|
292 |
+
self.conv = nn.Sequential(
|
293 |
+
# nn.BatchNorm2d(num_features=inchannels),
|
294 |
+
nn.ReLU(inplace=False),
|
295 |
+
# nn.Conv2d(in_channels=inchannels, out_channels=inchannels, kernel_size=3, padding=1, stride=1, groups=inchannels,bias=True),
|
296 |
+
# nn.Conv2d(in_channels=inchannels, out_channels=inchannels, kernel_size=1, padding=0, stride=1, groups=1,bias=True)
|
297 |
+
nn.Conv2d(in_channels=inchannels, out_channels=inchannels / 2, kernel_size=3, padding=1, stride=1,
|
298 |
+
bias=False),
|
299 |
+
nn.BatchNorm2d(num_features=inchannels / 2),
|
300 |
+
nn.ReLU(inplace=False),
|
301 |
+
nn.Conv2d(in_channels=inchannels / 2, out_channels=inchannels, kernel_size=3, padding=1, stride=1,
|
302 |
+
bias=False)
|
303 |
+
)
|
304 |
+
self.init_params()
|
305 |
+
|
306 |
+
def forward(self, x):
|
307 |
+
x = self.conv(x) + x
|
308 |
+
return x
|
309 |
+
|
310 |
+
def init_params(self):
|
311 |
+
for m in self.modules():
|
312 |
+
if isinstance(m, nn.Conv2d):
|
313 |
+
# init.kaiming_normal_(m.weight, mode='fan_out')
|
314 |
+
init.normal_(m.weight, std=0.01)
|
315 |
+
# init.xavier_normal_(m.weight)
|
316 |
+
if m.bias is not None:
|
317 |
+
init.constant_(m.bias, 0)
|
318 |
+
elif isinstance(m, nn.ConvTranspose2d):
|
319 |
+
# init.kaiming_normal_(m.weight, mode='fan_out')
|
320 |
+
init.normal_(m.weight, std=0.01)
|
321 |
+
# init.xavier_normal_(m.weight)
|
322 |
+
if m.bias is not None:
|
323 |
+
init.constant_(m.bias, 0)
|
324 |
+
elif isinstance(m, nn.BatchNorm2d): # NN.BatchNorm2d
|
325 |
+
init.constant_(m.weight, 1)
|
326 |
+
init.constant_(m.bias, 0)
|
327 |
+
elif isinstance(m, nn.Linear):
|
328 |
+
init.normal_(m.weight, std=0.01)
|
329 |
+
if m.bias is not None:
|
330 |
+
init.constant_(m.bias, 0)
|
331 |
+
|
332 |
+
|
333 |
+
class FeatureFusion(nn.Module):
|
334 |
+
def __init__(self, inchannels, outchannels):
|
335 |
+
super(FeatureFusion, self).__init__()
|
336 |
+
self.conv = ResidualConv(inchannels=inchannels)
|
337 |
+
# NN.BatchNorm2d
|
338 |
+
self.up = nn.Sequential(ResidualConv(inchannels=inchannels),
|
339 |
+
nn.ConvTranspose2d(in_channels=inchannels, out_channels=outchannels, kernel_size=3,
|
340 |
+
stride=2, padding=1, output_padding=1),
|
341 |
+
nn.BatchNorm2d(num_features=outchannels),
|
342 |
+
nn.ReLU(inplace=True))
|
343 |
+
|
344 |
+
def forward(self, lowfeat, highfeat):
|
345 |
+
return self.up(highfeat + self.conv(lowfeat))
|
346 |
+
|
347 |
+
def init_params(self):
|
348 |
+
for m in self.modules():
|
349 |
+
if isinstance(m, nn.Conv2d):
|
350 |
+
# init.kaiming_normal_(m.weight, mode='fan_out')
|
351 |
+
init.normal_(m.weight, std=0.01)
|
352 |
+
# init.xavier_normal_(m.weight)
|
353 |
+
if m.bias is not None:
|
354 |
+
init.constant_(m.bias, 0)
|
355 |
+
elif isinstance(m, nn.ConvTranspose2d):
|
356 |
+
# init.kaiming_normal_(m.weight, mode='fan_out')
|
357 |
+
init.normal_(m.weight, std=0.01)
|
358 |
+
# init.xavier_normal_(m.weight)
|
359 |
+
if m.bias is not None:
|
360 |
+
init.constant_(m.bias, 0)
|
361 |
+
elif isinstance(m, nn.BatchNorm2d): # NN.BatchNorm2d
|
362 |
+
init.constant_(m.weight, 1)
|
363 |
+
init.constant_(m.bias, 0)
|
364 |
+
elif isinstance(m, nn.Linear):
|
365 |
+
init.normal_(m.weight, std=0.01)
|
366 |
+
if m.bias is not None:
|
367 |
+
init.constant_(m.bias, 0)
|
368 |
+
|
369 |
+
|
370 |
+
class SenceUnderstand(nn.Module):
|
371 |
+
def __init__(self, channels):
|
372 |
+
super(SenceUnderstand, self).__init__()
|
373 |
+
self.channels = channels
|
374 |
+
self.conv1 = nn.Sequential(nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, padding=1),
|
375 |
+
nn.ReLU(inplace=True))
|
376 |
+
self.pool = nn.AdaptiveAvgPool2d(8)
|
377 |
+
self.fc = nn.Sequential(nn.Linear(512 * 8 * 8, self.channels),
|
378 |
+
nn.ReLU(inplace=True))
|
379 |
+
self.conv2 = nn.Sequential(
|
380 |
+
nn.Conv2d(in_channels=self.channels, out_channels=self.channels, kernel_size=1, padding=0),
|
381 |
+
nn.ReLU(inplace=True))
|
382 |
+
self.initial_params()
|
383 |
+
|
384 |
+
def forward(self, x):
|
385 |
+
n, c, h, w = x.size()
|
386 |
+
x = self.conv1(x)
|
387 |
+
x = self.pool(x)
|
388 |
+
x = x.view(n, -1)
|
389 |
+
x = self.fc(x)
|
390 |
+
x = x.view(n, self.channels, 1, 1)
|
391 |
+
x = self.conv2(x)
|
392 |
+
x = x.repeat(1, 1, h, w)
|
393 |
+
return x
|
394 |
+
|
395 |
+
def initial_params(self, dev=0.01):
|
396 |
+
for m in self.modules():
|
397 |
+
if isinstance(m, nn.Conv2d):
|
398 |
+
# print torch.sum(m.weight)
|
399 |
+
m.weight.data.normal_(0, dev)
|
400 |
+
if m.bias is not None:
|
401 |
+
m.bias.data.fill_(0)
|
402 |
+
elif isinstance(m, nn.ConvTranspose2d):
|
403 |
+
# print torch.sum(m.weight)
|
404 |
+
m.weight.data.normal_(0, dev)
|
405 |
+
if m.bias is not None:
|
406 |
+
m.bias.data.fill_(0)
|
407 |
+
elif isinstance(m, nn.Linear):
|
408 |
+
m.weight.data.normal_(0, dev)
|
409 |
+
|
410 |
+
|
411 |
+
if __name__ == '__main__':
|
412 |
+
net = DepthNet(depth=50, pretrained=True)
|
413 |
+
print(net)
|
414 |
+
inputs = torch.ones(4,3,128,128)
|
415 |
+
out = net(inputs)
|
416 |
+
print(out.size())
|
417 |
+
|
controlnet_aux/src/controlnet_aux/leres/pix2pix/LICENSE
ADDED
@@ -0,0 +1,19 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
https://github.com/compphoto/BoostingMonocularDepth
|
2 |
+
|
3 |
+
Copyright 2021, Seyed Mahdi Hosseini Miangoleh, Sebastian Dille, Computational Photography Laboratory. All rights reserved.
|
4 |
+
|
5 |
+
This software is for academic use only. A redistribution of this
|
6 |
+
software, with or without modifications, has to be for academic
|
7 |
+
use only, while giving the appropriate credit to the original
|
8 |
+
authors of the software. The methods implemented as a part of
|
9 |
+
this software may be covered under patents or patent applications.
|
10 |
+
|
11 |
+
THIS SOFTWARE IS PROVIDED BY THE AUTHOR ''AS IS'' AND ANY EXPRESS OR IMPLIED
|
12 |
+
WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
|
13 |
+
FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR
|
14 |
+
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
|
15 |
+
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
16 |
+
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
|
17 |
+
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
18 |
+
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
|
19 |
+
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
controlnet_aux/src/controlnet_aux/leres/pix2pix/__init__.py
ADDED
File without changes
|
controlnet_aux/src/controlnet_aux/leres/pix2pix/models/__init__.py
ADDED
@@ -0,0 +1,67 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""This package contains modules related to objective functions, optimizations, and network architectures.
|
2 |
+
|
3 |
+
To add a custom model class called 'dummy', you need to add a file called 'dummy_model.py' and define a subclass DummyModel inherited from BaseModel.
|
4 |
+
You need to implement the following five functions:
|
5 |
+
-- <__init__>: initialize the class; first call BaseModel.__init__(self, opt).
|
6 |
+
-- <set_input>: unpack data from dataset and apply preprocessing.
|
7 |
+
-- <forward>: produce intermediate results.
|
8 |
+
-- <optimize_parameters>: calculate loss, gradients, and update network weights.
|
9 |
+
-- <modify_commandline_options>: (optionally) add model-specific options and set default options.
|
10 |
+
|
11 |
+
In the function <__init__>, you need to define four lists:
|
12 |
+
-- self.loss_names (str list): specify the training losses that you want to plot and save.
|
13 |
+
-- self.model_names (str list): define networks used in our training.
|
14 |
+
-- self.visual_names (str list): specify the images that you want to display and save.
|
15 |
+
-- self.optimizers (optimizer list): define and initialize optimizers. You can define one optimizer for each network. If two networks are updated at the same time, you can use itertools.chain to group them. See cycle_gan_model.py for an usage.
|
16 |
+
|
17 |
+
Now you can use the model class by specifying flag '--model dummy'.
|
18 |
+
See our template model class 'template_model.py' for more details.
|
19 |
+
"""
|
20 |
+
|
21 |
+
import importlib
|
22 |
+
from .base_model import BaseModel
|
23 |
+
|
24 |
+
|
25 |
+
def find_model_using_name(model_name):
|
26 |
+
"""Import the module "models/[model_name]_model.py".
|
27 |
+
|
28 |
+
In the file, the class called DatasetNameModel() will
|
29 |
+
be instantiated. It has to be a subclass of BaseModel,
|
30 |
+
and it is case-insensitive.
|
31 |
+
"""
|
32 |
+
model_filename = "controlnet_aux.leres.pix2pix.models." + model_name + "_model"
|
33 |
+
modellib = importlib.import_module(model_filename)
|
34 |
+
model = None
|
35 |
+
target_model_name = model_name.replace('_', '') + 'model'
|
36 |
+
for name, cls in modellib.__dict__.items():
|
37 |
+
if name.lower() == target_model_name.lower() \
|
38 |
+
and issubclass(cls, BaseModel):
|
39 |
+
model = cls
|
40 |
+
|
41 |
+
if model is None:
|
42 |
+
print("In %s.py, there should be a subclass of BaseModel with class name that matches %s in lowercase." % (model_filename, target_model_name))
|
43 |
+
exit(0)
|
44 |
+
|
45 |
+
return model
|
46 |
+
|
47 |
+
|
48 |
+
def get_option_setter(model_name):
|
49 |
+
"""Return the static method <modify_commandline_options> of the model class."""
|
50 |
+
model_class = find_model_using_name(model_name)
|
51 |
+
return model_class.modify_commandline_options
|
52 |
+
|
53 |
+
|
54 |
+
def create_model(opt):
|
55 |
+
"""Create a model given the option.
|
56 |
+
|
57 |
+
This function warps the class CustomDatasetDataLoader.
|
58 |
+
This is the main interface between this package and 'train.py'/'test.py'
|
59 |
+
|
60 |
+
Example:
|
61 |
+
>>> from models import create_model
|
62 |
+
>>> model = create_model(opt)
|
63 |
+
"""
|
64 |
+
model = find_model_using_name(opt.model)
|
65 |
+
instance = model(opt)
|
66 |
+
print("model [%s] was created" % type(instance).__name__)
|
67 |
+
return instance
|
controlnet_aux/src/controlnet_aux/leres/pix2pix/models/base_model.py
ADDED
@@ -0,0 +1,244 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import gc
|
2 |
+
import os
|
3 |
+
from abc import ABC, abstractmethod
|
4 |
+
from collections import OrderedDict
|
5 |
+
|
6 |
+
import torch
|
7 |
+
|
8 |
+
from ....util import torch_gc
|
9 |
+
from . import networks
|
10 |
+
|
11 |
+
|
12 |
+
class BaseModel(ABC):
|
13 |
+
"""This class is an abstract base class (ABC) for models.
|
14 |
+
To create a subclass, you need to implement the following five functions:
|
15 |
+
-- <__init__>: initialize the class; first call BaseModel.__init__(self, opt).
|
16 |
+
-- <set_input>: unpack data from dataset and apply preprocessing.
|
17 |
+
-- <forward>: produce intermediate results.
|
18 |
+
-- <optimize_parameters>: calculate losses, gradients, and update network weights.
|
19 |
+
-- <modify_commandline_options>: (optionally) add model-specific options and set default options.
|
20 |
+
"""
|
21 |
+
|
22 |
+
def __init__(self, opt):
|
23 |
+
"""Initialize the BaseModel class.
|
24 |
+
|
25 |
+
Parameters:
|
26 |
+
opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions
|
27 |
+
|
28 |
+
When creating your custom class, you need to implement your own initialization.
|
29 |
+
In this function, you should first call <BaseModel.__init__(self, opt)>
|
30 |
+
Then, you need to define four lists:
|
31 |
+
-- self.loss_names (str list): specify the training losses that you want to plot and save.
|
32 |
+
-- self.model_names (str list): define networks used in our training.
|
33 |
+
-- self.visual_names (str list): specify the images that you want to display and save.
|
34 |
+
-- self.optimizers (optimizer list): define and initialize optimizers. You can define one optimizer for each network. If two networks are updated at the same time, you can use itertools.chain to group them. See cycle_gan_model.py for an example.
|
35 |
+
"""
|
36 |
+
self.opt = opt
|
37 |
+
self.gpu_ids = opt.gpu_ids
|
38 |
+
self.isTrain = opt.isTrain
|
39 |
+
self.device = torch.device('cuda:{}'.format(self.gpu_ids[0])) if self.gpu_ids else torch.device('cpu') # get device name: CPU or GPU
|
40 |
+
self.save_dir = os.path.join(opt.checkpoints_dir, opt.name) # save all the checkpoints to save_dir
|
41 |
+
if opt.preprocess != 'scale_width': # with [scale_width], input images might have different sizes, which hurts the performance of cudnn.benchmark.
|
42 |
+
torch.backends.cudnn.benchmark = True
|
43 |
+
self.loss_names = []
|
44 |
+
self.model_names = []
|
45 |
+
self.visual_names = []
|
46 |
+
self.optimizers = []
|
47 |
+
self.image_paths = []
|
48 |
+
self.metric = 0 # used for learning rate policy 'plateau'
|
49 |
+
|
50 |
+
@staticmethod
|
51 |
+
def modify_commandline_options(parser, is_train):
|
52 |
+
"""Add new model-specific options, and rewrite default values for existing options.
|
53 |
+
|
54 |
+
Parameters:
|
55 |
+
parser -- original option parser
|
56 |
+
is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.
|
57 |
+
|
58 |
+
Returns:
|
59 |
+
the modified parser.
|
60 |
+
"""
|
61 |
+
return parser
|
62 |
+
|
63 |
+
@abstractmethod
|
64 |
+
def set_input(self, input):
|
65 |
+
"""Unpack input data from the dataloader and perform necessary pre-processing steps.
|
66 |
+
|
67 |
+
Parameters:
|
68 |
+
input (dict): includes the data itself and its metadata information.
|
69 |
+
"""
|
70 |
+
pass
|
71 |
+
|
72 |
+
@abstractmethod
|
73 |
+
def forward(self):
|
74 |
+
"""Run forward pass; called by both functions <optimize_parameters> and <test>."""
|
75 |
+
pass
|
76 |
+
|
77 |
+
@abstractmethod
|
78 |
+
def optimize_parameters(self):
|
79 |
+
"""Calculate losses, gradients, and update network weights; called in every training iteration"""
|
80 |
+
pass
|
81 |
+
|
82 |
+
def setup(self, opt):
|
83 |
+
"""Load and print networks; create schedulers
|
84 |
+
|
85 |
+
Parameters:
|
86 |
+
opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
|
87 |
+
"""
|
88 |
+
if self.isTrain:
|
89 |
+
self.schedulers = [networks.get_scheduler(optimizer, opt) for optimizer in self.optimizers]
|
90 |
+
if not self.isTrain or opt.continue_train:
|
91 |
+
load_suffix = 'iter_%d' % opt.load_iter if opt.load_iter > 0 else opt.epoch
|
92 |
+
self.load_networks(load_suffix)
|
93 |
+
self.print_networks(opt.verbose)
|
94 |
+
|
95 |
+
def eval(self):
|
96 |
+
"""Make models eval mode during test time"""
|
97 |
+
for name in self.model_names:
|
98 |
+
if isinstance(name, str):
|
99 |
+
net = getattr(self, 'net' + name)
|
100 |
+
net.eval()
|
101 |
+
|
102 |
+
def test(self):
|
103 |
+
"""Forward function used in test time.
|
104 |
+
|
105 |
+
This function wraps <forward> function in no_grad() so we don't save intermediate steps for backprop
|
106 |
+
It also calls <compute_visuals> to produce additional visualization results
|
107 |
+
"""
|
108 |
+
with torch.no_grad():
|
109 |
+
self.forward()
|
110 |
+
self.compute_visuals()
|
111 |
+
|
112 |
+
def compute_visuals(self):
|
113 |
+
"""Calculate additional output images for visdom and HTML visualization"""
|
114 |
+
pass
|
115 |
+
|
116 |
+
def get_image_paths(self):
|
117 |
+
""" Return image paths that are used to load current data"""
|
118 |
+
return self.image_paths
|
119 |
+
|
120 |
+
def update_learning_rate(self):
|
121 |
+
"""Update learning rates for all the networks; called at the end of every epoch"""
|
122 |
+
old_lr = self.optimizers[0].param_groups[0]['lr']
|
123 |
+
for scheduler in self.schedulers:
|
124 |
+
if self.opt.lr_policy == 'plateau':
|
125 |
+
scheduler.step(self.metric)
|
126 |
+
else:
|
127 |
+
scheduler.step()
|
128 |
+
|
129 |
+
lr = self.optimizers[0].param_groups[0]['lr']
|
130 |
+
print('learning rate %.7f -> %.7f' % (old_lr, lr))
|
131 |
+
|
132 |
+
def get_current_visuals(self):
|
133 |
+
"""Return visualization images. train.py will display these images with visdom, and save the images to a HTML"""
|
134 |
+
visual_ret = OrderedDict()
|
135 |
+
for name in self.visual_names:
|
136 |
+
if isinstance(name, str):
|
137 |
+
visual_ret[name] = getattr(self, name)
|
138 |
+
return visual_ret
|
139 |
+
|
140 |
+
def get_current_losses(self):
|
141 |
+
"""Return traning losses / errors. train.py will print out these errors on console, and save them to a file"""
|
142 |
+
errors_ret = OrderedDict()
|
143 |
+
for name in self.loss_names:
|
144 |
+
if isinstance(name, str):
|
145 |
+
errors_ret[name] = float(getattr(self, 'loss_' + name)) # float(...) works for both scalar tensor and float number
|
146 |
+
return errors_ret
|
147 |
+
|
148 |
+
def save_networks(self, epoch):
|
149 |
+
"""Save all the networks to the disk.
|
150 |
+
|
151 |
+
Parameters:
|
152 |
+
epoch (int) -- current epoch; used in the file name '%s_net_%s.pth' % (epoch, name)
|
153 |
+
"""
|
154 |
+
for name in self.model_names:
|
155 |
+
if isinstance(name, str):
|
156 |
+
save_filename = '%s_net_%s.pth' % (epoch, name)
|
157 |
+
save_path = os.path.join(self.save_dir, save_filename)
|
158 |
+
net = getattr(self, 'net' + name)
|
159 |
+
|
160 |
+
if len(self.gpu_ids) > 0 and torch.cuda.is_available():
|
161 |
+
torch.save(net.module.cpu().state_dict(), save_path)
|
162 |
+
net.cuda(self.gpu_ids[0])
|
163 |
+
else:
|
164 |
+
torch.save(net.cpu().state_dict(), save_path)
|
165 |
+
|
166 |
+
def unload_network(self, name):
|
167 |
+
"""Unload network and gc.
|
168 |
+
"""
|
169 |
+
if isinstance(name, str):
|
170 |
+
net = getattr(self, 'net' + name)
|
171 |
+
del net
|
172 |
+
gc.collect()
|
173 |
+
torch_gc()
|
174 |
+
return None
|
175 |
+
|
176 |
+
def __patch_instance_norm_state_dict(self, state_dict, module, keys, i=0):
|
177 |
+
"""Fix InstanceNorm checkpoints incompatibility (prior to 0.4)"""
|
178 |
+
key = keys[i]
|
179 |
+
if i + 1 == len(keys): # at the end, pointing to a parameter/buffer
|
180 |
+
if module.__class__.__name__.startswith('InstanceNorm') and \
|
181 |
+
(key == 'running_mean' or key == 'running_var'):
|
182 |
+
if getattr(module, key) is None:
|
183 |
+
state_dict.pop('.'.join(keys))
|
184 |
+
if module.__class__.__name__.startswith('InstanceNorm') and \
|
185 |
+
(key == 'num_batches_tracked'):
|
186 |
+
state_dict.pop('.'.join(keys))
|
187 |
+
else:
|
188 |
+
self.__patch_instance_norm_state_dict(state_dict, getattr(module, key), keys, i + 1)
|
189 |
+
|
190 |
+
def load_networks(self, epoch):
|
191 |
+
"""Load all the networks from the disk.
|
192 |
+
|
193 |
+
Parameters:
|
194 |
+
epoch (int) -- current epoch; used in the file name '%s_net_%s.pth' % (epoch, name)
|
195 |
+
"""
|
196 |
+
for name in self.model_names:
|
197 |
+
if isinstance(name, str):
|
198 |
+
load_filename = '%s_net_%s.pth' % (epoch, name)
|
199 |
+
load_path = os.path.join(self.save_dir, load_filename)
|
200 |
+
net = getattr(self, 'net' + name)
|
201 |
+
if isinstance(net, torch.nn.DataParallel):
|
202 |
+
net = net.module
|
203 |
+
# print('Loading depth boost model from %s' % load_path)
|
204 |
+
# if you are using PyTorch newer than 0.4 (e.g., built from
|
205 |
+
# GitHub source), you can remove str() on self.device
|
206 |
+
state_dict = torch.load(load_path, map_location=str(self.device))
|
207 |
+
if hasattr(state_dict, '_metadata'):
|
208 |
+
del state_dict._metadata
|
209 |
+
|
210 |
+
# patch InstanceNorm checkpoints prior to 0.4
|
211 |
+
for key in list(state_dict.keys()): # need to copy keys here because we mutate in loop
|
212 |
+
self.__patch_instance_norm_state_dict(state_dict, net, key.split('.'))
|
213 |
+
net.load_state_dict(state_dict)
|
214 |
+
|
215 |
+
def print_networks(self, verbose):
|
216 |
+
"""Print the total number of parameters in the network and (if verbose) network architecture
|
217 |
+
|
218 |
+
Parameters:
|
219 |
+
verbose (bool) -- if verbose: print the network architecture
|
220 |
+
"""
|
221 |
+
print('---------- Networks initialized -------------')
|
222 |
+
for name in self.model_names:
|
223 |
+
if isinstance(name, str):
|
224 |
+
net = getattr(self, 'net' + name)
|
225 |
+
num_params = 0
|
226 |
+
for param in net.parameters():
|
227 |
+
num_params += param.numel()
|
228 |
+
if verbose:
|
229 |
+
print(net)
|
230 |
+
print('[Network %s] Total number of parameters : %.3f M' % (name, num_params / 1e6))
|
231 |
+
print('-----------------------------------------------')
|
232 |
+
|
233 |
+
def set_requires_grad(self, nets, requires_grad=False):
|
234 |
+
"""Set requies_grad=Fasle for all the networks to avoid unnecessary computations
|
235 |
+
Parameters:
|
236 |
+
nets (network list) -- a list of networks
|
237 |
+
requires_grad (bool) -- whether the networks require gradients or not
|
238 |
+
"""
|
239 |
+
if not isinstance(nets, list):
|
240 |
+
nets = [nets]
|
241 |
+
for net in nets:
|
242 |
+
if net is not None:
|
243 |
+
for param in net.parameters():
|
244 |
+
param.requires_grad = requires_grad
|
controlnet_aux/src/controlnet_aux/leres/pix2pix/models/base_model_hg.py
ADDED
@@ -0,0 +1,58 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os
|
2 |
+
import torch
|
3 |
+
|
4 |
+
class BaseModelHG():
|
5 |
+
def name(self):
|
6 |
+
return 'BaseModel'
|
7 |
+
|
8 |
+
def initialize(self, opt):
|
9 |
+
self.opt = opt
|
10 |
+
self.gpu_ids = opt.gpu_ids
|
11 |
+
self.isTrain = opt.isTrain
|
12 |
+
self.Tensor = torch.cuda.FloatTensor if self.gpu_ids else torch.Tensor
|
13 |
+
self.save_dir = os.path.join(opt.checkpoints_dir, opt.name)
|
14 |
+
|
15 |
+
def set_input(self, input):
|
16 |
+
self.input = input
|
17 |
+
|
18 |
+
def forward(self):
|
19 |
+
pass
|
20 |
+
|
21 |
+
# used in test time, no backprop
|
22 |
+
def test(self):
|
23 |
+
pass
|
24 |
+
|
25 |
+
def get_image_paths(self):
|
26 |
+
pass
|
27 |
+
|
28 |
+
def optimize_parameters(self):
|
29 |
+
pass
|
30 |
+
|
31 |
+
def get_current_visuals(self):
|
32 |
+
return self.input
|
33 |
+
|
34 |
+
def get_current_errors(self):
|
35 |
+
return {}
|
36 |
+
|
37 |
+
def save(self, label):
|
38 |
+
pass
|
39 |
+
|
40 |
+
# helper saving function that can be used by subclasses
|
41 |
+
def save_network(self, network, network_label, epoch_label, gpu_ids):
|
42 |
+
save_filename = '_%s_net_%s.pth' % (epoch_label, network_label)
|
43 |
+
save_path = os.path.join(self.save_dir, save_filename)
|
44 |
+
torch.save(network.cpu().state_dict(), save_path)
|
45 |
+
if len(gpu_ids) and torch.cuda.is_available():
|
46 |
+
network.cuda(device_id=gpu_ids[0])
|
47 |
+
|
48 |
+
# helper loading function that can be used by subclasses
|
49 |
+
def load_network(self, network, network_label, epoch_label):
|
50 |
+
save_filename = '%s_net_%s.pth' % (epoch_label, network_label)
|
51 |
+
save_path = os.path.join(self.save_dir, save_filename)
|
52 |
+
print(save_path)
|
53 |
+
model = torch.load(save_path)
|
54 |
+
return model
|
55 |
+
# network.load_state_dict(torch.load(save_path))
|
56 |
+
|
57 |
+
def update_learning_rate():
|
58 |
+
pass
|
controlnet_aux/src/controlnet_aux/leres/pix2pix/models/networks.py
ADDED
@@ -0,0 +1,623 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import torch
|
2 |
+
import torch.nn as nn
|
3 |
+
from torch.nn import init
|
4 |
+
import functools
|
5 |
+
from torch.optim import lr_scheduler
|
6 |
+
|
7 |
+
|
8 |
+
###############################################################################
|
9 |
+
# Helper Functions
|
10 |
+
###############################################################################
|
11 |
+
|
12 |
+
|
13 |
+
class Identity(nn.Module):
|
14 |
+
def forward(self, x):
|
15 |
+
return x
|
16 |
+
|
17 |
+
|
18 |
+
def get_norm_layer(norm_type='instance'):
|
19 |
+
"""Return a normalization layer
|
20 |
+
|
21 |
+
Parameters:
|
22 |
+
norm_type (str) -- the name of the normalization layer: batch | instance | none
|
23 |
+
|
24 |
+
For BatchNorm, we use learnable affine parameters and track running statistics (mean/stddev).
|
25 |
+
For InstanceNorm, we do not use learnable affine parameters. We do not track running statistics.
|
26 |
+
"""
|
27 |
+
if norm_type == 'batch':
|
28 |
+
norm_layer = functools.partial(nn.BatchNorm2d, affine=True, track_running_stats=True)
|
29 |
+
elif norm_type == 'instance':
|
30 |
+
norm_layer = functools.partial(nn.InstanceNorm2d, affine=False, track_running_stats=False)
|
31 |
+
elif norm_type == 'none':
|
32 |
+
def norm_layer(x): return Identity()
|
33 |
+
else:
|
34 |
+
raise NotImplementedError('normalization layer [%s] is not found' % norm_type)
|
35 |
+
return norm_layer
|
36 |
+
|
37 |
+
|
38 |
+
def get_scheduler(optimizer, opt):
|
39 |
+
"""Return a learning rate scheduler
|
40 |
+
|
41 |
+
Parameters:
|
42 |
+
optimizer -- the optimizer of the network
|
43 |
+
opt (option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions.
|
44 |
+
opt.lr_policy is the name of learning rate policy: linear | step | plateau | cosine
|
45 |
+
|
46 |
+
For 'linear', we keep the same learning rate for the first <opt.n_epochs> epochs
|
47 |
+
and linearly decay the rate to zero over the next <opt.n_epochs_decay> epochs.
|
48 |
+
For other schedulers (step, plateau, and cosine), we use the default PyTorch schedulers.
|
49 |
+
See https://pytorch.org/docs/stable/optim.html for more details.
|
50 |
+
"""
|
51 |
+
if opt.lr_policy == 'linear':
|
52 |
+
def lambda_rule(epoch):
|
53 |
+
lr_l = 1.0 - max(0, epoch + opt.epoch_count - opt.n_epochs) / float(opt.n_epochs_decay + 1)
|
54 |
+
return lr_l
|
55 |
+
scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
|
56 |
+
elif opt.lr_policy == 'step':
|
57 |
+
scheduler = lr_scheduler.StepLR(optimizer, step_size=opt.lr_decay_iters, gamma=0.1)
|
58 |
+
elif opt.lr_policy == 'plateau':
|
59 |
+
scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.2, threshold=0.01, patience=5)
|
60 |
+
elif opt.lr_policy == 'cosine':
|
61 |
+
scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=opt.n_epochs, eta_min=0)
|
62 |
+
else:
|
63 |
+
return NotImplementedError('learning rate policy [%s] is not implemented', opt.lr_policy)
|
64 |
+
return scheduler
|
65 |
+
|
66 |
+
|
67 |
+
def init_weights(net, init_type='normal', init_gain=0.02):
|
68 |
+
"""Initialize network weights.
|
69 |
+
|
70 |
+
Parameters:
|
71 |
+
net (network) -- network to be initialized
|
72 |
+
init_type (str) -- the name of an initialization method: normal | xavier | kaiming | orthogonal
|
73 |
+
init_gain (float) -- scaling factor for normal, xavier and orthogonal.
|
74 |
+
|
75 |
+
We use 'normal' in the original pix2pix and CycleGAN paper. But xavier and kaiming might
|
76 |
+
work better for some applications. Feel free to try yourself.
|
77 |
+
"""
|
78 |
+
def init_func(m): # define the initialization function
|
79 |
+
classname = m.__class__.__name__
|
80 |
+
if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):
|
81 |
+
if init_type == 'normal':
|
82 |
+
init.normal_(m.weight.data, 0.0, init_gain)
|
83 |
+
elif init_type == 'xavier':
|
84 |
+
init.xavier_normal_(m.weight.data, gain=init_gain)
|
85 |
+
elif init_type == 'kaiming':
|
86 |
+
init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
|
87 |
+
elif init_type == 'orthogonal':
|
88 |
+
init.orthogonal_(m.weight.data, gain=init_gain)
|
89 |
+
else:
|
90 |
+
raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
|
91 |
+
if hasattr(m, 'bias') and m.bias is not None:
|
92 |
+
init.constant_(m.bias.data, 0.0)
|
93 |
+
elif classname.find('BatchNorm2d') != -1: # BatchNorm Layer's weight is not a matrix; only normal distribution applies.
|
94 |
+
init.normal_(m.weight.data, 1.0, init_gain)
|
95 |
+
init.constant_(m.bias.data, 0.0)
|
96 |
+
|
97 |
+
# print('initialize network with %s' % init_type)
|
98 |
+
net.apply(init_func) # apply the initialization function <init_func>
|
99 |
+
|
100 |
+
|
101 |
+
def init_net(net, init_type='normal', init_gain=0.02, gpu_ids=[]):
|
102 |
+
"""Initialize a network: 1. register CPU/GPU device (with multi-GPU support); 2. initialize the network weights
|
103 |
+
Parameters:
|
104 |
+
net (network) -- the network to be initialized
|
105 |
+
init_type (str) -- the name of an initialization method: normal | xavier | kaiming | orthogonal
|
106 |
+
gain (float) -- scaling factor for normal, xavier and orthogonal.
|
107 |
+
gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2
|
108 |
+
|
109 |
+
Return an initialized network.
|
110 |
+
"""
|
111 |
+
if len(gpu_ids) > 0:
|
112 |
+
assert(torch.cuda.is_available())
|
113 |
+
net.to(gpu_ids[0])
|
114 |
+
net = torch.nn.DataParallel(net, gpu_ids) # multi-GPUs
|
115 |
+
init_weights(net, init_type, init_gain=init_gain)
|
116 |
+
return net
|
117 |
+
|
118 |
+
|
119 |
+
def define_G(input_nc, output_nc, ngf, netG, norm='batch', use_dropout=False, init_type='normal', init_gain=0.02, gpu_ids=[]):
|
120 |
+
"""Create a generator
|
121 |
+
|
122 |
+
Parameters:
|
123 |
+
input_nc (int) -- the number of channels in input images
|
124 |
+
output_nc (int) -- the number of channels in output images
|
125 |
+
ngf (int) -- the number of filters in the last conv layer
|
126 |
+
netG (str) -- the architecture's name: resnet_9blocks | resnet_6blocks | unet_256 | unet_128
|
127 |
+
norm (str) -- the name of normalization layers used in the network: batch | instance | none
|
128 |
+
use_dropout (bool) -- if use dropout layers.
|
129 |
+
init_type (str) -- the name of our initialization method.
|
130 |
+
init_gain (float) -- scaling factor for normal, xavier and orthogonal.
|
131 |
+
gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2
|
132 |
+
|
133 |
+
Returns a generator
|
134 |
+
|
135 |
+
Our current implementation provides two types of generators:
|
136 |
+
U-Net: [unet_128] (for 128x128 input images) and [unet_256] (for 256x256 input images)
|
137 |
+
The original U-Net paper: https://arxiv.org/abs/1505.04597
|
138 |
+
|
139 |
+
Resnet-based generator: [resnet_6blocks] (with 6 Resnet blocks) and [resnet_9blocks] (with 9 Resnet blocks)
|
140 |
+
Resnet-based generator consists of several Resnet blocks between a few downsampling/upsampling operations.
|
141 |
+
We adapt Torch code from Justin Johnson's neural style transfer project (https://github.com/jcjohnson/fast-neural-style).
|
142 |
+
|
143 |
+
|
144 |
+
The generator has been initialized by <init_net>. It uses RELU for non-linearity.
|
145 |
+
"""
|
146 |
+
net = None
|
147 |
+
norm_layer = get_norm_layer(norm_type=norm)
|
148 |
+
|
149 |
+
if netG == 'resnet_9blocks':
|
150 |
+
net = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=9)
|
151 |
+
elif netG == 'resnet_6blocks':
|
152 |
+
net = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=6)
|
153 |
+
elif netG == 'resnet_12blocks':
|
154 |
+
net = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=12)
|
155 |
+
elif netG == 'unet_128':
|
156 |
+
net = UnetGenerator(input_nc, output_nc, 7, ngf, norm_layer=norm_layer, use_dropout=use_dropout)
|
157 |
+
elif netG == 'unet_256':
|
158 |
+
net = UnetGenerator(input_nc, output_nc, 8, ngf, norm_layer=norm_layer, use_dropout=use_dropout)
|
159 |
+
elif netG == 'unet_672':
|
160 |
+
net = UnetGenerator(input_nc, output_nc, 5, ngf, norm_layer=norm_layer, use_dropout=use_dropout)
|
161 |
+
elif netG == 'unet_960':
|
162 |
+
net = UnetGenerator(input_nc, output_nc, 6, ngf, norm_layer=norm_layer, use_dropout=use_dropout)
|
163 |
+
elif netG == 'unet_1024':
|
164 |
+
net = UnetGenerator(input_nc, output_nc, 10, ngf, norm_layer=norm_layer, use_dropout=use_dropout)
|
165 |
+
else:
|
166 |
+
raise NotImplementedError('Generator model name [%s] is not recognized' % netG)
|
167 |
+
return init_net(net, init_type, init_gain, gpu_ids)
|
168 |
+
|
169 |
+
|
170 |
+
def define_D(input_nc, ndf, netD, n_layers_D=3, norm='batch', init_type='normal', init_gain=0.02, gpu_ids=[]):
|
171 |
+
"""Create a discriminator
|
172 |
+
|
173 |
+
Parameters:
|
174 |
+
input_nc (int) -- the number of channels in input images
|
175 |
+
ndf (int) -- the number of filters in the first conv layer
|
176 |
+
netD (str) -- the architecture's name: basic | n_layers | pixel
|
177 |
+
n_layers_D (int) -- the number of conv layers in the discriminator; effective when netD=='n_layers'
|
178 |
+
norm (str) -- the type of normalization layers used in the network.
|
179 |
+
init_type (str) -- the name of the initialization method.
|
180 |
+
init_gain (float) -- scaling factor for normal, xavier and orthogonal.
|
181 |
+
gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2
|
182 |
+
|
183 |
+
Returns a discriminator
|
184 |
+
|
185 |
+
Our current implementation provides three types of discriminators:
|
186 |
+
[basic]: 'PatchGAN' classifier described in the original pix2pix paper.
|
187 |
+
It can classify whether 70×70 overlapping patches are real or fake.
|
188 |
+
Such a patch-level discriminator architecture has fewer parameters
|
189 |
+
than a full-image discriminator and can work on arbitrarily-sized images
|
190 |
+
in a fully convolutional fashion.
|
191 |
+
|
192 |
+
[n_layers]: With this mode, you can specify the number of conv layers in the discriminator
|
193 |
+
with the parameter <n_layers_D> (default=3 as used in [basic] (PatchGAN).)
|
194 |
+
|
195 |
+
[pixel]: 1x1 PixelGAN discriminator can classify whether a pixel is real or not.
|
196 |
+
It encourages greater color diversity but has no effect on spatial statistics.
|
197 |
+
|
198 |
+
The discriminator has been initialized by <init_net>. It uses Leakly RELU for non-linearity.
|
199 |
+
"""
|
200 |
+
net = None
|
201 |
+
norm_layer = get_norm_layer(norm_type=norm)
|
202 |
+
|
203 |
+
if netD == 'basic': # default PatchGAN classifier
|
204 |
+
net = NLayerDiscriminator(input_nc, ndf, n_layers=3, norm_layer=norm_layer)
|
205 |
+
elif netD == 'n_layers': # more options
|
206 |
+
net = NLayerDiscriminator(input_nc, ndf, n_layers_D, norm_layer=norm_layer)
|
207 |
+
elif netD == 'pixel': # classify if each pixel is real or fake
|
208 |
+
net = PixelDiscriminator(input_nc, ndf, norm_layer=norm_layer)
|
209 |
+
else:
|
210 |
+
raise NotImplementedError('Discriminator model name [%s] is not recognized' % netD)
|
211 |
+
return init_net(net, init_type, init_gain, gpu_ids)
|
212 |
+
|
213 |
+
|
214 |
+
##############################################################################
|
215 |
+
# Classes
|
216 |
+
##############################################################################
|
217 |
+
class GANLoss(nn.Module):
|
218 |
+
"""Define different GAN objectives.
|
219 |
+
|
220 |
+
The GANLoss class abstracts away the need to create the target label tensor
|
221 |
+
that has the same size as the input.
|
222 |
+
"""
|
223 |
+
|
224 |
+
def __init__(self, gan_mode, target_real_label=1.0, target_fake_label=0.0):
|
225 |
+
""" Initialize the GANLoss class.
|
226 |
+
|
227 |
+
Parameters:
|
228 |
+
gan_mode (str) - - the type of GAN objective. It currently supports vanilla, lsgan, and wgangp.
|
229 |
+
target_real_label (bool) - - label for a real image
|
230 |
+
target_fake_label (bool) - - label of a fake image
|
231 |
+
|
232 |
+
Note: Do not use sigmoid as the last layer of Discriminator.
|
233 |
+
LSGAN needs no sigmoid. vanilla GANs will handle it with BCEWithLogitsLoss.
|
234 |
+
"""
|
235 |
+
super(GANLoss, self).__init__()
|
236 |
+
self.register_buffer('real_label', torch.tensor(target_real_label))
|
237 |
+
self.register_buffer('fake_label', torch.tensor(target_fake_label))
|
238 |
+
self.gan_mode = gan_mode
|
239 |
+
if gan_mode == 'lsgan':
|
240 |
+
self.loss = nn.MSELoss()
|
241 |
+
elif gan_mode == 'vanilla':
|
242 |
+
self.loss = nn.BCEWithLogitsLoss()
|
243 |
+
elif gan_mode in ['wgangp']:
|
244 |
+
self.loss = None
|
245 |
+
else:
|
246 |
+
raise NotImplementedError('gan mode %s not implemented' % gan_mode)
|
247 |
+
|
248 |
+
def get_target_tensor(self, prediction, target_is_real):
|
249 |
+
"""Create label tensors with the same size as the input.
|
250 |
+
|
251 |
+
Parameters:
|
252 |
+
prediction (tensor) - - tpyically the prediction from a discriminator
|
253 |
+
target_is_real (bool) - - if the ground truth label is for real images or fake images
|
254 |
+
|
255 |
+
Returns:
|
256 |
+
A label tensor filled with ground truth label, and with the size of the input
|
257 |
+
"""
|
258 |
+
|
259 |
+
if target_is_real:
|
260 |
+
target_tensor = self.real_label
|
261 |
+
else:
|
262 |
+
target_tensor = self.fake_label
|
263 |
+
return target_tensor.expand_as(prediction)
|
264 |
+
|
265 |
+
def __call__(self, prediction, target_is_real):
|
266 |
+
"""Calculate loss given Discriminator's output and grount truth labels.
|
267 |
+
|
268 |
+
Parameters:
|
269 |
+
prediction (tensor) - - tpyically the prediction output from a discriminator
|
270 |
+
target_is_real (bool) - - if the ground truth label is for real images or fake images
|
271 |
+
|
272 |
+
Returns:
|
273 |
+
the calculated loss.
|
274 |
+
"""
|
275 |
+
if self.gan_mode in ['lsgan', 'vanilla']:
|
276 |
+
target_tensor = self.get_target_tensor(prediction, target_is_real)
|
277 |
+
loss = self.loss(prediction, target_tensor)
|
278 |
+
elif self.gan_mode == 'wgangp':
|
279 |
+
if target_is_real:
|
280 |
+
loss = -prediction.mean()
|
281 |
+
else:
|
282 |
+
loss = prediction.mean()
|
283 |
+
return loss
|
284 |
+
|
285 |
+
|
286 |
+
def cal_gradient_penalty(netD, real_data, fake_data, device, type='mixed', constant=1.0, lambda_gp=10.0):
|
287 |
+
"""Calculate the gradient penalty loss, used in WGAN-GP paper https://arxiv.org/abs/1704.00028
|
288 |
+
|
289 |
+
Arguments:
|
290 |
+
netD (network) -- discriminator network
|
291 |
+
real_data (tensor array) -- real images
|
292 |
+
fake_data (tensor array) -- generated images from the generator
|
293 |
+
device (str) -- GPU / CPU: from torch.device('cuda:{}'.format(self.gpu_ids[0])) if self.gpu_ids else torch.device('cpu')
|
294 |
+
type (str) -- if we mix real and fake data or not [real | fake | mixed].
|
295 |
+
constant (float) -- the constant used in formula ( ||gradient||_2 - constant)^2
|
296 |
+
lambda_gp (float) -- weight for this loss
|
297 |
+
|
298 |
+
Returns the gradient penalty loss
|
299 |
+
"""
|
300 |
+
if lambda_gp > 0.0:
|
301 |
+
if type == 'real': # either use real images, fake images, or a linear interpolation of two.
|
302 |
+
interpolatesv = real_data
|
303 |
+
elif type == 'fake':
|
304 |
+
interpolatesv = fake_data
|
305 |
+
elif type == 'mixed':
|
306 |
+
alpha = torch.rand(real_data.shape[0], 1, device=device)
|
307 |
+
alpha = alpha.expand(real_data.shape[0], real_data.nelement() // real_data.shape[0]).contiguous().view(*real_data.shape)
|
308 |
+
interpolatesv = alpha * real_data + ((1 - alpha) * fake_data)
|
309 |
+
else:
|
310 |
+
raise NotImplementedError('{} not implemented'.format(type))
|
311 |
+
interpolatesv.requires_grad_(True)
|
312 |
+
disc_interpolates = netD(interpolatesv)
|
313 |
+
gradients = torch.autograd.grad(outputs=disc_interpolates, inputs=interpolatesv,
|
314 |
+
grad_outputs=torch.ones(disc_interpolates.size()).to(device),
|
315 |
+
create_graph=True, retain_graph=True, only_inputs=True)
|
316 |
+
gradients = gradients[0].view(real_data.size(0), -1) # flat the data
|
317 |
+
gradient_penalty = (((gradients + 1e-16).norm(2, dim=1) - constant) ** 2).mean() * lambda_gp # added eps
|
318 |
+
return gradient_penalty, gradients
|
319 |
+
else:
|
320 |
+
return 0.0, None
|
321 |
+
|
322 |
+
|
323 |
+
class ResnetGenerator(nn.Module):
|
324 |
+
"""Resnet-based generator that consists of Resnet blocks between a few downsampling/upsampling operations.
|
325 |
+
|
326 |
+
We adapt Torch code and idea from Justin Johnson's neural style transfer project(https://github.com/jcjohnson/fast-neural-style)
|
327 |
+
"""
|
328 |
+
|
329 |
+
def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=6, padding_type='reflect'):
|
330 |
+
"""Construct a Resnet-based generator
|
331 |
+
|
332 |
+
Parameters:
|
333 |
+
input_nc (int) -- the number of channels in input images
|
334 |
+
output_nc (int) -- the number of channels in output images
|
335 |
+
ngf (int) -- the number of filters in the last conv layer
|
336 |
+
norm_layer -- normalization layer
|
337 |
+
use_dropout (bool) -- if use dropout layers
|
338 |
+
n_blocks (int) -- the number of ResNet blocks
|
339 |
+
padding_type (str) -- the name of padding layer in conv layers: reflect | replicate | zero
|
340 |
+
"""
|
341 |
+
assert(n_blocks >= 0)
|
342 |
+
super(ResnetGenerator, self).__init__()
|
343 |
+
if type(norm_layer) == functools.partial:
|
344 |
+
use_bias = norm_layer.func == nn.InstanceNorm2d
|
345 |
+
else:
|
346 |
+
use_bias = norm_layer == nn.InstanceNorm2d
|
347 |
+
|
348 |
+
model = [nn.ReflectionPad2d(3),
|
349 |
+
nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0, bias=use_bias),
|
350 |
+
norm_layer(ngf),
|
351 |
+
nn.ReLU(True)]
|
352 |
+
|
353 |
+
n_downsampling = 2
|
354 |
+
for i in range(n_downsampling): # add downsampling layers
|
355 |
+
mult = 2 ** i
|
356 |
+
model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1, bias=use_bias),
|
357 |
+
norm_layer(ngf * mult * 2),
|
358 |
+
nn.ReLU(True)]
|
359 |
+
|
360 |
+
mult = 2 ** n_downsampling
|
361 |
+
for i in range(n_blocks): # add ResNet blocks
|
362 |
+
|
363 |
+
model += [ResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias)]
|
364 |
+
|
365 |
+
for i in range(n_downsampling): # add upsampling layers
|
366 |
+
mult = 2 ** (n_downsampling - i)
|
367 |
+
model += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2),
|
368 |
+
kernel_size=3, stride=2,
|
369 |
+
padding=1, output_padding=1,
|
370 |
+
bias=use_bias),
|
371 |
+
norm_layer(int(ngf * mult / 2)),
|
372 |
+
nn.ReLU(True)]
|
373 |
+
model += [nn.ReflectionPad2d(3)]
|
374 |
+
model += [nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)]
|
375 |
+
model += [nn.Tanh()]
|
376 |
+
|
377 |
+
self.model = nn.Sequential(*model)
|
378 |
+
|
379 |
+
def forward(self, input):
|
380 |
+
"""Standard forward"""
|
381 |
+
return self.model(input)
|
382 |
+
|
383 |
+
|
384 |
+
class ResnetBlock(nn.Module):
|
385 |
+
"""Define a Resnet block"""
|
386 |
+
|
387 |
+
def __init__(self, dim, padding_type, norm_layer, use_dropout, use_bias):
|
388 |
+
"""Initialize the Resnet block
|
389 |
+
|
390 |
+
A resnet block is a conv block with skip connections
|
391 |
+
We construct a conv block with build_conv_block function,
|
392 |
+
and implement skip connections in <forward> function.
|
393 |
+
Original Resnet paper: https://arxiv.org/pdf/1512.03385.pdf
|
394 |
+
"""
|
395 |
+
super(ResnetBlock, self).__init__()
|
396 |
+
self.conv_block = self.build_conv_block(dim, padding_type, norm_layer, use_dropout, use_bias)
|
397 |
+
|
398 |
+
def build_conv_block(self, dim, padding_type, norm_layer, use_dropout, use_bias):
|
399 |
+
"""Construct a convolutional block.
|
400 |
+
|
401 |
+
Parameters:
|
402 |
+
dim (int) -- the number of channels in the conv layer.
|
403 |
+
padding_type (str) -- the name of padding layer: reflect | replicate | zero
|
404 |
+
norm_layer -- normalization layer
|
405 |
+
use_dropout (bool) -- if use dropout layers.
|
406 |
+
use_bias (bool) -- if the conv layer uses bias or not
|
407 |
+
|
408 |
+
Returns a conv block (with a conv layer, a normalization layer, and a non-linearity layer (ReLU))
|
409 |
+
"""
|
410 |
+
conv_block = []
|
411 |
+
p = 0
|
412 |
+
if padding_type == 'reflect':
|
413 |
+
conv_block += [nn.ReflectionPad2d(1)]
|
414 |
+
elif padding_type == 'replicate':
|
415 |
+
conv_block += [nn.ReplicationPad2d(1)]
|
416 |
+
elif padding_type == 'zero':
|
417 |
+
p = 1
|
418 |
+
else:
|
419 |
+
raise NotImplementedError('padding [%s] is not implemented' % padding_type)
|
420 |
+
|
421 |
+
conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim), nn.ReLU(True)]
|
422 |
+
if use_dropout:
|
423 |
+
conv_block += [nn.Dropout(0.5)]
|
424 |
+
|
425 |
+
p = 0
|
426 |
+
if padding_type == 'reflect':
|
427 |
+
conv_block += [nn.ReflectionPad2d(1)]
|
428 |
+
elif padding_type == 'replicate':
|
429 |
+
conv_block += [nn.ReplicationPad2d(1)]
|
430 |
+
elif padding_type == 'zero':
|
431 |
+
p = 1
|
432 |
+
else:
|
433 |
+
raise NotImplementedError('padding [%s] is not implemented' % padding_type)
|
434 |
+
conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim)]
|
435 |
+
|
436 |
+
return nn.Sequential(*conv_block)
|
437 |
+
|
438 |
+
def forward(self, x):
|
439 |
+
"""Forward function (with skip connections)"""
|
440 |
+
out = x + self.conv_block(x) # add skip connections
|
441 |
+
return out
|
442 |
+
|
443 |
+
|
444 |
+
class UnetGenerator(nn.Module):
|
445 |
+
"""Create a Unet-based generator"""
|
446 |
+
|
447 |
+
def __init__(self, input_nc, output_nc, num_downs, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False):
|
448 |
+
"""Construct a Unet generator
|
449 |
+
Parameters:
|
450 |
+
input_nc (int) -- the number of channels in input images
|
451 |
+
output_nc (int) -- the number of channels in output images
|
452 |
+
num_downs (int) -- the number of downsamplings in UNet. For example, # if |num_downs| == 7,
|
453 |
+
image of size 128x128 will become of size 1x1 # at the bottleneck
|
454 |
+
ngf (int) -- the number of filters in the last conv layer
|
455 |
+
norm_layer -- normalization layer
|
456 |
+
|
457 |
+
We construct the U-Net from the innermost layer to the outermost layer.
|
458 |
+
It is a recursive process.
|
459 |
+
"""
|
460 |
+
super(UnetGenerator, self).__init__()
|
461 |
+
# construct unet structure
|
462 |
+
unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=None, norm_layer=norm_layer, innermost=True) # add the innermost layer
|
463 |
+
for i in range(num_downs - 5): # add intermediate layers with ngf * 8 filters
|
464 |
+
unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer, use_dropout=use_dropout)
|
465 |
+
# gradually reduce the number of filters from ngf * 8 to ngf
|
466 |
+
unet_block = UnetSkipConnectionBlock(ngf * 4, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
|
467 |
+
unet_block = UnetSkipConnectionBlock(ngf * 2, ngf * 4, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
|
468 |
+
unet_block = UnetSkipConnectionBlock(ngf, ngf * 2, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
|
469 |
+
self.model = UnetSkipConnectionBlock(output_nc, ngf, input_nc=input_nc, submodule=unet_block, outermost=True, norm_layer=norm_layer) # add the outermost layer
|
470 |
+
|
471 |
+
def forward(self, input):
|
472 |
+
"""Standard forward"""
|
473 |
+
return self.model(input)
|
474 |
+
|
475 |
+
|
476 |
+
class UnetSkipConnectionBlock(nn.Module):
|
477 |
+
"""Defines the Unet submodule with skip connection.
|
478 |
+
X -------------------identity----------------------
|
479 |
+
|-- downsampling -- |submodule| -- upsampling --|
|
480 |
+
"""
|
481 |
+
|
482 |
+
def __init__(self, outer_nc, inner_nc, input_nc=None,
|
483 |
+
submodule=None, outermost=False, innermost=False, norm_layer=nn.BatchNorm2d, use_dropout=False):
|
484 |
+
"""Construct a Unet submodule with skip connections.
|
485 |
+
|
486 |
+
Parameters:
|
487 |
+
outer_nc (int) -- the number of filters in the outer conv layer
|
488 |
+
inner_nc (int) -- the number of filters in the inner conv layer
|
489 |
+
input_nc (int) -- the number of channels in input images/features
|
490 |
+
submodule (UnetSkipConnectionBlock) -- previously defined submodules
|
491 |
+
outermost (bool) -- if this module is the outermost module
|
492 |
+
innermost (bool) -- if this module is the innermost module
|
493 |
+
norm_layer -- normalization layer
|
494 |
+
use_dropout (bool) -- if use dropout layers.
|
495 |
+
"""
|
496 |
+
super(UnetSkipConnectionBlock, self).__init__()
|
497 |
+
self.outermost = outermost
|
498 |
+
if type(norm_layer) == functools.partial:
|
499 |
+
use_bias = norm_layer.func == nn.InstanceNorm2d
|
500 |
+
else:
|
501 |
+
use_bias = norm_layer == nn.InstanceNorm2d
|
502 |
+
if input_nc is None:
|
503 |
+
input_nc = outer_nc
|
504 |
+
downconv = nn.Conv2d(input_nc, inner_nc, kernel_size=4,
|
505 |
+
stride=2, padding=1, bias=use_bias)
|
506 |
+
downrelu = nn.LeakyReLU(0.2, True)
|
507 |
+
downnorm = norm_layer(inner_nc)
|
508 |
+
uprelu = nn.ReLU(True)
|
509 |
+
upnorm = norm_layer(outer_nc)
|
510 |
+
|
511 |
+
if outermost:
|
512 |
+
upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,
|
513 |
+
kernel_size=4, stride=2,
|
514 |
+
padding=1)
|
515 |
+
down = [downconv]
|
516 |
+
up = [uprelu, upconv, nn.Tanh()]
|
517 |
+
model = down + [submodule] + up
|
518 |
+
elif innermost:
|
519 |
+
upconv = nn.ConvTranspose2d(inner_nc, outer_nc,
|
520 |
+
kernel_size=4, stride=2,
|
521 |
+
padding=1, bias=use_bias)
|
522 |
+
down = [downrelu, downconv]
|
523 |
+
up = [uprelu, upconv, upnorm]
|
524 |
+
model = down + up
|
525 |
+
else:
|
526 |
+
upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,
|
527 |
+
kernel_size=4, stride=2,
|
528 |
+
padding=1, bias=use_bias)
|
529 |
+
down = [downrelu, downconv, downnorm]
|
530 |
+
up = [uprelu, upconv, upnorm]
|
531 |
+
|
532 |
+
if use_dropout:
|
533 |
+
model = down + [submodule] + up + [nn.Dropout(0.5)]
|
534 |
+
else:
|
535 |
+
model = down + [submodule] + up
|
536 |
+
|
537 |
+
self.model = nn.Sequential(*model)
|
538 |
+
|
539 |
+
def forward(self, x):
|
540 |
+
if self.outermost:
|
541 |
+
return self.model(x)
|
542 |
+
else: # add skip connections
|
543 |
+
return torch.cat([x, self.model(x)], 1)
|
544 |
+
|
545 |
+
|
546 |
+
class NLayerDiscriminator(nn.Module):
|
547 |
+
"""Defines a PatchGAN discriminator"""
|
548 |
+
|
549 |
+
def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d):
|
550 |
+
"""Construct a PatchGAN discriminator
|
551 |
+
|
552 |
+
Parameters:
|
553 |
+
input_nc (int) -- the number of channels in input images
|
554 |
+
ndf (int) -- the number of filters in the last conv layer
|
555 |
+
n_layers (int) -- the number of conv layers in the discriminator
|
556 |
+
norm_layer -- normalization layer
|
557 |
+
"""
|
558 |
+
super(NLayerDiscriminator, self).__init__()
|
559 |
+
if type(norm_layer) == functools.partial: # no need to use bias as BatchNorm2d has affine parameters
|
560 |
+
use_bias = norm_layer.func == nn.InstanceNorm2d
|
561 |
+
else:
|
562 |
+
use_bias = norm_layer == nn.InstanceNorm2d
|
563 |
+
|
564 |
+
kw = 4
|
565 |
+
padw = 1
|
566 |
+
sequence = [nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw), nn.LeakyReLU(0.2, True)]
|
567 |
+
nf_mult = 1
|
568 |
+
nf_mult_prev = 1
|
569 |
+
for n in range(1, n_layers): # gradually increase the number of filters
|
570 |
+
nf_mult_prev = nf_mult
|
571 |
+
nf_mult = min(2 ** n, 8)
|
572 |
+
sequence += [
|
573 |
+
nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias),
|
574 |
+
norm_layer(ndf * nf_mult),
|
575 |
+
nn.LeakyReLU(0.2, True)
|
576 |
+
]
|
577 |
+
|
578 |
+
nf_mult_prev = nf_mult
|
579 |
+
nf_mult = min(2 ** n_layers, 8)
|
580 |
+
sequence += [
|
581 |
+
nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=1, padding=padw, bias=use_bias),
|
582 |
+
norm_layer(ndf * nf_mult),
|
583 |
+
nn.LeakyReLU(0.2, True)
|
584 |
+
]
|
585 |
+
|
586 |
+
sequence += [nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)] # output 1 channel prediction map
|
587 |
+
self.model = nn.Sequential(*sequence)
|
588 |
+
|
589 |
+
def forward(self, input):
|
590 |
+
"""Standard forward."""
|
591 |
+
return self.model(input)
|
592 |
+
|
593 |
+
|
594 |
+
class PixelDiscriminator(nn.Module):
|
595 |
+
"""Defines a 1x1 PatchGAN discriminator (pixelGAN)"""
|
596 |
+
|
597 |
+
def __init__(self, input_nc, ndf=64, norm_layer=nn.BatchNorm2d):
|
598 |
+
"""Construct a 1x1 PatchGAN discriminator
|
599 |
+
|
600 |
+
Parameters:
|
601 |
+
input_nc (int) -- the number of channels in input images
|
602 |
+
ndf (int) -- the number of filters in the last conv layer
|
603 |
+
norm_layer -- normalization layer
|
604 |
+
"""
|
605 |
+
super(PixelDiscriminator, self).__init__()
|
606 |
+
if type(norm_layer) == functools.partial: # no need to use bias as BatchNorm2d has affine parameters
|
607 |
+
use_bias = norm_layer.func == nn.InstanceNorm2d
|
608 |
+
else:
|
609 |
+
use_bias = norm_layer == nn.InstanceNorm2d
|
610 |
+
|
611 |
+
self.net = [
|
612 |
+
nn.Conv2d(input_nc, ndf, kernel_size=1, stride=1, padding=0),
|
613 |
+
nn.LeakyReLU(0.2, True),
|
614 |
+
nn.Conv2d(ndf, ndf * 2, kernel_size=1, stride=1, padding=0, bias=use_bias),
|
615 |
+
norm_layer(ndf * 2),
|
616 |
+
nn.LeakyReLU(0.2, True),
|
617 |
+
nn.Conv2d(ndf * 2, 1, kernel_size=1, stride=1, padding=0, bias=use_bias)]
|
618 |
+
|
619 |
+
self.net = nn.Sequential(*self.net)
|
620 |
+
|
621 |
+
def forward(self, input):
|
622 |
+
"""Standard forward."""
|
623 |
+
return self.net(input)
|
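The classes above are plain nn.Modules and can be smoke-tested in isolation. The snippet below is a minimal sketch, not part of the vendored file; it reuses the num_downs == 7 / 128x128 example from the UnetGenerator docstring, and all shapes and channel counts are illustrative only.

import torch
import torch.nn as nn

# Assumes UnetGenerator and NLayerDiscriminator are importable from this networks.py.
netG = UnetGenerator(input_nc=3, output_nc=3, num_downs=7, ngf=64, norm_layer=nn.BatchNorm2d)
netD = NLayerDiscriminator(input_nc=6, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d)
netG.eval()  # eval() avoids BatchNorm's "1 value per channel" error at the 1x1 bottleneck
netD.eval()

with torch.no_grad():
    x = torch.randn(1, 3, 128, 128)            # 128x128 input reaches 1x1 at the bottleneck
    fake = netG(x)                             # same spatial size as the input: (1, 3, 128, 128)
    score = netD(torch.cat([x, fake], dim=1))  # PatchGAN prediction map (conditional D sees input + output)
print(fake.shape, score.shape)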
controlnet_aux/src/controlnet_aux/leres/pix2pix/models/pix2pix4depth_model.py
ADDED
@@ -0,0 +1,155 @@
import torch
from .base_model import BaseModel
from . import networks


class Pix2Pix4DepthModel(BaseModel):
    """ This class implements the pix2pix model, for learning a mapping from input images to output images given paired data.

    The model training requires '--dataset_mode aligned' dataset.
    By default, it uses a '--netG unet256' U-Net generator,
    a '--netD basic' discriminator (PatchGAN),
    and a '--gan_mode' vanilla GAN loss (the cross-entropy objective used in the original GAN paper).

    pix2pix paper: https://arxiv.org/pdf/1611.07004.pdf
    """
    @staticmethod
    def modify_commandline_options(parser, is_train=True):
        """Add new dataset-specific options, and rewrite default values for existing options.

        Parameters:
            parser          -- original option parser
            is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.

        Returns:
            the modified parser.

        For pix2pix, we do not use image buffer
        The training objective is: GAN Loss + lambda_L1 * ||G(A)-B||_1
        By default, we use vanilla GAN loss, UNet with batchnorm, and aligned datasets.
        """
        # changing the default values to match the pix2pix paper (https://phillipi.github.io/pix2pix/)
        parser.set_defaults(input_nc=2, output_nc=1, norm='none', netG='unet_1024', dataset_mode='depthmerge')
        if is_train:
            parser.set_defaults(pool_size=0, gan_mode='vanilla')
            parser.add_argument('--lambda_L1', type=float, default=1000, help='weight for L1 loss')
        return parser

    def __init__(self, opt):
        """Initialize the pix2pix class.

        Parameters:
            opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
        """
        BaseModel.__init__(self, opt)
        # specify the training losses you want to print out. The training/test scripts will call <BaseModel.get_current_losses>

        self.loss_names = ['G_GAN', 'G_L1', 'D_real', 'D_fake']
        # self.loss_names = ['G_L1']

        # specify the images you want to save/display. The training/test scripts will call <BaseModel.get_current_visuals>
        if self.isTrain:
            self.visual_names = ['outer', 'inner', 'fake_B', 'real_B']
        else:
            self.visual_names = ['fake_B']

        # specify the models you want to save to the disk. The training/test scripts will call <BaseModel.save_networks> and <BaseModel.load_networks>
        if self.isTrain:
            self.model_names = ['G', 'D']
        else:  # during test time, only load G
            self.model_names = ['G']

        # define networks (both generator and discriminator)
        self.netG = networks.define_G(opt.input_nc, opt.output_nc, 64, 'unet_1024', 'none',
                                      False, 'normal', 0.02, self.gpu_ids)

        if self.isTrain:  # define a discriminator; conditional GANs need to take both input and output images; therefore, #channels for D is input_nc + output_nc
            self.netD = networks.define_D(opt.input_nc + opt.output_nc, opt.ndf, opt.netD,
                                          opt.n_layers_D, opt.norm, opt.init_type, opt.init_gain, self.gpu_ids)

        if self.isTrain:
            # define loss functions
            self.criterionGAN = networks.GANLoss(opt.gan_mode).to(self.device)
            self.criterionL1 = torch.nn.L1Loss()
            # initialize optimizers; schedulers will be automatically created by function <BaseModel.setup>.
            self.optimizer_G = torch.optim.Adam(self.netG.parameters(), lr=1e-4, betas=(opt.beta1, 0.999))
            self.optimizer_D = torch.optim.Adam(self.netD.parameters(), lr=2e-06, betas=(opt.beta1, 0.999))
            self.optimizers.append(self.optimizer_G)
            self.optimizers.append(self.optimizer_D)

    def set_input_train(self, input):
        self.outer = input['data_outer'].to(self.device)
        self.outer = torch.nn.functional.interpolate(self.outer, (1024, 1024), mode='bilinear', align_corners=False)

        self.inner = input['data_inner'].to(self.device)
        self.inner = torch.nn.functional.interpolate(self.inner, (1024, 1024), mode='bilinear', align_corners=False)

        self.image_paths = input['image_path']

        if self.isTrain:
            self.gtfake = input['data_gtfake'].to(self.device)
            self.gtfake = torch.nn.functional.interpolate(self.gtfake, (1024, 1024), mode='bilinear', align_corners=False)
            self.real_B = self.gtfake

        self.real_A = torch.cat((self.outer, self.inner), 1)

    def set_input(self, outer, inner):
        inner = torch.from_numpy(inner).unsqueeze(0).unsqueeze(0)
        outer = torch.from_numpy(outer).unsqueeze(0).unsqueeze(0)

        inner = (inner - torch.min(inner)) / (torch.max(inner) - torch.min(inner))
        outer = (outer - torch.min(outer)) / (torch.max(outer) - torch.min(outer))

        inner = self.normalize(inner)
        outer = self.normalize(outer)

        self.real_A = torch.cat((outer, inner), 1).to(self.device)

    def normalize(self, input):
        input = input * 2
        input = input - 1
        return input

    def forward(self):
        """Run forward pass; called by both functions <optimize_parameters> and <test>."""
        self.fake_B = self.netG(self.real_A)  # G(A)

    def backward_D(self):
        """Calculate GAN loss for the discriminator"""
        # Fake; stop backprop to the generator by detaching fake_B
        fake_AB = torch.cat((self.real_A, self.fake_B), 1)  # we use conditional GANs; we need to feed both input and output to the discriminator
        pred_fake = self.netD(fake_AB.detach())
        self.loss_D_fake = self.criterionGAN(pred_fake, False)
        # Real
        real_AB = torch.cat((self.real_A, self.real_B), 1)
        pred_real = self.netD(real_AB)
        self.loss_D_real = self.criterionGAN(pred_real, True)
        # combine loss and calculate gradients
        self.loss_D = (self.loss_D_fake + self.loss_D_real) * 0.5
        self.loss_D.backward()

    def backward_G(self):
        """Calculate GAN and L1 loss for the generator"""
        # First, G(A) should fake the discriminator
        fake_AB = torch.cat((self.real_A, self.fake_B), 1)
        pred_fake = self.netD(fake_AB)
        self.loss_G_GAN = self.criterionGAN(pred_fake, True)
        # Second, G(A) = B
        self.loss_G_L1 = self.criterionL1(self.fake_B, self.real_B) * self.opt.lambda_L1
        # combine loss and calculate gradients
        self.loss_G = self.loss_G_L1 + self.loss_G_GAN
        self.loss_G.backward()

    def optimize_parameters(self):
        self.forward()                              # compute fake images: G(A)
        # update D
        self.set_requires_grad(self.netD, True)     # enable backprop for D
        self.optimizer_D.zero_grad()                # set D's gradients to zero
        self.backward_D()                           # calculate gradients for D
        self.optimizer_D.step()                     # update D's weights
        # update G
        self.set_requires_grad(self.netD, False)    # D requires no gradients when optimizing G
        self.optimizer_G.zero_grad()                # set G's gradients to zero
        self.backward_G()                           # calculate gradients for G
        self.optimizer_G.step()                     # update G's weights
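At inference time, set_input() min-max rescales each depth map to [0, 1] and then normalize() maps it to [-1, 1] before the two maps are concatenated along the channel dimension. A tiny sketch of that preprocessing on a made-up 2x2 "depth map" (values are illustrative only):

import torch

d = torch.tensor([[0.2, 0.4], [0.6, 0.8]]).unsqueeze(0).unsqueeze(0)  # (1, 1, 2, 2) toy depth map

d = (d - torch.min(d)) / (torch.max(d) - torch.min(d))  # min-max rescale -> [0, 1]
d = d * 2 - 1                                           # normalize()    -> [-1, 1]

print(d)  # tensor([[[[-1.0000, -0.3333], [ 0.3333,  1.0000]]]])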
controlnet_aux/src/controlnet_aux/leres/pix2pix/options/__init__.py
ADDED
@@ -0,0 +1 @@
"""This package includes option modules: training options, test options, and basic options (used in both training and test)."""
controlnet_aux/src/controlnet_aux/leres/pix2pix/options/base_options.py
ADDED
@@ -0,0 +1,156 @@
import argparse
import os
from ...pix2pix.util import util
# import torch
from ...pix2pix import models
# import pix2pix.data
import numpy as np


class BaseOptions():
    """This class defines options used during both training and test time.

    It also implements several helper functions such as parsing, printing, and saving the options.
    It also gathers additional options defined in <modify_commandline_options> functions in both dataset class and model class.
    """

    def __init__(self):
        """Reset the class; indicates the class hasn't been initialized"""
        self.initialized = False

    def initialize(self, parser):
        """Define the common options that are used in both training and test."""
        # basic parameters
        parser.add_argument('--dataroot', help='path to images (should have subfolders trainA, trainB, valA, valB, etc)')
        parser.add_argument('--name', type=str, default='void', help='mahdi_unet_new, scaled_unet')
        parser.add_argument('--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0  0,1,2, 0,2. use -1 for CPU')
        parser.add_argument('--checkpoints_dir', type=str, default='./pix2pix/checkpoints', help='models are saved here')
        # model parameters
        parser.add_argument('--model', type=str, default='cycle_gan', help='chooses which model to use. [cycle_gan | pix2pix | test | colorization]')
        parser.add_argument('--input_nc', type=int, default=2, help='# of input image channels: 3 for RGB and 1 for grayscale')
        parser.add_argument('--output_nc', type=int, default=1, help='# of output image channels: 3 for RGB and 1 for grayscale')
        parser.add_argument('--ngf', type=int, default=64, help='# of gen filters in the last conv layer')
        parser.add_argument('--ndf', type=int, default=64, help='# of discrim filters in the first conv layer')
        parser.add_argument('--netD', type=str, default='basic', help='specify discriminator architecture [basic | n_layers | pixel]. The basic model is a 70x70 PatchGAN. n_layers allows you to specify the layers in the discriminator')
        parser.add_argument('--netG', type=str, default='resnet_9blocks', help='specify generator architecture [resnet_9blocks | resnet_6blocks | unet_256 | unet_128]')
        parser.add_argument('--n_layers_D', type=int, default=3, help='only used if netD==n_layers')
        parser.add_argument('--norm', type=str, default='instance', help='instance normalization or batch normalization [instance | batch | none]')
        parser.add_argument('--init_type', type=str, default='normal', help='network initialization [normal | xavier | kaiming | orthogonal]')
        parser.add_argument('--init_gain', type=float, default=0.02, help='scaling factor for normal, xavier and orthogonal.')
        parser.add_argument('--no_dropout', action='store_true', help='no dropout for the generator')
        # dataset parameters
        parser.add_argument('--dataset_mode', type=str, default='unaligned', help='chooses how datasets are loaded. [unaligned | aligned | single | colorization]')
        parser.add_argument('--direction', type=str, default='AtoB', help='AtoB or BtoA')
        parser.add_argument('--serial_batches', action='store_true', help='if true, takes images in order to make batches, otherwise takes them randomly')
        parser.add_argument('--num_threads', default=4, type=int, help='# threads for loading data')
        parser.add_argument('--batch_size', type=int, default=1, help='input batch size')
        parser.add_argument('--load_size', type=int, default=672, help='scale images to this size')
        parser.add_argument('--crop_size', type=int, default=672, help='then crop to this size')
        parser.add_argument('--max_dataset_size', type=int, default=10000, help='Maximum number of samples allowed per dataset. If the dataset directory contains more than max_dataset_size, only a subset is loaded.')
        parser.add_argument('--preprocess', type=str, default='resize_and_crop', help='scaling and cropping of images at load time [resize_and_crop | crop | scale_width | scale_width_and_crop | none]')
        parser.add_argument('--no_flip', action='store_true', help='if specified, do not flip the images for data augmentation')
        parser.add_argument('--display_winsize', type=int, default=256, help='display window size for both visdom and HTML')
        # additional parameters
        parser.add_argument('--epoch', type=str, default='latest', help='which epoch to load? set to latest to use latest cached model')
        parser.add_argument('--load_iter', type=int, default='0', help='which iteration to load? if load_iter > 0, the code will load models by iter_[load_iter]; otherwise, the code will load models by [epoch]')
        parser.add_argument('--verbose', action='store_true', help='if specified, print more debugging information')
        parser.add_argument('--suffix', default='', type=str, help='customized suffix: opt.name = opt.name + suffix: e.g., {model}_{netG}_size{load_size}')

        parser.add_argument('--data_dir', type=str, required=False,
                            help='input files directory; images can be .png .jpg .tiff')
        parser.add_argument('--output_dir', type=str, required=False,
                            help='result dir. result depth will be png. videos are MJPG as avi')
        parser.add_argument('--savecrops', type=int, required=False)
        parser.add_argument('--savewholeest', type=int, required=False)
        parser.add_argument('--output_resolution', type=int, required=False,
                            help='0 for no restriction, 1 for resize to input size')
        parser.add_argument('--net_receptive_field_size', type=int, required=False)
        parser.add_argument('--pix2pixsize', type=int, required=False)
        parser.add_argument('--generatevideo', type=int, required=False)
        parser.add_argument('--depthNet', type=int, required=False, help='0: midas, 1: structuredRL')
        parser.add_argument('--R0', action='store_true')
        parser.add_argument('--R20', action='store_true')
        parser.add_argument('--Final', action='store_true')
        parser.add_argument('--colorize_results', action='store_true')
        parser.add_argument('--max_res', type=float, default=np.inf)

        self.initialized = True
        return parser

    def gather_options(self):
        """Initialize our parser with basic options (only once).
        Add additional model-specific and dataset-specific options.
        These options are defined in the <modify_commandline_options> function
        in model and dataset classes.
        """
        if not self.initialized:  # check if it has been initialized
            parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
            parser = self.initialize(parser)

        # get the basic options
        opt, _ = parser.parse_known_args()

        # modify model-related parser options
        model_name = opt.model
        model_option_setter = models.get_option_setter(model_name)
        parser = model_option_setter(parser, self.isTrain)
        opt, _ = parser.parse_known_args()  # parse again with new defaults

        # modify dataset-related parser options
        # dataset_name = opt.dataset_mode
        # dataset_option_setter = pix2pix.data.get_option_setter(dataset_name)
        # parser = dataset_option_setter(parser, self.isTrain)

        # save and return the parser
        self.parser = parser
        # return parser.parse_args()  # EVIL
        return opt

    def print_options(self, opt):
        """Print and save options

        It will print both current options and default values (if different).
        It will save options into a text file / [checkpoints_dir] / opt.txt
        """
        message = ''
        message += '----------------- Options ---------------\n'
        for k, v in sorted(vars(opt).items()):
            comment = ''
            default = self.parser.get_default(k)
            if v != default:
                comment = '\t[default: %s]' % str(default)
            message += '{:>25}: {:<30}{}\n'.format(str(k), str(v), comment)
        message += '----------------- End -------------------'
        print(message)

        # save to the disk
        expr_dir = os.path.join(opt.checkpoints_dir, opt.name)
        util.mkdirs(expr_dir)
        file_name = os.path.join(expr_dir, '{}_opt.txt'.format(opt.phase))
        with open(file_name, 'wt') as opt_file:
            opt_file.write(message)
            opt_file.write('\n')

    def parse(self):
        """Parse our options, create checkpoints directory suffix, and set up gpu device."""
        opt = self.gather_options()
        opt.isTrain = self.isTrain   # train or test

        # process opt.suffix
        if opt.suffix:
            suffix = ('_' + opt.suffix.format(**vars(opt))) if opt.suffix != '' else ''
            opt.name = opt.name + suffix

        # self.print_options(opt)

        # set gpu ids
        str_ids = opt.gpu_ids.split(',')
        opt.gpu_ids = []
        for str_id in str_ids:
            id = int(str_id)
            if id >= 0:
                opt.gpu_ids.append(id)
        # if len(opt.gpu_ids) > 0:
        #     torch.cuda.set_device(opt.gpu_ids[0])

        self.opt = opt
        return self.opt
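In parse(), the --gpu_ids string is turned into a list of non-negative device ids, so '-1' selects CPU by producing an empty list. Equivalent standalone logic, shown only for illustration:

def parse_gpu_ids(gpu_ids: str):
    # Same behavior as the loop in BaseOptions.parse(): keep only ids >= 0.
    return [int(s) for s in gpu_ids.split(',') if int(s) >= 0]

print(parse_gpu_ids('0,2'))  # [0, 2]
print(parse_gpu_ids('-1'))   # []  -> CPU mode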
controlnet_aux/src/controlnet_aux/leres/pix2pix/options/test_options.py
ADDED
@@ -0,0 +1,22 @@
from .base_options import BaseOptions


class TestOptions(BaseOptions):
    """This class includes test options.

    It also includes shared options defined in BaseOptions.
    """

    def initialize(self, parser):
        parser = BaseOptions.initialize(self, parser)  # define shared options
        parser.add_argument('--aspect_ratio', type=float, default=1.0, help='aspect ratio of result images')
        parser.add_argument('--phase', type=str, default='test', help='train, val, test, etc')
        # Dropout and Batchnorm have different behavior during training and test.
        parser.add_argument('--eval', action='store_true', help='use eval mode during test time.')
        parser.add_argument('--num_test', type=int, default=50, help='how many test images to run')
        # rewrite default values
        parser.set_defaults(model='pix2pix4depth')
        # To avoid cropping, the load_size should be the same as crop_size
        parser.set_defaults(load_size=parser.get_default('crop_size'))
        self.isTrain = False
        return parser
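TestOptions is the entry point for building an inference config for the depth-merge network. The sketch below drives it headlessly; it assumes the installed controlnet_aux package layout for the import path and that the vendored models package resolves 'pix2pix4depth', and the argv values are placeholders rather than the pipeline's actual invocation.

import sys
from controlnet_aux.leres.pix2pix.options.test_options import TestOptions

sys.argv = ['pix2pix_depthmerge', '--gpu_ids', '-1']  # placeholder argv; '-1' selects CPU
opt = TestOptions().parse()
# Defaults injected by Pix2Pix4DepthModel.modify_commandline_options:
print(opt.model, opt.netG, opt.input_nc, opt.output_nc)  # pix2pix4depth unet_1024 2 1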
controlnet_aux/src/controlnet_aux/leres/pix2pix/util/__init__.py
ADDED
@@ -0,0 +1 @@
"""This package includes a miscellaneous collection of useful helper functions."""