ddoc committed

Commit 13c5a27 · 1 Parent(s): 6cbfb02

Upload 31 files
.github/ISSUE_TEMPLATE/bug_report.yaml ADDED
@@ -0,0 +1,23 @@
+ name: Bug report
+ description: Create a report
+ title: "[Bug]: "
+
+ body:
+   - type: textarea
+     attributes:
+       label: Describe the bug
+       description: A clear and concise description of what the bug is.
+
+   - type: textarea
+     attributes:
+       label: Full console logs
+       description: |
+         The full console log of your terminal.
+         From `Python 3.10.*, Version: v1.*, Commit hash: *` to the end.
+       render: Shell
+     validations:
+       required: true
+
+   - type: textarea
+     attributes:
+       label: List of installed extensions
.github/ISSUE_TEMPLATE/feature_request.md ADDED
@@ -0,0 +1,20 @@
+ ---
+ name: Feature request
+ about: Suggest an idea for this project
+ title: ''
+ labels: ''
+ assignees: ''
+
+ ---
+
+ **Is your feature request related to a problem? Please describe.**
+ A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
+
+ **Describe the solution you'd like**
+ A clear and concise description of what you want to happen.
+
+ **Describe alternatives you've considered**
+ A clear and concise description of any alternative solutions or features you've considered.
+
+ **Additional context**
+ Add any other context or screenshots about the feature request here.
.github/workflows/stale.yml ADDED
@@ -0,0 +1,13 @@
+ name: 'Close stale issues and PRs'
+ on:
+   schedule:
+     - cron: '30 1 * * *'
+
+ jobs:
+   stale:
+     runs-on: ubuntu-latest
+     steps:
+       - uses: actions/stale@v8
+         with:
+           days-before-stale: 30
+           days-before-close: 5
.gitignore ADDED
@@ -0,0 +1,196 @@
1
+ # Created by https://www.toptal.com/developers/gitignore/api/python,visualstudiocode
2
+ # Edit at https://www.toptal.com/developers/gitignore?templates=python,visualstudiocode
3
+
4
+ ### Python ###
5
+ # Byte-compiled / optimized / DLL files
6
+ __pycache__/
7
+ *.py[cod]
8
+ *$py.class
9
+
10
+ # C extensions
11
+ *.so
12
+
13
+ # Distribution / packaging
14
+ .Python
15
+ build/
16
+ develop-eggs/
17
+ dist/
18
+ downloads/
19
+ eggs/
20
+ .eggs/
21
+ lib/
22
+ lib64/
23
+ parts/
24
+ sdist/
25
+ var/
26
+ wheels/
27
+ share/python-wheels/
28
+ *.egg-info/
29
+ .installed.cfg
30
+ *.egg
31
+ MANIFEST
32
+
33
+ # PyInstaller
34
+ # Usually these files are written by a python script from a template
35
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
36
+ *.manifest
37
+ *.spec
38
+
39
+ # Installer logs
40
+ pip-log.txt
41
+ pip-delete-this-directory.txt
42
+
43
+ # Unit test / coverage reports
44
+ htmlcov/
45
+ .tox/
46
+ .nox/
47
+ .coverage
48
+ .coverage.*
49
+ .cache
50
+ nosetests.xml
51
+ coverage.xml
52
+ *.cover
53
+ *.py,cover
54
+ .hypothesis/
55
+ .pytest_cache/
56
+ cover/
57
+
58
+ # Translations
59
+ *.mo
60
+ *.pot
61
+
62
+ # Django stuff:
63
+ *.log
64
+ local_settings.py
65
+ db.sqlite3
66
+ db.sqlite3-journal
67
+
68
+ # Flask stuff:
69
+ instance/
70
+ .webassets-cache
71
+
72
+ # Scrapy stuff:
73
+ .scrapy
74
+
75
+ # Sphinx documentation
76
+ docs/_build/
77
+
78
+ # PyBuilder
79
+ .pybuilder/
80
+ target/
81
+
82
+ # Jupyter Notebook
83
+ .ipynb_checkpoints
84
+
85
+ # IPython
86
+ profile_default/
87
+ ipython_config.py
88
+
89
+ # pyenv
90
+ # For a library or package, you might want to ignore these files since the code is
91
+ # intended to run in multiple environments; otherwise, check them in:
92
+ # .python-version
93
+
94
+ # pipenv
95
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
96
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
97
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
98
+ # install all needed dependencies.
99
+ #Pipfile.lock
100
+
101
+ # poetry
102
+ # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
103
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
104
+ # commonly ignored for libraries.
105
+ # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
106
+ #poetry.lock
107
+
108
+ # pdm
109
+ # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
110
+ #pdm.lock
111
+ # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
112
+ # in version control.
113
+ # https://pdm.fming.dev/#use-with-ide
114
+ .pdm.toml
115
+
116
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
117
+ __pypackages__/
118
+
119
+ # Celery stuff
120
+ celerybeat-schedule
121
+ celerybeat.pid
122
+
123
+ # SageMath parsed files
124
+ *.sage.py
125
+
126
+ # Environments
127
+ .env
128
+ .venv
129
+ env/
130
+ venv/
131
+ ENV/
132
+ env.bak/
133
+ venv.bak/
134
+
135
+ # Spyder project settings
136
+ .spyderproject
137
+ .spyproject
138
+
139
+ # Rope project settings
140
+ .ropeproject
141
+
142
+ # mkdocs documentation
143
+ /site
144
+
145
+ # mypy
146
+ .mypy_cache/
147
+ .dmypy.json
148
+ dmypy.json
149
+
150
+ # Pyre type checker
151
+ .pyre/
152
+
153
+ # pytype static type analyzer
154
+ .pytype/
155
+
156
+ # Cython debug symbols
157
+ cython_debug/
158
+
159
+ # PyCharm
160
+ # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
161
+ # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
162
+ # and can be added to the global gitignore or merged into this file. For a more nuclear
163
+ # option (not recommended) you can uncomment the following to ignore the entire idea folder.
164
+ #.idea/
165
+
166
+ ### Python Patch ###
167
+ # Poetry local configuration file - https://python-poetry.org/docs/configuration/#local-configuration
168
+ poetry.toml
169
+
170
+ # ruff
171
+ .ruff_cache/
172
+
173
+ # LSP config files
174
+ pyrightconfig.json
175
+
176
+ ### VisualStudioCode ###
177
+ .vscode/*
178
+ !.vscode/settings.json
179
+ !.vscode/tasks.json
180
+ !.vscode/launch.json
181
+ !.vscode/extensions.json
182
+ !.vscode/*.code-snippets
183
+
184
+ # Local History for Visual Studio Code
185
+ .history/
186
+
187
+ # Built Visual Studio Code Extensions
188
+ *.vsix
189
+
190
+ ### VisualStudioCode Patch ###
191
+ # Ignore all local history of files
192
+ .history
193
+ .ionide
194
+
195
+ # End of https://www.toptal.com/developers/gitignore/api/python,visualstudiocode
196
+ *.ipynb
.pre-commit-config.yaml ADDED
@@ -0,0 +1,24 @@
+ repos:
+   - repo: https://github.com/pre-commit/pre-commit-hooks
+     rev: v4.4.0
+     hooks:
+       - id: trailing-whitespace
+         args: [--markdown-linebreak-ext=md]
+       - id: end-of-file-fixer
+       - id: mixed-line-ending
+
+   - repo: https://github.com/pycqa/isort
+     rev: 5.12.0
+     hooks:
+       - id: isort
+
+   - repo: https://github.com/charliermarsh/ruff-pre-commit
+     rev: "v0.0.270"
+     hooks:
+       - id: ruff
+         args: [--fix, --exit-non-zero-on-fix]
+
+   - repo: https://github.com/psf/black
+     rev: 23.3.0
+     hooks:
+       - id: black
CHANGELOG.md ADDED
@@ -0,0 +1,163 @@
+ # Changelog
+
+ ## 2023-05-30
+
+ - v23.6.0
+   - Renamed the script from `After Detailer` to `ADetailer`
+     - API users need to update accordingly
+   - Changed several settings
+     - `ad_conf` → `ad_confidence`: an int between 0 and 100 → a float between 0.0 and 1.0
+     - `ad_inpaint_full_res` → `ad_inpaint_only_masked`
+     - `ad_inpaint_full_res_padding` → `ad_inpaint_only_masked_padding`
+   - Added the mediapipe face mesh model
+     - Minimum mediapipe version: `0.10.0`
+
+   - Removed the rich traceback
+   - When a huggingface download fails, no error is raised and that model is removed from the list
+
+ ## 2023-05-26
+
+ - v23.5.19
+   - Added the `None` option to the first tab as well
+   - Blocked the API from using any ControlNet model other than inpaint as the ad controlnet model
+   - Paused total tqdm progress bar updates while adetailer is running
+   - Stopped the adetailer process when state.interrupted is set
+   - Changed the ControlNet process to be called only at the end of each batch
+
+ ### 2023-05-25
+
+ - v23.5.18
+   - ControlNet-related fixes
+     - Set every unit's `input_mode` to `SIMPLE`
+     - Added restoring the ControlNet UNet hooks and hijack functions only while adetailer runs
+     - Re-run the ControlNet script's process after adetailer finishes (fixes the issue with batch count 2 or higher)
+   - Removed ControlNet from the default list of enabled scripts
+
+ ### 2023-05-22
+
+ - v23.5.17
+   - Enable the ControlNet script when the ControlNet extension is present (fixes ControlNet-related issues)
+   - Set elem_id on every component
+   - Show the version in the UI
+
+
+ ### 2023-05-19
+
+ - v23.5.16
+   - Added options
+     - Mask min/max ratio
+     - Mask merge mode
+     - Restore faces after ADetailer
+   - Grouped the options into an Accordion
+
+ ### 2023-05-18
+
+ - v23.5.15
+   - Import only what is needed (no more VAE loading errors, faster loading)
+
+ ### 2023-05-17
+
+ - v23.5.14
+   - Added skipping part of the ad prompt with `[SKIP]`
+   - Added a bbox sort option
+   - Generated sd_webui type hints
+   - Fixed (?) an API error related to the enable checker
+
+ ### 2023-05-15
+
+ - v23.5.13
+   - Added splitting the ad prompt with `[SEP]` and applying each part separately
+   - Changed the enable checker back to pydantic
+   - Moved UI-related functions into the adetailer.ui folder
+   - Disable all controlnet units when using controlnet
+   - Create the adetailer folder if it does not exist
+
+ ### 2023-05-13
+
+ - v23.5.12
+   - Changed all inputs except `ad_enable` to be passed in as a dict
+     - Especially convenient when using the web API
+     - web api breaking change
+   - Fixed a bug where the `mask_preprocess` argument was not passed (PR #47)
+   - Added an option to not download models from huggingface: `--ad-no-huggingface`
+
+ ### 2023-05-12
+
+ - v23.5.11
+   - Removed the `ultralytics` alert
+   - Removed more unnecessary exif arguments
+   - Added the `use separate steps` option
+   - Adjusted the UI layout
+
+ ### 2023-05-09
+
+ - v23.5.10
+   - Added an option to apply only selected scripts to ADetailer, default `True`; configurable in the settings tab
+     - Default: `dynamic_prompting,dynamic_thresholding,wildcards,wildcard_recursive`
+   - Added the `person_yolov8s-seg.pt` model
+   - Set the minimum `ultralytics` version to `8.0.97` (the version with the C:\\ issue fixed)
+
+ ### 2023-05-08
+
+ - v23.5.9
+   - Two or more models can be used; default: 2, maximum: 5
+   - Enabled segment models; added `person_yolov8n-seg.pt`
+
+ ### 2023-05-07
+
+ - v23.5.8
+   - Arrow key support in the prompt and negative prompt (PR #24)
+   - Added `mask_preprocess`; seeds may differ from previous versions!
+   - Save the before image only when image processing actually happened
+   - Changed the labels in the settings to something more appropriate than just ADetailer
+
+ ### 2023-05-06
+
+ - v23.5.7
+   - Added the `ad_use_cfg_scale` option to decide whether to use a separate cfg scale
+   - Changed the `ad_enable` default from `True` to `False`
+   - Changed the `ad_model` default from `None` to the first model
+   - Works with as few as two inputs (ad_enable, ad_model)
+
+ - v23.5.7.post0
+   - Run `init_controlnet_ext` only when controlnet_exists == True
+   - Show the `ultralytics` warning to users who installed webui directly under the C drive
+
+ ### 2023-05-05 (Children's Day)
+
+ - v23.5.5
+   - Added the `Save images before ADetailer` option
+   - Show an error message when the number of input arguments differs from the length of ALL_ARGS
+   - Added installation instructions to README.md
+
+ - v23.5.6
+   - A detailed error message is shown when get_args raises an IndexError
+   - Built extra_params into AdetailerArgs
+   - Deep-copy scripts_args
+   - Split postprocess_image up a little
+
+ - v23.5.6.post0
+   - Show a detailed error message from `init_controlnet_ext`
+
+ ### 2023-05-04
+
+ - v23.5.4
+   - use pydantic for arguments validation
+   - revert: ad_model to `None` as default
+   - revert: `__future__` imports
+   - lazily import yolo and mediapipe
+
+ ### 2023-05-03
+
+ - v23.5.3.post0
+   - remove `__future__` imports
+   - change to copy scripts and scripts args
+
+ - v23.5.3.post1
+   - change default ad_model from `None`
+
+ ### 2023-05-02
+
+ - v23.5.3
+   - Remove `None` from model list and add `Enable ADetailer` checkbox.
+   - install.py `skip_install` fix.
LICENSE.md ADDED
@@ -0,0 +1,662 @@
1
+
2
+ GNU AFFERO GENERAL PUBLIC LICENSE
3
+ Version 3, 19 November 2007
4
+
5
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
6
+ Everyone is permitted to copy and distribute verbatim copies
7
+ of this license document, but changing it is not allowed.
8
+
9
+ Preamble
10
+
11
+ The GNU Affero General Public License is a free, copyleft license for
12
+ software and other kinds of works, specifically designed to ensure
13
+ cooperation with the community in the case of network server software.
14
+
15
+ The licenses for most software and other practical works are designed
16
+ to take away your freedom to share and change the works. By contrast,
17
+ our General Public Licenses are intended to guarantee your freedom to
18
+ share and change all versions of a program--to make sure it remains free
19
+ software for all its users.
20
+
21
+ When we speak of free software, we are referring to freedom, not
22
+ price. Our General Public Licenses are designed to make sure that you
23
+ have the freedom to distribute copies of free software (and charge for
24
+ them if you wish), that you receive source code or can get it if you
25
+ want it, that you can change the software or use pieces of it in new
26
+ free programs, and that you know you can do these things.
27
+
28
+ Developers that use our General Public Licenses protect your rights
29
+ with two steps: (1) assert copyright on the software, and (2) offer
30
+ you this License which gives you legal permission to copy, distribute
31
+ and/or modify the software.
32
+
33
+ A secondary benefit of defending all users' freedom is that
34
+ improvements made in alternate versions of the program, if they
35
+ receive widespread use, become available for other developers to
36
+ incorporate. Many developers of free software are heartened and
37
+ encouraged by the resulting cooperation. However, in the case of
38
+ software used on network servers, this result may fail to come about.
39
+ The GNU General Public License permits making a modified version and
40
+ letting the public access it on a server without ever releasing its
41
+ source code to the public.
42
+
43
+ The GNU Affero General Public License is designed specifically to
44
+ ensure that, in such cases, the modified source code becomes available
45
+ to the community. It requires the operator of a network server to
46
+ provide the source code of the modified version running there to the
47
+ users of that server. Therefore, public use of a modified version, on
48
+ a publicly accessible server, gives the public access to the source
49
+ code of the modified version.
50
+
51
+ An older license, called the Affero General Public License and
52
+ published by Affero, was designed to accomplish similar goals. This is
53
+ a different license, not a version of the Affero GPL, but Affero has
54
+ released a new version of the Affero GPL which permits relicensing under
55
+ this license.
56
+
57
+ The precise terms and conditions for copying, distribution and
58
+ modification follow.
59
+
60
+ TERMS AND CONDITIONS
61
+
62
+ 0. Definitions.
63
+
64
+ "This License" refers to version 3 of the GNU Affero General Public License.
65
+
66
+ "Copyright" also means copyright-like laws that apply to other kinds of
67
+ works, such as semiconductor masks.
68
+
69
+ "The Program" refers to any copyrightable work licensed under this
70
+ License. Each licensee is addressed as "you". "Licensees" and
71
+ "recipients" may be individuals or organizations.
72
+
73
+ To "modify" a work means to copy from or adapt all or part of the work
74
+ in a fashion requiring copyright permission, other than the making of an
75
+ exact copy. The resulting work is called a "modified version" of the
76
+ earlier work or a work "based on" the earlier work.
77
+
78
+ A "covered work" means either the unmodified Program or a work based
79
+ on the Program.
80
+
81
+ To "propagate" a work means to do anything with it that, without
82
+ permission, would make you directly or secondarily liable for
83
+ infringement under applicable copyright law, except executing it on a
84
+ computer or modifying a private copy. Propagation includes copying,
85
+ distribution (with or without modification), making available to the
86
+ public, and in some countries other activities as well.
87
+
88
+ To "convey" a work means any kind of propagation that enables other
89
+ parties to make or receive copies. Mere interaction with a user through
90
+ a computer network, with no transfer of a copy, is not conveying.
91
+
92
+ An interactive user interface displays "Appropriate Legal Notices"
93
+ to the extent that it includes a convenient and prominently visible
94
+ feature that (1) displays an appropriate copyright notice, and (2)
95
+ tells the user that there is no warranty for the work (except to the
96
+ extent that warranties are provided), that licensees may convey the
97
+ work under this License, and how to view a copy of this License. If
98
+ the interface presents a list of user commands or options, such as a
99
+ menu, a prominent item in the list meets this criterion.
100
+
101
+ 1. Source Code.
102
+
103
+ The "source code" for a work means the preferred form of the work
104
+ for making modifications to it. "Object code" means any non-source
105
+ form of a work.
106
+
107
+ A "Standard Interface" means an interface that either is an official
108
+ standard defined by a recognized standards body, or, in the case of
109
+ interfaces specified for a particular programming language, one that
110
+ is widely used among developers working in that language.
111
+
112
+ The "System Libraries" of an executable work include anything, other
113
+ than the work as a whole, that (a) is included in the normal form of
114
+ packaging a Major Component, but which is not part of that Major
115
+ Component, and (b) serves only to enable use of the work with that
116
+ Major Component, or to implement a Standard Interface for which an
117
+ implementation is available to the public in source code form. A
118
+ "Major Component", in this context, means a major essential component
119
+ (kernel, window system, and so on) of the specific operating system
120
+ (if any) on which the executable work runs, or a compiler used to
121
+ produce the work, or an object code interpreter used to run it.
122
+
123
+ The "Corresponding Source" for a work in object code form means all
124
+ the source code needed to generate, install, and (for an executable
125
+ work) run the object code and to modify the work, including scripts to
126
+ control those activities. However, it does not include the work's
127
+ System Libraries, or general-purpose tools or generally available free
128
+ programs which are used unmodified in performing those activities but
129
+ which are not part of the work. For example, Corresponding Source
130
+ includes interface definition files associated with source files for
131
+ the work, and the source code for shared libraries and dynamically
132
+ linked subprograms that the work is specifically designed to require,
133
+ such as by intimate data communication or control flow between those
134
+ subprograms and other parts of the work.
135
+
136
+ The Corresponding Source need not include anything that users
137
+ can regenerate automatically from other parts of the Corresponding
138
+ Source.
139
+
140
+ The Corresponding Source for a work in source code form is that
141
+ same work.
142
+
143
+ 2. Basic Permissions.
144
+
145
+ All rights granted under this License are granted for the term of
146
+ copyright on the Program, and are irrevocable provided the stated
147
+ conditions are met. This License explicitly affirms your unlimited
148
+ permission to run the unmodified Program. The output from running a
149
+ covered work is covered by this License only if the output, given its
150
+ content, constitutes a covered work. This License acknowledges your
151
+ rights of fair use or other equivalent, as provided by copyright law.
152
+
153
+ You may make, run and propagate covered works that you do not
154
+ convey, without conditions so long as your license otherwise remains
155
+ in force. You may convey covered works to others for the sole purpose
156
+ of having them make modifications exclusively for you, or provide you
157
+ with facilities for running those works, provided that you comply with
158
+ the terms of this License in conveying all material for which you do
159
+ not control copyright. Those thus making or running the covered works
160
+ for you must do so exclusively on your behalf, under your direction
161
+ and control, on terms that prohibit them from making any copies of
162
+ your copyrighted material outside their relationship with you.
163
+
164
+ Conveying under any other circumstances is permitted solely under
165
+ the conditions stated below. Sublicensing is not allowed; section 10
166
+ makes it unnecessary.
167
+
168
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
169
+
170
+ No covered work shall be deemed part of an effective technological
171
+ measure under any applicable law fulfilling obligations under article
172
+ 11 of the WIPO copyright treaty adopted on 20 December 1996, or
173
+ similar laws prohibiting or restricting circumvention of such
174
+ measures.
175
+
176
+ When you convey a covered work, you waive any legal power to forbid
177
+ circumvention of technological measures to the extent such circumvention
178
+ is effected by exercising rights under this License with respect to
179
+ the covered work, and you disclaim any intention to limit operation or
180
+ modification of the work as a means of enforcing, against the work's
181
+ users, your or third parties' legal rights to forbid circumvention of
182
+ technological measures.
183
+
184
+ 4. Conveying Verbatim Copies.
185
+
186
+ You may convey verbatim copies of the Program's source code as you
187
+ receive it, in any medium, provided that you conspicuously and
188
+ appropriately publish on each copy an appropriate copyright notice;
189
+ keep intact all notices stating that this License and any
190
+ non-permissive terms added in accord with section 7 apply to the code;
191
+ keep intact all notices of the absence of any warranty; and give all
192
+ recipients a copy of this License along with the Program.
193
+
194
+ You may charge any price or no price for each copy that you convey,
195
+ and you may offer support or warranty protection for a fee.
196
+
197
+ 5. Conveying Modified Source Versions.
198
+
199
+ You may convey a work based on the Program, or the modifications to
200
+ produce it from the Program, in the form of source code under the
201
+ terms of section 4, provided that you also meet all of these conditions:
202
+
203
+ a) The work must carry prominent notices stating that you modified
204
+ it, and giving a relevant date.
205
+
206
+ b) The work must carry prominent notices stating that it is
207
+ released under this License and any conditions added under section
208
+ 7. This requirement modifies the requirement in section 4 to
209
+ "keep intact all notices".
210
+
211
+ c) You must license the entire work, as a whole, under this
212
+ License to anyone who comes into possession of a copy. This
213
+ License will therefore apply, along with any applicable section 7
214
+ additional terms, to the whole of the work, and all its parts,
215
+ regardless of how they are packaged. This License gives no
216
+ permission to license the work in any other way, but it does not
217
+ invalidate such permission if you have separately received it.
218
+
219
+ d) If the work has interactive user interfaces, each must display
220
+ Appropriate Legal Notices; however, if the Program has interactive
221
+ interfaces that do not display Appropriate Legal Notices, your
222
+ work need not make them do so.
223
+
224
+ A compilation of a covered work with other separate and independent
225
+ works, which are not by their nature extensions of the covered work,
226
+ and which are not combined with it such as to form a larger program,
227
+ in or on a volume of a storage or distribution medium, is called an
228
+ "aggregate" if the compilation and its resulting copyright are not
229
+ used to limit the access or legal rights of the compilation's users
230
+ beyond what the individual works permit. Inclusion of a covered work
231
+ in an aggregate does not cause this License to apply to the other
232
+ parts of the aggregate.
233
+
234
+ 6. Conveying Non-Source Forms.
235
+
236
+ You may convey a covered work in object code form under the terms
237
+ of sections 4 and 5, provided that you also convey the
238
+ machine-readable Corresponding Source under the terms of this License,
239
+ in one of these ways:
240
+
241
+ a) Convey the object code in, or embodied in, a physical product
242
+ (including a physical distribution medium), accompanied by the
243
+ Corresponding Source fixed on a durable physical medium
244
+ customarily used for software interchange.
245
+
246
+ b) Convey the object code in, or embodied in, a physical product
247
+ (including a physical distribution medium), accompanied by a
248
+ written offer, valid for at least three years and valid for as
249
+ long as you offer spare parts or customer support for that product
250
+ model, to give anyone who possesses the object code either (1) a
251
+ copy of the Corresponding Source for all the software in the
252
+ product that is covered by this License, on a durable physical
253
+ medium customarily used for software interchange, for a price no
254
+ more than your reasonable cost of physically performing this
255
+ conveying of source, or (2) access to copy the
256
+ Corresponding Source from a network server at no charge.
257
+
258
+ c) Convey individual copies of the object code with a copy of the
259
+ written offer to provide the Corresponding Source. This
260
+ alternative is allowed only occasionally and noncommercially, and
261
+ only if you received the object code with such an offer, in accord
262
+ with subsection 6b.
263
+
264
+ d) Convey the object code by offering access from a designated
265
+ place (gratis or for a charge), and offer equivalent access to the
266
+ Corresponding Source in the same way through the same place at no
267
+ further charge. You need not require recipients to copy the
268
+ Corresponding Source along with the object code. If the place to
269
+ copy the object code is a network server, the Corresponding Source
270
+ may be on a different server (operated by you or a third party)
271
+ that supports equivalent copying facilities, provided you maintain
272
+ clear directions next to the object code saying where to find the
273
+ Corresponding Source. Regardless of what server hosts the
274
+ Corresponding Source, you remain obligated to ensure that it is
275
+ available for as long as needed to satisfy these requirements.
276
+
277
+ e) Convey the object code using peer-to-peer transmission, provided
278
+ you inform other peers where the object code and Corresponding
279
+ Source of the work are being offered to the general public at no
280
+ charge under subsection 6d.
281
+
282
+ A separable portion of the object code, whose source code is excluded
283
+ from the Corresponding Source as a System Library, need not be
284
+ included in conveying the object code work.
285
+
286
+ A "User Product" is either (1) a "consumer product", which means any
287
+ tangible personal property which is normally used for personal, family,
288
+ or household purposes, or (2) anything designed or sold for incorporation
289
+ into a dwelling. In determining whether a product is a consumer product,
290
+ doubtful cases shall be resolved in favor of coverage. For a particular
291
+ product received by a particular user, "normally used" refers to a
292
+ typical or common use of that class of product, regardless of the status
293
+ of the particular user or of the way in which the particular user
294
+ actually uses, or expects or is expected to use, the product. A product
295
+ is a consumer product regardless of whether the product has substantial
296
+ commercial, industrial or non-consumer uses, unless such uses represent
297
+ the only significant mode of use of the product.
298
+
299
+ "Installation Information" for a User Product means any methods,
300
+ procedures, authorization keys, or other information required to install
301
+ and execute modified versions of a covered work in that User Product from
302
+ a modified version of its Corresponding Source. The information must
303
+ suffice to ensure that the continued functioning of the modified object
304
+ code is in no case prevented or interfered with solely because
305
+ modification has been made.
306
+
307
+ If you convey an object code work under this section in, or with, or
308
+ specifically for use in, a User Product, and the conveying occurs as
309
+ part of a transaction in which the right of possession and use of the
310
+ User Product is transferred to the recipient in perpetuity or for a
311
+ fixed term (regardless of how the transaction is characterized), the
312
+ Corresponding Source conveyed under this section must be accompanied
313
+ by the Installation Information. But this requirement does not apply
314
+ if neither you nor any third party retains the ability to install
315
+ modified object code on the User Product (for example, the work has
316
+ been installed in ROM).
317
+
318
+ The requirement to provide Installation Information does not include a
319
+ requirement to continue to provide support service, warranty, or updates
320
+ for a work that has been modified or installed by the recipient, or for
321
+ the User Product in which it has been modified or installed. Access to a
322
+ network may be denied when the modification itself materially and
323
+ adversely affects the operation of the network or violates the rules and
324
+ protocols for communication across the network.
325
+
326
+ Corresponding Source conveyed, and Installation Information provided,
327
+ in accord with this section must be in a format that is publicly
328
+ documented (and with an implementation available to the public in
329
+ source code form), and must require no special password or key for
330
+ unpacking, reading or copying.
331
+
332
+ 7. Additional Terms.
333
+
334
+ "Additional permissions" are terms that supplement the terms of this
335
+ License by making exceptions from one or more of its conditions.
336
+ Additional permissions that are applicable to the entire Program shall
337
+ be treated as though they were included in this License, to the extent
338
+ that they are valid under applicable law. If additional permissions
339
+ apply only to part of the Program, that part may be used separately
340
+ under those permissions, but the entire Program remains governed by
341
+ this License without regard to the additional permissions.
342
+
343
+ When you convey a copy of a covered work, you may at your option
344
+ remove any additional permissions from that copy, or from any part of
345
+ it. (Additional permissions may be written to require their own
346
+ removal in certain cases when you modify the work.) You may place
347
+ additional permissions on material, added by you to a covered work,
348
+ for which you have or can give appropriate copyright permission.
349
+
350
+ Notwithstanding any other provision of this License, for material you
351
+ add to a covered work, you may (if authorized by the copyright holders of
352
+ that material) supplement the terms of this License with terms:
353
+
354
+ a) Disclaiming warranty or limiting liability differently from the
355
+ terms of sections 15 and 16 of this License; or
356
+
357
+ b) Requiring preservation of specified reasonable legal notices or
358
+ author attributions in that material or in the Appropriate Legal
359
+ Notices displayed by works containing it; or
360
+
361
+ c) Prohibiting misrepresentation of the origin of that material, or
362
+ requiring that modified versions of such material be marked in
363
+ reasonable ways as different from the original version; or
364
+
365
+ d) Limiting the use for publicity purposes of names of licensors or
366
+ authors of the material; or
367
+
368
+ e) Declining to grant rights under trademark law for use of some
369
+ trade names, trademarks, or service marks; or
370
+
371
+ f) Requiring indemnification of licensors and authors of that
372
+ material by anyone who conveys the material (or modified versions of
373
+ it) with contractual assumptions of liability to the recipient, for
374
+ any liability that these contractual assumptions directly impose on
375
+ those licensors and authors.
376
+
377
+ All other non-permissive additional terms are considered "further
378
+ restrictions" within the meaning of section 10. If the Program as you
379
+ received it, or any part of it, contains a notice stating that it is
380
+ governed by this License along with a term that is a further
381
+ restriction, you may remove that term. If a license document contains
382
+ a further restriction but permits relicensing or conveying under this
383
+ License, you may add to a covered work material governed by the terms
384
+ of that license document, provided that the further restriction does
385
+ not survive such relicensing or conveying.
386
+
387
+ If you add terms to a covered work in accord with this section, you
388
+ must place, in the relevant source files, a statement of the
389
+ additional terms that apply to those files, or a notice indicating
390
+ where to find the applicable terms.
391
+
392
+ Additional terms, permissive or non-permissive, may be stated in the
393
+ form of a separately written license, or stated as exceptions;
394
+ the above requirements apply either way.
395
+
396
+ 8. Termination.
397
+
398
+ You may not propagate or modify a covered work except as expressly
399
+ provided under this License. Any attempt otherwise to propagate or
400
+ modify it is void, and will automatically terminate your rights under
401
+ this License (including any patent licenses granted under the third
402
+ paragraph of section 11).
403
+
404
+ However, if you cease all violation of this License, then your
405
+ license from a particular copyright holder is reinstated (a)
406
+ provisionally, unless and until the copyright holder explicitly and
407
+ finally terminates your license, and (b) permanently, if the copyright
408
+ holder fails to notify you of the violation by some reasonable means
409
+ prior to 60 days after the cessation.
410
+
411
+ Moreover, your license from a particular copyright holder is
412
+ reinstated permanently if the copyright holder notifies you of the
413
+ violation by some reasonable means, this is the first time you have
414
+ received notice of violation of this License (for any work) from that
415
+ copyright holder, and you cure the violation prior to 30 days after
416
+ your receipt of the notice.
417
+
418
+ Termination of your rights under this section does not terminate the
419
+ licenses of parties who have received copies or rights from you under
420
+ this License. If your rights have been terminated and not permanently
421
+ reinstated, you do not qualify to receive new licenses for the same
422
+ material under section 10.
423
+
424
+ 9. Acceptance Not Required for Having Copies.
425
+
426
+ You are not required to accept this License in order to receive or
427
+ run a copy of the Program. Ancillary propagation of a covered work
428
+ occurring solely as a consequence of using peer-to-peer transmission
429
+ to receive a copy likewise does not require acceptance. However,
430
+ nothing other than this License grants you permission to propagate or
431
+ modify any covered work. These actions infringe copyright if you do
432
+ not accept this License. Therefore, by modifying or propagating a
433
+ covered work, you indicate your acceptance of this License to do so.
434
+
435
+ 10. Automatic Licensing of Downstream Recipients.
436
+
437
+ Each time you convey a covered work, the recipient automatically
438
+ receives a license from the original licensors, to run, modify and
439
+ propagate that work, subject to this License. You are not responsible
440
+ for enforcing compliance by third parties with this License.
441
+
442
+ An "entity transaction" is a transaction transferring control of an
443
+ organization, or substantially all assets of one, or subdividing an
444
+ organization, or merging organizations. If propagation of a covered
445
+ work results from an entity transaction, each party to that
446
+ transaction who receives a copy of the work also receives whatever
447
+ licenses to the work the party's predecessor in interest had or could
448
+ give under the previous paragraph, plus a right to possession of the
449
+ Corresponding Source of the work from the predecessor in interest, if
450
+ the predecessor has it or can get it with reasonable efforts.
451
+
452
+ You may not impose any further restrictions on the exercise of the
453
+ rights granted or affirmed under this License. For example, you may
454
+ not impose a license fee, royalty, or other charge for exercise of
455
+ rights granted under this License, and you may not initiate litigation
456
+ (including a cross-claim or counterclaim in a lawsuit) alleging that
457
+ any patent claim is infringed by making, using, selling, offering for
458
+ sale, or importing the Program or any portion of it.
459
+
460
+ 11. Patents.
461
+
462
+ A "contributor" is a copyright holder who authorizes use under this
463
+ License of the Program or a work on which the Program is based. The
464
+ work thus licensed is called the contributor's "contributor version".
465
+
466
+ A contributor's "essential patent claims" are all patent claims
467
+ owned or controlled by the contributor, whether already acquired or
468
+ hereafter acquired, that would be infringed by some manner, permitted
469
+ by this License, of making, using, or selling its contributor version,
470
+ but do not include claims that would be infringed only as a
471
+ consequence of further modification of the contributor version. For
472
+ purposes of this definition, "control" includes the right to grant
473
+ patent sublicenses in a manner consistent with the requirements of
474
+ this License.
475
+
476
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
477
+ patent license under the contributor's essential patent claims, to
478
+ make, use, sell, offer for sale, import and otherwise run, modify and
479
+ propagate the contents of its contributor version.
480
+
481
+ In the following three paragraphs, a "patent license" is any express
482
+ agreement or commitment, however denominated, not to enforce a patent
483
+ (such as an express permission to practice a patent or covenant not to
484
+ sue for patent infringement). To "grant" such a patent license to a
485
+ party means to make such an agreement or commitment not to enforce a
486
+ patent against the party.
487
+
488
+ If you convey a covered work, knowingly relying on a patent license,
489
+ and the Corresponding Source of the work is not available for anyone
490
+ to copy, free of charge and under the terms of this License, through a
491
+ publicly available network server or other readily accessible means,
492
+ then you must either (1) cause the Corresponding Source to be so
493
+ available, or (2) arrange to deprive yourself of the benefit of the
494
+ patent license for this particular work, or (3) arrange, in a manner
495
+ consistent with the requirements of this License, to extend the patent
496
+ license to downstream recipients. "Knowingly relying" means you have
497
+ actual knowledge that, but for the patent license, your conveying the
498
+ covered work in a country, or your recipient's use of the covered work
499
+ in a country, would infringe one or more identifiable patents in that
500
+ country that you have reason to believe are valid.
501
+
502
+ If, pursuant to or in connection with a single transaction or
503
+ arrangement, you convey, or propagate by procuring conveyance of, a
504
+ covered work, and grant a patent license to some of the parties
505
+ receiving the covered work authorizing them to use, propagate, modify
506
+ or convey a specific copy of the covered work, then the patent license
507
+ you grant is automatically extended to all recipients of the covered
508
+ work and works based on it.
509
+
510
+ A patent license is "discriminatory" if it does not include within
511
+ the scope of its coverage, prohibits the exercise of, or is
512
+ conditioned on the non-exercise of one or more of the rights that are
513
+ specifically granted under this License. You may not convey a covered
514
+ work if you are a party to an arrangement with a third party that is
515
+ in the business of distributing software, under which you make payment
516
+ to the third party based on the extent of your activity of conveying
517
+ the work, and under which the third party grants, to any of the
518
+ parties who would receive the covered work from you, a discriminatory
519
+ patent license (a) in connection with copies of the covered work
520
+ conveyed by you (or copies made from those copies), or (b) primarily
521
+ for and in connection with specific products or compilations that
522
+ contain the covered work, unless you entered into that arrangement,
523
+ or that patent license was granted, prior to 28 March 2007.
524
+
525
+ Nothing in this License shall be construed as excluding or limiting
526
+ any implied license or other defenses to infringement that may
527
+ otherwise be available to you under applicable patent law.
528
+
529
+ 12. No Surrender of Others' Freedom.
530
+
531
+ If conditions are imposed on you (whether by court order, agreement or
532
+ otherwise) that contradict the conditions of this License, they do not
533
+ excuse you from the conditions of this License. If you cannot convey a
534
+ covered work so as to satisfy simultaneously your obligations under this
535
+ License and any other pertinent obligations, then as a consequence you may
536
+ not convey it at all. For example, if you agree to terms that obligate you
537
+ to collect a royalty for further conveying from those to whom you convey
538
+ the Program, the only way you could satisfy both those terms and this
539
+ License would be to refrain entirely from conveying the Program.
540
+
541
+ 13. Remote Network Interaction; Use with the GNU General Public License.
542
+
543
+ Notwithstanding any other provision of this License, if you modify the
544
+ Program, your modified version must prominently offer all users
545
+ interacting with it remotely through a computer network (if your version
546
+ supports such interaction) an opportunity to receive the Corresponding
547
+ Source of your version by providing access to the Corresponding Source
548
+ from a network server at no charge, through some standard or customary
549
+ means of facilitating copying of software. This Corresponding Source
550
+ shall include the Corresponding Source for any work covered by version 3
551
+ of the GNU General Public License that is incorporated pursuant to the
552
+ following paragraph.
553
+
554
+ Notwithstanding any other provision of this License, you have
555
+ permission to link or combine any covered work with a work licensed
556
+ under version 3 of the GNU General Public License into a single
557
+ combined work, and to convey the resulting work. The terms of this
558
+ License will continue to apply to the part which is the covered work,
559
+ but the work with which it is combined will remain governed by version
560
+ 3 of the GNU General Public License.
561
+
562
+ 14. Revised Versions of this License.
563
+
564
+ The Free Software Foundation may publish revised and/or new versions of
565
+ the GNU Affero General Public License from time to time. Such new versions
566
+ will be similar in spirit to the present version, but may differ in detail to
567
+ address new problems or concerns.
568
+
569
+ Each version is given a distinguishing version number. If the
570
+ Program specifies that a certain numbered version of the GNU Affero General
571
+ Public License "or any later version" applies to it, you have the
572
+ option of following the terms and conditions either of that numbered
573
+ version or of any later version published by the Free Software
574
+ Foundation. If the Program does not specify a version number of the
575
+ GNU Affero General Public License, you may choose any version ever published
576
+ by the Free Software Foundation.
577
+
578
+ If the Program specifies that a proxy can decide which future
579
+ versions of the GNU Affero General Public License can be used, that proxy's
580
+ public statement of acceptance of a version permanently authorizes you
581
+ to choose that version for the Program.
582
+
583
+ Later license versions may give you additional or different
584
+ permissions. However, no additional obligations are imposed on any
585
+ author or copyright holder as a result of your choosing to follow a
586
+ later version.
587
+
588
+ 15. Disclaimer of Warranty.
589
+
590
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
591
+ APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
592
+ HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
593
+ OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
594
+ THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
595
+ PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
596
+ IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
597
+ ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
598
+
599
+ 16. Limitation of Liability.
600
+
601
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
602
+ WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
603
+ THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
604
+ GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
605
+ USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
606
+ DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
607
+ PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
608
+ EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
609
+ SUCH DAMAGES.
610
+
611
+ 17. Interpretation of Sections 15 and 16.
612
+
613
+ If the disclaimer of warranty and limitation of liability provided
614
+ above cannot be given local legal effect according to their terms,
615
+ reviewing courts shall apply local law that most closely approximates
616
+ an absolute waiver of all civil liability in connection with the
617
+ Program, unless a warranty or assumption of liability accompanies a
618
+ copy of the Program in return for a fee.
619
+
620
+ END OF TERMS AND CONDITIONS
621
+
622
+ How to Apply These Terms to Your New Programs
623
+
624
+ If you develop a new program, and you want it to be of the greatest
625
+ possible use to the public, the best way to achieve this is to make it
626
+ free software which everyone can redistribute and change under these terms.
627
+
628
+ To do so, attach the following notices to the program. It is safest
629
+ to attach them to the start of each source file to most effectively
630
+ state the exclusion of warranty; and each file should have at least
631
+ the "copyright" line and a pointer to where the full notice is found.
632
+
633
+ <one line to give the program's name and a brief idea of what it does.>
634
+ Copyright (C) <year> <name of author>
635
+
636
+ This program is free software: you can redistribute it and/or modify
637
+ it under the terms of the GNU Affero General Public License as published
638
+ by the Free Software Foundation, either version 3 of the License, or
639
+ (at your option) any later version.
640
+
641
+ This program is distributed in the hope that it will be useful,
642
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
643
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
644
+ GNU Affero General Public License for more details.
645
+
646
+ You should have received a copy of the GNU Affero General Public License
647
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
648
+
649
+ Also add information on how to contact you by electronic and paper mail.
650
+
651
+ If your software can interact with users remotely through a computer
652
+ network, you should also make sure that it provides a way for users to
653
+ get its source. For example, if your program is a web application, its
654
+ interface could display a "Source" link that leads users to an archive
655
+ of the code. There are many ways you could offer source, and different
656
+ solutions will be better for different programs; see section 13 for the
657
+ specific requirements.
658
+
659
+ You should also get your employer (if you work as a programmer) or school,
660
+ if any, to sign a "copyright disclaimer" for the program, if necessary.
661
+ For more information on this, and how to apply and follow the GNU AGPL, see
662
+ <http://www.gnu.org/licenses/>.
README.md ADDED
@@ -0,0 +1,105 @@
+ # !After Detailer
+
+ !After Detailer is an extension for the stable diffusion webui, similar to Detection Detailer, except it uses ultralytics instead of mmdet.
+
+ ## Install
+
+ (from Mikubill/sd-webui-controlnet)
+
+ 1. Open the "Extensions" tab.
+ 2. Open the "Install from URL" tab within it.
+ 3. Enter `https://github.com/Bing-su/adetailer.git` into "URL for extension's git repository".
+ 4. Press the "Install" button.
+ 5. Wait 5 seconds, and you will see the message "Installed into stable-diffusion-webui\extensions\adetailer. Use Installed tab to restart".
+ 6. Go to the "Installed" tab, click "Check for updates", and then click "Apply and restart UI". (You can also use this method to update the extension later.)
+ 7. Completely restart the A1111 webui, including your terminal. (If you do not know what a "terminal" is, you can reboot your computer: turn it off and on again.)
+
+ You can now install it directly from the Extensions tab.
+
+ ![image](https://i.imgur.com/g6GdRBT.png)
+
+ You **DON'T** need to download any model from huggingface.
+
+ ## Options
+
+ | Model, Prompts                    |                                       |                                            |
+ | --------------------------------- | ------------------------------------- | ------------------------------------------ |
+ | ADetailer model                   | Determines what to detect.            | `None` = disable                           |
+ | ADetailer prompt, negative prompt | Prompts and negative prompts to apply | If left blank, the input prompts are used. |
+
+ | Detection                            |                                                                                                |     |
+ | ------------------------------------ | ---------------------------------------------------------------------------------------------- | --- |
+ | Detection model confidence threshold | Only objects with a detection model confidence above this threshold are used for inpainting.    |     |
+ | Mask min/max ratio                   | Only use masks whose area, as a fraction of the entire image area, lies between these ratios.   |     |
+
+ If you want to exclude objects in the background, try setting the min ratio to around `0.01` (see the sketch below).
+
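As a rough illustration of the mask min/max ratio filter described above (an illustrative sketch, not the extension's actual implementation), the check amounts to comparing each mask's area with the area of the whole image:

```python
import numpy as np

def filter_by_area_ratio(masks, min_ratio=0.0, max_ratio=1.0):
    """Keep only binary (H, W) masks whose area fraction lies within [min_ratio, max_ratio]."""
    kept = []
    for mask in masks:
        ratio = np.count_nonzero(mask) / mask.size  # fraction of the image covered by the mask
        if min_ratio <= ratio <= max_ratio:
            kept.append(mask)
    return kept
```

With `min_ratio=0.01`, a mask covering less than 1% of the image (typically a small background face) is dropped before inpainting.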
+ | Mask Preprocessing              |                                                                                                                                  |                                                                                          |
+ | ------------------------------- | -------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- |
+ | Mask x, y offset                | Moves the mask horizontally and vertically by the given number of pixels.                                                         |                                                                                          |
+ | Mask erosion (-) / dilation (+) | Enlarges or shrinks the detected mask.                                                                                             | [opencv example](https://docs.opencv.org/4.7.0/db/df6/tutorial_erosion_dilatation.html)  |
+ | Mask merge mode                 | `None`: Inpaint each mask<br/>`Merge`: Merge all masks and inpaint<br/>`Merge and Invert`: Merge all masks, invert, then inpaint   |                                                                                          |
+
+ Applied in this order: x, y offset → erosion/dilation → merge/invert (sketched below).
+
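A minimal sketch of that preprocessing order using NumPy and OpenCV; the function name, argument names, and offset sign convention are assumptions for illustration, not the extension's own code:

```python
import cv2
import numpy as np

def preprocess_masks(masks, x_offset=0, y_offset=0, dilate_erode=0, merge_mode="None"):
    """Apply offset -> erosion/dilation -> merge/invert to binary (H, W) uint8 masks."""
    processed = []
    for mask in masks:
        # 1. Shift the mask by the x/y offset (sign convention assumed here).
        shifted = np.roll(mask, shift=(-y_offset, x_offset), axis=(0, 1))

        # 2. Dilate (positive value) or erode (negative value) with a square kernel.
        if dilate_erode != 0:
            k = abs(dilate_erode)
            kernel = np.ones((k, k), np.uint8)
            op = cv2.dilate if dilate_erode > 0 else cv2.erode
            shifted = op(shifted, kernel, iterations=1)
        processed.append(shifted)

    # 3. Merge / invert according to the merge mode.
    if merge_mode == "None":
        return processed                   # inpaint each mask separately
    merged = np.maximum.reduce(processed)  # union of all masks
    if merge_mode == "Merge and Invert":
        merged = 255 - merged              # invert: inpaint everything except the masks
    return [merged]
```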
+ #### Inpainting
+
+ ![image](https://i.imgur.com/wyWlT1n.png)
+
+ Each option corresponds to the option of the same name on the inpaint tab.
+
+ ## ControlNet Inpainting
+
+ You can use ControlNet inpainting if you have the ControlNet extension installed along with a ControlNet inpaint model.
+
+ On the ControlNet tab, select a ControlNet inpaint model and set the model weight.
+
+ ## Model
+
+ | Model                 | Target                | mAP 50                        | mAP 50-95                     |
+ | --------------------- | --------------------- | ----------------------------- | ----------------------------- |
+ | face_yolov8n.pt       | 2D / realistic face   | 0.660                         | 0.366                         |
+ | face_yolov8s.pt       | 2D / realistic face   | 0.713                         | 0.404                         |
+ | hand_yolov8n.pt       | 2D / realistic hand   | 0.767                         | 0.505                         |
+ | person_yolov8n-seg.pt | 2D / realistic person | 0.782 (bbox)<br/>0.761 (mask) | 0.555 (bbox)<br/>0.460 (mask) |
+ | person_yolov8s-seg.pt | 2D / realistic person | 0.824 (bbox)<br/>0.809 (mask) | 0.605 (bbox)<br/>0.508 (mask) |
+ | mediapipe_face_full   | realistic face        | -                             | -                             |
+ | mediapipe_face_short  | realistic face        | -                             | -                             |
+ | mediapipe_face_mesh   | realistic face        | -                             | -                             |
+
+ The yolo models can be found on huggingface: [Bingsu/adetailer](https://huggingface.co/Bingsu/adetailer).
+
+ ### User Model
+
+ Put your [ultralytics](https://github.com/ultralytics/ultralytics) model in `webui/models/adetailer`. The model name should end with `.pt` or `.pth`.
+
+ It must be a bbox detection or segmentation model, and all of its labels will be used.
+
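For reference, a custom model dropped into that folder is an ordinary ultralytics checkpoint and can be loaded and run on its own roughly like this (a standalone sketch with placeholder file names, not how the extension invokes it internally):

```python
from ultralytics import YOLO

# Load a detection or segmentation checkpoint placed in webui/models/adetailer.
model = YOLO("webui/models/adetailer/my_face_detector.pt")

# Run prediction; only detections above the confidence threshold are returned.
results = model("example.png", conf=0.3)

for result in results:
    boxes = result.boxes.xyxy.tolist()  # bounding boxes as [x1, y1, x2, y2]
    confs = result.boxes.conf.tolist()  # per-box confidence scores
    masks = result.masks                # None for bbox-only detection models
    print(boxes, confs, masks is not None)
```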
+ ### Dataset
+
+ Datasets used for training the yolo models are:
+
+ #### Face
+
+ - [Anime Face CreateML](https://universe.roboflow.com/my-workspace-mph8o/anime-face-createml)
+ - [xml2txt](https://universe.roboflow.com/0oooooo0/xml2txt-njqx1)
+ - [AN](https://universe.roboflow.com/sed-b8vkf/an-lfg5i)
+ - [wider face](http://shuoyang1213.me/WIDERFACE/index.html)
+
+ #### Hand
+
+ - [AnHDet](https://universe.roboflow.com/1-yshhi/anhdet)
+ - [hand-detection-fuao9](https://universe.roboflow.com/catwithawand/hand-detection-fuao9)
+
+ #### Person
+
+ - [coco2017](https://cocodataset.org/#home) (only person)
+ - [AniSeg](https://github.com/jerryli27/AniSeg)
+ - [skytnt/anime-segmentation](https://huggingface.co/datasets/skytnt/anime-segmentation)
+
+ ## Example
+
+ ![image](https://i.imgur.com/38RSxSO.png)
+ ![image](https://i.imgur.com/2CYgjLx.png)
+
+ [![ko-fi](https://ko-fi.com/img/githubbutton_sm.svg)](https://ko-fi.com/F1F1L7V2N)
adetailer/__init__.py ADDED
@@ -0,0 +1,20 @@
1
+ from .__version__ import __version__
2
+ from .args import AD_ENABLE, ALL_ARGS, ADetailerArgs, EnableChecker
3
+ from .common import PredictOutput, get_models
4
+ from .mediapipe import mediapipe_predict
5
+ from .ultralytics import ultralytics_predict
6
+
7
+ AFTER_DETAILER = "ADetailer"
8
+
9
+ __all__ = [
10
+ "__version__",
11
+ "AD_ENABLE",
12
+ "ADetailerArgs",
13
+ "AFTER_DETAILER",
14
+ "ALL_ARGS",
15
+ "EnableChecker",
16
+ "PredictOutput",
17
+ "get_models",
18
+ "mediapipe_predict",
19
+ "ultralytics_predict",
20
+ ]
adetailer/__version__.py ADDED
@@ -0,0 +1 @@
1
+ __version__ = "23.6.0"
adetailer/args.py ADDED
@@ -0,0 +1,173 @@
1
+ from __future__ import annotations
2
+
3
+ from collections import UserList
4
+ from functools import cached_property, partial
5
+ from typing import Any, Literal, NamedTuple, Union
6
+
7
+ import pydantic
8
+ from pydantic import (
9
+ BaseModel,
10
+ Extra,
11
+ NonNegativeFloat,
12
+ NonNegativeInt,
13
+ PositiveInt,
14
+ confloat,
15
+ constr,
16
+ )
17
+
18
+
19
+ class Arg(NamedTuple):
20
+ attr: str
21
+ name: str
22
+
23
+
24
+ class ArgsList(UserList):
25
+ @cached_property
26
+ def attrs(self) -> tuple[str]:
27
+ return tuple(attr for attr, _ in self)
28
+
29
+ @cached_property
30
+ def names(self) -> tuple[str]:
31
+ return tuple(name for _, name in self)
32
+
33
+
34
+ class ADetailerArgs(BaseModel, extra=Extra.forbid):
35
+ ad_model: str = "None"
36
+ ad_prompt: str = ""
37
+ ad_negative_prompt: str = ""
38
+ ad_confidence: confloat(ge=0.0, le=1.0) = 0.3
39
+ ad_mask_min_ratio: confloat(ge=0.0, le=1.0) = 0.0
40
+ ad_mask_max_ratio: confloat(ge=0.0, le=1.0) = 1.0
41
+ ad_dilate_erode: int = 32
42
+ ad_x_offset: int = 0
43
+ ad_y_offset: int = 0
44
+ ad_mask_merge_invert: Literal["None", "Merge", "Merge and Invert"] = "None"
45
+ ad_mask_blur: NonNegativeInt = 4
46
+ ad_denoising_strength: confloat(ge=0.0, le=1.0) = 0.4
47
+ ad_inpaint_only_masked: bool = True
48
+ ad_inpaint_only_masked_padding: NonNegativeInt = 0
49
+ ad_use_inpaint_width_height: bool = False
50
+ ad_inpaint_width: PositiveInt = 512
51
+ ad_inpaint_height: PositiveInt = 512
52
+ ad_use_steps: bool = False
53
+ ad_steps: PositiveInt = 28
54
+ ad_use_cfg_scale: bool = False
55
+ ad_cfg_scale: NonNegativeFloat = 7.0
56
+ ad_restore_face: bool = False
57
+ ad_controlnet_model: constr(regex=r".*(inpaint|tile|scribble|lineart|openpose).*|^None$") = "None"
58
+ ad_controlnet_weight: confloat(ge=0.0, le=1.0) = 1.0
59
+ ad_controlnet_guidance_end: confloat(ge=0.0, le=1.0) = 1.0
60
+
61
+ @staticmethod
62
+ def ppop(
63
+ p: dict[str, Any],
64
+ key: str,
65
+ pops: list[str] | None = None,
66
+ cond: Any = None,
67
+ ):
68
+ if pops is None:
69
+ pops = [key]
70
+ value = p[key]
71
+ cond = (not bool(value)) if cond is None else value == cond
72
+
73
+ if cond:
74
+ for k in pops:
75
+ p.pop(k)
76
+
77
+ def extra_params(self, suffix: str = ""):
78
+ if self.ad_model == "None":
79
+ return {}
80
+
81
+ p = {name: getattr(self, attr) for attr, name in ALL_ARGS}
82
+ ppop = partial(self.ppop, p)
83
+
84
+ ppop("ADetailer prompt")
85
+ ppop("ADetailer negative prompt")
86
+ ppop("ADetailer mask min ratio", cond=0.0)
87
+ ppop("ADetailer mask max ratio", cond=1.0)
88
+ ppop("ADetailer x offset", cond=0)
89
+ ppop("ADetailer y offset", cond=0)
90
+ ppop("ADetailer mask merge/invert", cond="None")
91
+ ppop("ADetailer inpaint only masked", ["ADetailer inpaint padding"])
92
+ ppop(
93
+ "ADetailer use inpaint width/height",
94
+ [
95
+ "ADetailer use inpaint width/height",
96
+ "ADetailer inpaint width",
97
+ "ADetailer inpaint height",
98
+ ],
99
+ )
100
+ ppop(
101
+ "ADetailer use separate steps",
102
+ ["ADetailer use separate steps", "ADetailer steps"],
103
+ )
104
+ ppop(
105
+ "ADetailer use separate CFG scale",
106
+ ["ADetailer use separate CFG scale", "ADetailer CFG scale"],
107
+ )
108
+ ppop("ADetailer restore face")
109
+ ppop(
110
+ "ADetailer ControlNet model",
111
+ ["ADetailer ControlNet model", "ADetailer ControlNet weight", "ADetailer ControlNet guidance end"],
112
+ cond="None",
113
+ )
114
+
115
+ if suffix:
116
+ p = {k + suffix: v for k, v in p.items()}
117
+
118
+ return p
119
+
120
+
121
+ class EnableChecker(BaseModel):
122
+ a0: Union[bool, dict]
123
+ a1: Any
124
+
125
+ def is_enabled(self) -> bool:
126
+ ad_model = ALL_ARGS[0].attr
127
+ if isinstance(self.a0, dict):
128
+ return self.a0.get(ad_model, "None") != "None"
129
+ if not isinstance(self.a1, dict):
130
+ return False
131
+ return self.a0 and self.a1.get(ad_model, "None") != "None"
132
+
133
+
134
+ _all_args = [
135
+ ("ad_enable", "ADetailer enable"),
136
+ ("ad_model", "ADetailer model"),
137
+ ("ad_prompt", "ADetailer prompt"),
138
+ ("ad_negative_prompt", "ADetailer negative prompt"),
139
+ ("ad_confidence", "ADetailer confidence"),
140
+ ("ad_mask_min_ratio", "ADetailer mask min ratio"),
141
+ ("ad_mask_max_ratio", "ADetailer mask max ratio"),
142
+ ("ad_x_offset", "ADetailer x offset"),
143
+ ("ad_y_offset", "ADetailer y offset"),
144
+ ("ad_dilate_erode", "ADetailer dilate/erode"),
145
+ ("ad_mask_merge_invert", "ADetailer mask merge/invert"),
146
+ ("ad_mask_blur", "ADetailer mask blur"),
147
+ ("ad_denoising_strength", "ADetailer denoising strength"),
148
+ ("ad_inpaint_only_masked", "ADetailer inpaint only masked"),
149
+ ("ad_inpaint_only_masked_padding", "ADetailer inpaint padding"),
150
+ ("ad_use_inpaint_width_height", "ADetailer use inpaint width/height"),
151
+ ("ad_inpaint_width", "ADetailer inpaint width"),
152
+ ("ad_inpaint_height", "ADetailer inpaint height"),
153
+ ("ad_use_steps", "ADetailer use separate steps"),
154
+ ("ad_steps", "ADetailer steps"),
155
+ ("ad_use_cfg_scale", "ADetailer use separate CFG scale"),
156
+ ("ad_cfg_scale", "ADetailer CFG scale"),
157
+ ("ad_restore_face", "ADetailer restore face"),
158
+ ("ad_controlnet_model", "ADetailer ControlNet model"),
159
+ ("ad_controlnet_weight", "ADetailer ControlNet weight"),
160
+ ("ad_controlnet_guidance_end", "ADetailer ControlNet guidance end"),
161
+ ]
162
+
163
+ AD_ENABLE = Arg(*_all_args[0])
164
+ _args = [Arg(*args) for args in _all_args[1:]]
165
+ ALL_ARGS = ArgsList(_args)
166
+
167
+ BBOX_SORTBY = [
168
+ "None",
169
+ "Position (left to right)",
170
+ "Position (center to edge)",
171
+ "Area (large to small)",
172
+ ]
173
+ MASK_MERGE_INVERT = ["None", "Merge", "Merge and Invert"]
adetailer/common.py ADDED
@@ -0,0 +1,123 @@
1
+ from __future__ import annotations
2
+
3
+ from collections import OrderedDict
4
+ from dataclasses import dataclass, field
5
+ from pathlib import Path
6
+ from typing import Optional, Union
7
+
8
+ from huggingface_hub import hf_hub_download
9
+ from PIL import Image, ImageDraw
10
+
11
+ repo_id = "Bingsu/adetailer"
12
+
13
+
14
+ @dataclass
15
+ class PredictOutput:
16
+ bboxes: list[list[int | float]] = field(default_factory=list)
17
+ masks: list[Image.Image] = field(default_factory=list)
18
+ preview: Optional[Image.Image] = None
19
+
20
+
21
+ def hf_download(file: str):
22
+ try:
23
+ path = hf_hub_download(repo_id, file)
24
+ except Exception:
25
+ path = "INVALID"
26
+ return path
27
+
28
+
29
+ def get_models(
30
+ model_dir: Union[str, Path], huggingface: bool = True
31
+ ) -> OrderedDict[str, Optional[str]]:
32
+ model_dir = Path(model_dir)
33
+ if model_dir.is_dir():
34
+ model_paths = [
35
+ p
36
+ for p in model_dir.rglob("*")
37
+ if p.is_file() and p.suffix in (".pt", ".pth")
38
+ ]
39
+ else:
40
+ model_paths = []
41
+
42
+ models = OrderedDict()
43
+ if huggingface:
44
+ models.update(
45
+ {
46
+ "face_yolov8n.pt": hf_download("face_yolov8n.pt"),
47
+ "face_yolov8s.pt": hf_download("face_yolov8s.pt"),
48
+ "hand_yolov8n.pt": hf_download("hand_yolov8n.pt"),
49
+ "person_yolov8n-seg.pt": hf_download("person_yolov8n-seg.pt"),
50
+ "person_yolov8s-seg.pt": hf_download("person_yolov8s-seg.pt"),
51
+ }
52
+ )
53
+ models.update(
54
+ {
55
+ "mediapipe_face_full": None,
56
+ "mediapipe_face_short": None,
57
+ "mediapipe_face_mesh": None,
58
+ }
59
+ )
60
+
61
+ invalid_keys = [k for k, v in models.items() if v == "INVALID"]
62
+ for key in invalid_keys:
63
+ models.pop(key)
64
+
65
+ for path in model_paths:
66
+ if path.name in models:
67
+ continue
68
+ models[path.name] = str(path)
69
+
70
+ return models
71
+
72
+
73
+ def create_mask_from_bbox(
74
+ bboxes: list[list[float]], shape: tuple[int, int]
75
+ ) -> list[Image.Image]:
76
+ """
77
+ Parameters
78
+ ----------
79
+ bboxes: list[list[float]]
80
+ list of [x1, y1, x2, y2]
81
+ bounding boxes
82
+ shape: tuple[int, int]
83
+ shape of the image (width, height)
84
+
85
+ Returns
86
+ -------
87
+ masks: list[Image.Image]
88
+ A list of masks
89
+
90
+ """
91
+ masks = []
92
+ for bbox in bboxes:
93
+ mask = Image.new("L", shape, 0)
94
+ mask_draw = ImageDraw.Draw(mask)
95
+ mask_draw.rectangle(bbox, fill=255)
96
+ masks.append(mask)
97
+ return masks
98
+
99
+
100
+ def create_bbox_from_mask(
101
+ masks: list[Image.Image], shape: tuple[int, int]
102
+ ) -> list[list[int]]:
103
+ """
104
+ Parameters
105
+ ----------
106
+ masks: list[Image.Image]
107
+ A list of masks
108
+ shape: tuple[int, int]
109
+ shape of the image (width, height)
110
+
111
+ Returns
112
+ -------
113
+ bboxes: list[list[float]]
114
+ A list of bounding boxes
115
+
116
+ """
117
+ bboxes = []
118
+ for mask in masks:
119
+ mask = mask.resize(shape)
120
+ bbox = mask.getbbox()
121
+ if bbox is not None:
122
+ bboxes.append(list(bbox))
123
+ return bboxes
adetailer/mask.py ADDED
@@ -0,0 +1,247 @@
1
+ from __future__ import annotations
2
+
3
+ from enum import IntEnum
4
+ from functools import partial, reduce
5
+ from math import dist
6
+
7
+ import cv2
8
+ import numpy as np
9
+ from PIL import Image, ImageChops
10
+
11
+ from adetailer.args import MASK_MERGE_INVERT
12
+ from adetailer.common import PredictOutput
13
+
14
+
15
+ class SortBy(IntEnum):
16
+ NONE = 0
17
+ LEFT_TO_RIGHT = 1
18
+ CENTER_TO_EDGE = 2
19
+ AREA = 3
20
+
21
+
22
+ class MergeInvert(IntEnum):
23
+ NONE = 0
24
+ MERGE = 1
25
+ MERGE_INVERT = 2
26
+
27
+
28
+ def _dilate(arr: np.ndarray, value: int) -> np.ndarray:
29
+ kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (value, value))
30
+ return cv2.dilate(arr, kernel, iterations=1)
31
+
32
+
33
+ def _erode(arr: np.ndarray, value: int) -> np.ndarray:
34
+ kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (value, value))
35
+ return cv2.erode(arr, kernel, iterations=1)
36
+
37
+
38
+ def dilate_erode(img: Image.Image, value: int) -> Image.Image:
39
+ """
40
+ The dilate_erode function takes an image and a value.
41
+ If the value is positive, it dilates the image by that amount.
42
+ If the value is negative, it erodes the image by that amount.
43
+
44
+ Parameters
45
+ ----------
46
+ img: PIL.Image.Image
47
+ the image to be processed
48
+ value: int
49
+ kernel size of dilation or erosion
50
+
51
+ Returns
52
+ -------
53
+ PIL.Image.Image
54
+ The image that has been dilated or eroded
55
+ """
56
+ if value == 0:
57
+ return img
58
+
59
+ arr = np.array(img)
60
+ arr = _dilate(arr, value) if value > 0 else _erode(arr, -value)
61
+
62
+ return Image.fromarray(arr)
63
+
64
+
65
+ def offset(img: Image.Image, x: int = 0, y: int = 0) -> Image.Image:
66
+ """
67
+ The offset function takes an image and offsets it by a given x(→) and y(↑) value.
68
+
69
+ Parameters
70
+ ----------
71
+ mask: Image.Image
72
+ Pass the mask image to the function
73
+ x: int
74
+
75
+ y: int
76
+
77
+
78
+ Returns
79
+ -------
80
+ PIL.Image.Image
81
+ A new image that is offset by x and y
82
+ """
83
+ return ImageChops.offset(img, x, -y)
84
+
85
+
86
+ def is_all_black(img: Image.Image) -> bool:
87
+ arr = np.array(img)
88
+ return cv2.countNonZero(arr) == 0
89
+
90
+
91
+ def bbox_area(bbox: list[float]):
92
+ return (bbox[2] - bbox[0]) * (bbox[3] - bbox[1])
93
+
94
+
95
+ def mask_preprocess(
96
+ masks: list[Image.Image],
97
+ kernel: int = 0,
98
+ x_offset: int = 0,
99
+ y_offset: int = 0,
100
+ merge_invert: int | MergeInvert | str = MergeInvert.NONE,
101
+ ) -> list[Image.Image]:
102
+ """
103
+ The mask_preprocess function takes a list of masks and preprocesses them.
104
+ It dilates and erodes the masks, and offsets them by x_offset and y_offset.
105
+
106
+ Parameters
107
+ ----------
108
+ masks: list[Image.Image]
109
+ A list of masks
110
+ kernel: int
111
+ kernel size of dilation or erosion
112
+ x_offset: int
113
+
114
+ y_offset: int
115
+
116
+
117
+ Returns
118
+ -------
119
+ list[Image.Image]
120
+ A list of processed masks
121
+ """
122
+ if not masks:
123
+ return []
124
+
125
+ if x_offset != 0 or y_offset != 0:
126
+ masks = [offset(m, x_offset, y_offset) for m in masks]
127
+
128
+ if kernel != 0:
129
+ masks = [dilate_erode(m, kernel) for m in masks]
130
+ masks = [m for m in masks if not is_all_black(m)]
131
+
132
+ masks = mask_merge_invert(masks, mode=merge_invert)
133
+
134
+ return masks
135
+
136
+
137
+ # Bbox sorting
138
+ def _key_left_to_right(bbox: list[float]) -> float:
139
+ """
140
+ Left to right
141
+
142
+ Parameters
143
+ ----------
144
+ bbox: list[float]
145
+ list of [x1, y1, x2, y2]
146
+ """
147
+ return bbox[0]
148
+
149
+
150
+ def _key_center_to_edge(bbox: list[float], *, center: tuple[float, float]) -> float:
151
+ """
152
+ Center to edge
153
+
154
+ Parameters
155
+ ----------
156
+ bbox: list[float]
157
+ list of [x1, y1, x2, y2]
158
+ image: Image.Image
159
+ the image
160
+ """
161
+ bbox_center = ((bbox[0] + bbox[2]) / 2, (bbox[1] + bbox[3]) / 2)
162
+ return dist(center, bbox_center)
163
+
164
+
165
+ def _key_area(bbox: list[float]) -> float:
166
+ """
167
+ Large to small
168
+
169
+ Parameters
170
+ ----------
171
+ bbox: list[float]
172
+ list of [x1, y1, x2, y2]
173
+ """
174
+ return -bbox_area(bbox)
175
+
176
+
177
+ def sort_bboxes(
178
+ pred: PredictOutput, order: int | SortBy = SortBy.NONE
179
+ ) -> PredictOutput:
180
+ if order == SortBy.NONE or len(pred.bboxes) <= 1:
181
+ return pred
182
+
183
+ if order == SortBy.LEFT_TO_RIGHT:
184
+ key = _key_left_to_right
185
+ elif order == SortBy.CENTER_TO_EDGE:
186
+ width, height = pred.preview.size
187
+ center = (width / 2, height / 2)
188
+ key = partial(_key_center_to_edge, center=center)
189
+ elif order == SortBy.AREA:
190
+ key = _key_area
191
+ else:
192
+ raise RuntimeError
193
+
194
+ items = len(pred.bboxes)
195
+ idx = sorted(range(items), key=lambda i: key(pred.bboxes[i]))
196
+ pred.bboxes = [pred.bboxes[i] for i in idx]
197
+ pred.masks = [pred.masks[i] for i in idx]
198
+ return pred
199
+
200
+
201
+ # Filter by ratio
202
+ def is_in_ratio(bbox: list[float], low: float, high: float, orig_area: int) -> bool:
203
+ area = bbox_area(bbox)
204
+ return low <= area / orig_area <= high
205
+
206
+
207
+ def filter_by_ratio(pred: PredictOutput, low: float, high: float) -> PredictOutput:
208
+ if not pred.bboxes:
209
+ return pred
210
+
211
+ w, h = pred.preview.size
212
+ orig_area = w * h
213
+ items = len(pred.bboxes)
214
+ idx = [i for i in range(items) if is_in_ratio(pred.bboxes[i], low, high, orig_area)]
215
+ pred.bboxes = [pred.bboxes[i] for i in idx]
216
+ pred.masks = [pred.masks[i] for i in idx]
217
+ return pred
218
+
219
+
220
+ # Merge / Invert
221
+ def mask_merge(masks: list[Image.Image]) -> list[Image.Image]:
222
+ arrs = [np.array(m) for m in masks]
223
+ arr = reduce(cv2.bitwise_or, arrs)
224
+ return [Image.fromarray(arr)]
225
+
226
+
227
+ def mask_invert(masks: list[Image.Image]) -> list[Image.Image]:
228
+ return [ImageChops.invert(m) for m in masks]
229
+
230
+
231
+ def mask_merge_invert(
232
+ masks: list[Image.Image], mode: int | MergeInvert | str
233
+ ) -> list[Image.Image]:
234
+ if isinstance(mode, str):
235
+ mode = MASK_MERGE_INVERT.index(mode)
236
+
237
+ if mode == MergeInvert.NONE or not masks:
238
+ return masks
239
+
240
+ if mode == MergeInvert.MERGE:
241
+ return mask_merge(masks)
242
+
243
+ if mode == MergeInvert.MERGE_INVERT:
244
+ merged = mask_merge(masks)
245
+ return mask_invert(merged)
246
+
247
+ raise RuntimeError
adetailer/mediapipe.py ADDED
@@ -0,0 +1,111 @@
1
+ from __future__ import annotations
2
+
3
+ from functools import partial
4
+
5
+ import numpy as np
6
+ from PIL import Image, ImageDraw
7
+
8
+ from adetailer import PredictOutput
9
+ from adetailer.common import create_bbox_from_mask, create_mask_from_bbox
10
+
11
+
12
+ def mediapipe_predict(
13
+ model_type: str, image: Image.Image, confidence: float = 0.3
14
+ ) -> PredictOutput:
15
+ mapping = {
16
+ "mediapipe_face_short": partial(mediapipe_face_detection, 0),
17
+ "mediapipe_face_full": partial(mediapipe_face_detection, 1),
18
+ "mediapipe_face_mesh": mediapipe_face_mesh,
19
+ }
20
+ if model_type in mapping:
21
+ func = mapping[model_type]
22
+ return func(image, confidence)
23
+ raise RuntimeError(f"[-] ADetailer: Invalid mediapipe model type: {model_type}")
24
+
25
+
26
+ def mediapipe_face_detection(
27
+ model_type: int, image: Image.Image, confidence: float = 0.3
28
+ ) -> PredictOutput:
29
+ import mediapipe as mp
30
+
31
+ img_width, img_height = image.size
32
+
33
+ mp_face_detection = mp.solutions.face_detection
34
+ draw_util = mp.solutions.drawing_utils
35
+
36
+ img_array = np.array(image)
37
+
38
+ with mp_face_detection.FaceDetection(
39
+ model_selection=model_type, min_detection_confidence=confidence
40
+ ) as face_detector:
41
+ pred = face_detector.process(img_array)
42
+
43
+ if pred.detections is None:
44
+ return PredictOutput()
45
+
46
+ preview_array = img_array.copy()
47
+
48
+ bboxes = []
49
+ for detection in pred.detections:
50
+ draw_util.draw_detection(preview_array, detection)
51
+
52
+ bbox = detection.location_data.relative_bounding_box
53
+ x1 = bbox.xmin * img_width
54
+ y1 = bbox.ymin * img_height
55
+ w = bbox.width * img_width
56
+ h = bbox.height * img_height
57
+ x2 = x1 + w
58
+ y2 = y1 + h
59
+
60
+ bboxes.append([x1, y1, x2, y2])
61
+
62
+ masks = create_mask_from_bbox(bboxes, image.size)
63
+ preview = Image.fromarray(preview_array)
64
+
65
+ return PredictOutput(bboxes=bboxes, masks=masks, preview=preview)
66
+
67
+
68
+ def mediapipe_face_mesh(image: Image.Image, confidence: float = 0.3) -> PredictOutput:
69
+ import mediapipe as mp
70
+ from scipy.spatial import ConvexHull
71
+
72
+ mp_face_mesh = mp.solutions.face_mesh
73
+ draw_util = mp.solutions.drawing_utils
74
+ drawing_styles = mp.solutions.drawing_styles
75
+
76
+ w, h = image.size
77
+
78
+ with mp_face_mesh.FaceMesh(
79
+ static_image_mode=True, max_num_faces=20, min_detection_confidence=confidence
80
+ ) as face_mesh:
81
+ arr = np.array(image)
82
+ pred = face_mesh.process(arr)
83
+
84
+ if pred.multi_face_landmarks is None:
85
+ return PredictOutput()
86
+
87
+ preview = arr.copy()
88
+ masks = []
89
+
90
+ for landmarks in pred.multi_face_landmarks:
91
+ draw_util.draw_landmarks(
92
+ image=preview,
93
+ landmark_list=landmarks,
94
+ connections=mp_face_mesh.FACEMESH_TESSELATION,
95
+ landmark_drawing_spec=None,
96
+ connection_drawing_spec=drawing_styles.get_default_face_mesh_tesselation_style(),
97
+ )
98
+
99
+ points = np.array([(land.x * w, land.y * h) for land in landmarks.landmark])
100
+ hull = ConvexHull(points)
101
+ vertices = hull.vertices
102
+ outline = list(zip(points[vertices, 0], points[vertices, 1]))
103
+
104
+ mask = Image.new("L", image.size, "black")
105
+ draw = ImageDraw.Draw(mask)
106
+ draw.polygon(outline, fill="white")
107
+ masks.append(mask)
108
+
109
+ bboxes = create_bbox_from_mask(masks, image.size)
110
+ preview = Image.fromarray(preview)
111
+ return PredictOutput(bboxes=bboxes, masks=masks, preview=preview)
adetailer/ui.py ADDED
@@ -0,0 +1,432 @@
1
+ from __future__ import annotations
2
+
3
+ from functools import partial
4
+ from types import SimpleNamespace
5
+ from typing import Any
6
+
7
+ import gradio as gr
8
+
9
+ from adetailer import AFTER_DETAILER, __version__
10
+ from adetailer.args import AD_ENABLE, ALL_ARGS, MASK_MERGE_INVERT
11
+ from controlnet_ext import controlnet_exists, get_cn_inpaint_models
12
+
13
+
14
+ class Widgets(SimpleNamespace):
15
+ def tolist(self):
16
+ return [getattr(self, attr) for attr in ALL_ARGS.attrs]
17
+
18
+
19
+ def gr_interactive(value: bool = True):
20
+ return gr.update(interactive=value)
21
+
22
+
23
+ def ordinal(n: int) -> str:
24
+ d = {1: "st", 2: "nd", 3: "rd"}
25
+ return str(n) + ("th" if 11 <= n % 100 <= 13 else d.get(n % 10, "th"))
26
+
27
+
28
+ def suffix(n: int, c: str = " ") -> str:
29
+ return "" if n == 0 else c + ordinal(n + 1)
30
+
31
+
32
+ def on_widget_change(state: dict, value: Any, *, attr: str):
33
+ state[attr] = value
34
+ return state
35
+
36
+
37
+ def on_generate_click(state: dict, *values: Any):
38
+ for attr, value in zip(ALL_ARGS.attrs, values):
39
+ state[attr] = value
40
+ return state
41
+
42
+
43
+ def elem_id(item_id: str, n: int, is_img2img: bool) -> str:
44
+ tap = "img2img" if is_img2img else "txt2img"
45
+ suf = suffix(n, "_")
46
+ return f"script_{tap}_adetailer_{item_id}{suf}"
47
+
48
+
49
+ def adui(
50
+ num_models: int,
51
+ is_img2img: bool,
52
+ model_list: list[str],
53
+ t2i_button: gr.Button,
54
+ i2i_button: gr.Button,
55
+ ):
56
+ states = []
57
+ infotext_fields = []
58
+ eid = partial(elem_id, n=0, is_img2img=is_img2img)
59
+
60
+ with gr.Accordion(AFTER_DETAILER, open=False, elem_id=eid("ad_main_accordion")):
61
+ with gr.Row():
62
+ with gr.Column(scale=6):
63
+ ad_enable = gr.Checkbox(
64
+ label="Enable ADetailer",
65
+ value=False,
66
+ visible=True,
67
+ elem_id=eid("ad_enable"),
68
+ )
69
+
70
+ with gr.Column(scale=1, min_width=180):
71
+ gr.Markdown(
72
+ f"v{__version__}",
73
+ elem_id=eid("ad_version"),
74
+ )
75
+
76
+ infotext_fields.append((ad_enable, AD_ENABLE.name))
77
+
78
+ with gr.Group(), gr.Tabs():
79
+ for n in range(num_models):
80
+ with gr.Tab(ordinal(n + 1)):
81
+ state, infofields = one_ui_group(
82
+ n=n,
83
+ is_img2img=is_img2img,
84
+ model_list=model_list,
85
+ t2i_button=t2i_button,
86
+ i2i_button=i2i_button,
87
+ )
88
+
89
+ states.append(state)
90
+ infotext_fields.extend(infofields)
91
+
92
+ # components: [bool, dict, dict, ...]
93
+ components = [ad_enable] + states
94
+ return components, infotext_fields
95
+
96
+
97
+ def one_ui_group(
98
+ n: int,
99
+ is_img2img: bool,
100
+ model_list: list[str],
101
+ t2i_button: gr.Button,
102
+ i2i_button: gr.Button,
103
+ ):
104
+ w = Widgets()
105
+ state = gr.State({})
106
+ eid = partial(elem_id, n=n, is_img2img=is_img2img)
107
+
108
+ with gr.Row():
109
+ model_choices = model_list + ["None"] if n == 0 else ["None"] + model_list
110
+
111
+ w.ad_model = gr.Dropdown(
112
+ label="ADetailer model" + suffix(n),
113
+ choices=model_choices,
114
+ value=model_choices[0],
115
+ visible=True,
116
+ type="value",
117
+ elem_id=eid("ad_model"),
118
+ )
119
+
120
+ with gr.Group():
121
+ with gr.Row(elem_id=eid("ad_toprow_prompt")):
122
+ w.ad_prompt = gr.Textbox(
123
+ label="ad_prompt" + suffix(n),
124
+ show_label=False,
125
+ lines=3,
126
+ placeholder="ADetailer prompt" + suffix(n),
127
+ elem_id=eid("ad_prompt"),
128
+ )
129
+
130
+ with gr.Row(elem_id=eid("ad_toprow_negative_prompt")):
131
+ w.ad_negative_prompt = gr.Textbox(
132
+ label="ad_negative_prompt" + suffix(n),
133
+ show_label=False,
134
+ lines=2,
135
+ placeholder="ADetailer negative prompt" + suffix(n),
136
+ elem_id=eid("ad_negative_prompt"),
137
+ )
138
+
139
+ with gr.Group():
140
+ with gr.Accordion(
141
+ "Detection", open=False, elem_id=eid("ad_detection_accordion")
142
+ ):
143
+ detection(w, n, is_img2img)
144
+
145
+ with gr.Accordion(
146
+ "Mask Preprocessing",
147
+ open=False,
148
+ elem_id=eid("ad_mask_preprocessing_accordion"),
149
+ ):
150
+ mask_preprocessing(w, n, is_img2img)
151
+
152
+ with gr.Accordion(
153
+ "Inpainting", open=False, elem_id=eid("ad_inpainting_accordion")
154
+ ):
155
+ inpainting(w, n, is_img2img)
156
+
157
+ with gr.Group(), gr.Row(variant="panel"):
158
+ cn_inpaint_models = ["None"] + get_cn_inpaint_models()
159
+
160
+ w.ad_controlnet_model = gr.Dropdown(
161
+ label="ControlNet model" + suffix(n),
162
+ choices=cn_inpaint_models,
163
+ value="None",
164
+ visible=True,
165
+ type="value",
166
+ interactive=controlnet_exists,
167
+ elem_id=eid("ad_controlnet_model"),
168
+ )
169
+
170
+ w.ad_controlnet_weight = gr.Slider(
171
+ label="ControlNet weight" + suffix(n),
172
+ minimum=0.0,
173
+ maximum=1.0,
174
+ step=0.05,
175
+ value=1.0,
176
+ visible=True,
177
+ interactive=controlnet_exists,
178
+ elem_id=eid("ad_controlnet_weight"),
179
+ )
180
+
181
+ w.ad_controlnet_guidance_end = gr.Slider(
182
+ label="ControlNet guidance end" + suffix(n),
183
+ minimum=0.0,
184
+ maximum=1.0,
185
+ step=0.05,
186
+ value=1.0,
187
+ visible=True,
188
+ interactive=controlnet_exists,
189
+ elem_id=eid("ad_controlnet_guidance_end"),
190
+ )
191
+
192
+ for attr in ALL_ARGS.attrs:
193
+ widget = getattr(w, attr)
194
+ on_change = partial(on_widget_change, attr=attr)
195
+ widget.change(
196
+ fn=on_change, inputs=[state, widget], outputs=[state], queue=False
197
+ )
198
+
199
+ all_inputs = [state] + w.tolist()
200
+ target_button = i2i_button if is_img2img else t2i_button
201
+ target_button.click(
202
+ fn=on_generate_click, inputs=all_inputs, outputs=state, queue=False
203
+ )
204
+
205
+ infotext_fields = [(getattr(w, attr), name + suffix(n)) for attr, name in ALL_ARGS]
206
+
207
+ return state, infotext_fields
208
+
209
+
210
+ def detection(w: Widgets, n: int, is_img2img: bool):
211
+ eid = partial(elem_id, n=n, is_img2img=is_img2img)
212
+
213
+ with gr.Row():
214
+ with gr.Column():
215
+ w.ad_confidence = gr.Slider(
216
+ label="Detection model confidence threshold" + suffix(n),
217
+ minimum=0.0,
218
+ maximum=1.0,
219
+ step=0.01,
220
+ value=0.3,
221
+ visible=True,
222
+ elem_id=eid("ad_confidence"),
223
+ )
224
+
225
+ with gr.Column(variant="compact"):
226
+ w.ad_mask_min_ratio = gr.Slider(
227
+ label="Mask min area ratio" + suffix(n),
228
+ minimum=0.0,
229
+ maximum=1.0,
230
+ step=0.001,
231
+ value=0.0,
232
+ visible=True,
233
+ elem_id=eid("ad_mask_min_ratio"),
234
+ )
235
+ w.ad_mask_max_ratio = gr.Slider(
236
+ label="Mask max area ratio" + suffix(n),
237
+ minimum=0.0,
238
+ maximum=1.0,
239
+ step=0.001,
240
+ value=1.0,
241
+ visible=True,
242
+ elem_id=eid("ad_mask_max_ratio"),
243
+ )
244
+
245
+
246
+ def mask_preprocessing(w: Widgets, n: int, is_img2img: bool):
247
+ eid = partial(elem_id, n=n, is_img2img=is_img2img)
248
+
249
+ with gr.Group():
250
+ with gr.Row():
251
+ with gr.Column(variant="compact"):
252
+ w.ad_x_offset = gr.Slider(
253
+ label="Mask x(→) offset" + suffix(n),
254
+ minimum=-200,
255
+ maximum=200,
256
+ step=1,
257
+ value=0,
258
+ visible=True,
259
+ elem_id=eid("ad_x_offset"),
260
+ )
261
+ w.ad_y_offset = gr.Slider(
262
+ label="Mask y(↑) offset" + suffix(n),
263
+ minimum=-200,
264
+ maximum=200,
265
+ step=1,
266
+ value=0,
267
+ visible=True,
268
+ elem_id=eid("ad_y_offset"),
269
+ )
270
+
271
+ with gr.Column(variant="compact"):
272
+ w.ad_dilate_erode = gr.Slider(
273
+ label="Mask erosion (-) / dilation (+)" + suffix(n),
274
+ minimum=-128,
275
+ maximum=128,
276
+ step=4,
277
+ value=4,
278
+ visible=True,
279
+ elem_id=eid("ad_dilate_erode"),
280
+ )
281
+
282
+ with gr.Row():
283
+ w.ad_mask_merge_invert = gr.Radio(
284
+ label="Mask merge mode" + suffix(n),
285
+ choices=MASK_MERGE_INVERT,
286
+ value="None",
287
+ elem_id=eid("ad_mask_merge_invert"),
288
+ )
289
+
290
+
291
+ def inpainting(w: Widgets, n: int, is_img2img: bool):
292
+ eid = partial(elem_id, n=n, is_img2img=is_img2img)
293
+
294
+ with gr.Group():
295
+ with gr.Row():
296
+ w.ad_mask_blur = gr.Slider(
297
+ label="Inpaint mask blur" + suffix(n),
298
+ minimum=0,
299
+ maximum=64,
300
+ step=1,
301
+ value=4,
302
+ visible=True,
303
+ elem_id=eid("ad_mask_blur"),
304
+ )
305
+
306
+ w.ad_denoising_strength = gr.Slider(
307
+ label="Inpaint denoising strength" + suffix(n),
308
+ minimum=0.0,
309
+ maximum=1.0,
310
+ step=0.01,
311
+ value=0.4,
312
+ visible=True,
313
+ elem_id=eid("ad_denoising_strength"),
314
+ )
315
+
316
+ with gr.Row():
317
+ with gr.Column(variant="compact"):
318
+ w.ad_inpaint_only_masked = gr.Checkbox(
319
+ label="Inpaint only masked" + suffix(n),
320
+ value=True,
321
+ visible=True,
322
+ elem_id=eid("ad_inpaint_full_res"),
323
+ )
324
+ w.ad_inpaint_only_masked_padding = gr.Slider(
325
+ label="Inpaint only masked padding, pixels" + suffix(n),
326
+ minimum=0,
327
+ maximum=256,
328
+ step=4,
329
+ value=32,
330
+ visible=True,
331
+ elem_id=eid("ad_inpaint_full_res_padding"),
332
+ )
333
+
334
+ w.ad_inpaint_only_masked.change(
335
+ gr_interactive,
336
+ inputs=w.ad_inpaint_only_masked,
337
+ outputs=w.ad_inpaint_only_masked_padding,
338
+ queue=False,
339
+ )
340
+
341
+ with gr.Column(variant="compact"):
342
+ w.ad_use_inpaint_width_height = gr.Checkbox(
343
+ label="Use separate width/height" + suffix(n),
344
+ value=False,
345
+ visible=True,
346
+ elem_id=eid("ad_use_inpaint_width_height"),
347
+ )
348
+
349
+ w.ad_inpaint_width = gr.Slider(
350
+ label="inpaint width" + suffix(n),
351
+ minimum=64,
352
+ maximum=2048,
353
+ step=4,
354
+ value=512,
355
+ visible=True,
356
+ elem_id=eid("ad_inpaint_width"),
357
+ )
358
+
359
+ w.ad_inpaint_height = gr.Slider(
360
+ label="inpaint height" + suffix(n),
361
+ minimum=64,
362
+ maximum=2048,
363
+ step=4,
364
+ value=512,
365
+ visible=True,
366
+ elem_id=eid("ad_inpaint_height"),
367
+ )
368
+
369
+ w.ad_use_inpaint_width_height.change(
370
+ lambda value: (gr_interactive(value), gr_interactive(value)),
371
+ inputs=w.ad_use_inpaint_width_height,
372
+ outputs=[w.ad_inpaint_width, w.ad_inpaint_height],
373
+ queue=False,
374
+ )
375
+
376
+ with gr.Row():
377
+ with gr.Column(variant="compact"):
378
+ w.ad_use_steps = gr.Checkbox(
379
+ label="Use separate steps" + suffix(n),
380
+ value=False,
381
+ visible=True,
382
+ elem_id=eid("ad_use_steps"),
383
+ )
384
+
385
+ w.ad_steps = gr.Slider(
386
+ label="ADetailer steps" + suffix(n),
387
+ minimum=1,
388
+ maximum=150,
389
+ step=1,
390
+ value=28,
391
+ visible=True,
392
+ elem_id=eid("ad_steps"),
393
+ )
394
+
395
+ w.ad_use_steps.change(
396
+ gr_interactive,
397
+ inputs=w.ad_use_steps,
398
+ outputs=w.ad_steps,
399
+ queue=False,
400
+ )
401
+
402
+ with gr.Column(variant="compact"):
403
+ w.ad_use_cfg_scale = gr.Checkbox(
404
+ label="Use separate CFG scale" + suffix(n),
405
+ value=False,
406
+ visible=True,
407
+ elem_id=eid("ad_use_cfg_scale"),
408
+ )
409
+
410
+ w.ad_cfg_scale = gr.Slider(
411
+ label="ADetailer CFG scale" + suffix(n),
412
+ minimum=0.0,
413
+ maximum=30.0,
414
+ step=0.5,
415
+ value=7.0,
416
+ visible=True,
417
+ elem_id=eid("ad_cfg_scale"),
418
+ )
419
+
420
+ w.ad_use_cfg_scale.change(
421
+ gr_interactive,
422
+ inputs=w.ad_use_cfg_scale,
423
+ outputs=w.ad_cfg_scale,
424
+ queue=False,
425
+ )
426
+
427
+ with gr.Row():
428
+ w.ad_restore_face = gr.Checkbox(
429
+ label="Restore faces after ADetailer" + suffix(n),
430
+ value=False,
431
+ elem_id=eid("ad_restore_face"),
432
+ )
adetailer/ultralytics.py ADDED
@@ -0,0 +1,54 @@
1
+ from __future__ import annotations
2
+
3
+ from pathlib import Path
4
+
5
+ import cv2
6
+ from PIL import Image
7
+
8
+ from adetailer import PredictOutput
9
+ from adetailer.common import create_mask_from_bbox
10
+
11
+
12
+ def ultralytics_predict(
13
+ model_path: str | Path,
14
+ image: Image.Image,
15
+ confidence: float = 0.3,
16
+ device: str = "",
17
+ ) -> PredictOutput:
18
+ from ultralytics import YOLO
19
+
20
+ model_path = str(model_path)
21
+
22
+ model = YOLO(model_path)
23
+ pred = model(image, conf=confidence, device=device)
24
+
25
+ bboxes = pred[0].boxes.xyxy.cpu().numpy()
26
+ if bboxes.size == 0:
27
+ return PredictOutput()
28
+ bboxes = bboxes.tolist()
29
+
30
+ if pred[0].masks is None:
31
+ masks = create_mask_from_bbox(bboxes, image.size)
32
+ else:
33
+ masks = mask_to_pil(pred[0].masks.data, image.size)
34
+ preview = pred[0].plot()
35
+ preview = cv2.cvtColor(preview, cv2.COLOR_BGR2RGB)
36
+ preview = Image.fromarray(preview)
37
+
38
+ return PredictOutput(bboxes=bboxes, masks=masks, preview=preview)
39
+
40
+
41
+ def mask_to_pil(masks, shape: tuple[int, int]) -> list[Image.Image]:
42
+ """
43
+ Parameters
44
+ ----------
45
+ masks: torch.Tensor, dtype=torch.float32, shape=(N, H, W).
46
+ The device can be CUDA, but `to_pil_image` takes care of that.
47
+
48
+ shape: tuple[int, int]
49
+ (width, height) of the original image
50
+ """
51
+ from torchvision.transforms.functional import to_pil_image
52
+
53
+ n = masks.shape[0]
54
+ return [to_pil_image(masks[i], mode="L").resize(shape) for i in range(n)]
controlnet_ext/__init__.py ADDED
@@ -0,0 +1,7 @@
1
+ from .controlnet_ext import ControlNetExt, controlnet_exists, get_cn_inpaint_models
2
+
3
+ __all__ = [
4
+ "ControlNetExt",
5
+ "controlnet_exists",
6
+ "get_cn_inpaint_models",
7
+ ]
controlnet_ext/controlnet_ext.py ADDED
@@ -0,0 +1,134 @@
1
+ from __future__ import annotations
2
+
3
+ import importlib
4
+ from functools import lru_cache
5
+ from pathlib import Path
6
+ import re
7
+
8
+ from modules import sd_models, shared
9
+ from modules.paths import data_path, models_path, script_path
10
+
11
+ ext_path = Path(data_path, "extensions")
12
+ ext_builtin_path = Path(script_path, "extensions-builtin")
13
+ is_in_builtin = False # compatibility for vladmandic/automatic
14
+ controlnet_exists = False
15
+ controlnet_enabled_models = {
16
+ 'inpaint': 'inpaint_global_harmonious',
17
+ 'scribble': 't2ia_sketch_pidi',
18
+ 'lineart': 'lineart_coarse',
19
+ 'openpose': 'openpose_full',
20
+ 'tile': None,
21
+ }
22
+ controlnet_model_regex = re.compile(r'.*('+('|'.join(controlnet_enabled_models.keys()))+').*')
23
+
24
+ if ext_path.exists():
25
+ controlnet_exists = any(
26
+ p.name == "sd-webui-controlnet" for p in ext_path.iterdir() if p.is_dir()
27
+ )
28
+
29
+ if not controlnet_exists and ext_builtin_path.exists():
30
+ controlnet_exists = any(
31
+ p.name == "sd-webui-controlnet"
32
+ for p in ext_builtin_path.iterdir()
33
+ if p.is_dir()
34
+ )
35
+
36
+ if controlnet_exists:
37
+ is_in_builtin = True
38
+
39
+
40
+ class ControlNetExt:
41
+ def __init__(self):
42
+ self.cn_models = ["None"]
43
+ self.cn_available = False
44
+ self.external_cn = None
45
+
46
+ def init_controlnet(self):
47
+ if is_in_builtin:
48
+ import_path = "extensions-builtin.sd-webui-controlnet.scripts.external_code"
49
+ else:
50
+ import_path = "extensions.sd-webui-controlnet.scripts.external_code"
51
+
52
+ self.external_cn = importlib.import_module(import_path, "external_code")
53
+ self.cn_available = True
54
+ models = self.external_cn.get_models()
55
+ self.cn_models.extend(m for m in models if controlnet_model_regex.match(m))
56
+
57
+ def _update_scripts_args(self, p, model: str, weight: float, guidance_end: float):
58
+ module = None
59
+ for m, v in controlnet_enabled_models.items():
60
+ if m in model:
61
+ module = v
62
+ break
63
+
64
+ cn_units = [
65
+ self.external_cn.ControlNetUnit(
66
+ model=model,
67
+ weight=weight,
68
+ control_mode=self.external_cn.ControlMode.BALANCED,
69
+ module=module,
70
+ guidance_end=guidance_end,
71
+ pixel_perfect=True,
72
+ )
73
+ ]
74
+
75
+ self.external_cn.update_cn_script_in_processing(p, cn_units)
76
+
77
+ def update_scripts_args(self, p, model: str, weight: float, guidance_end: float):
78
+ if self.cn_available and model != "None":
79
+ self._update_scripts_args(p, model, weight, guidance_end)
80
+
81
+
82
+ def get_cn_model_dirs() -> list[Path]:
83
+ cn_model_dir = Path(models_path, "ControlNet")
84
+ if is_in_builtin:
85
+ cn_model_dir_old = Path(ext_builtin_path, "sd-webui-controlnet", "models")
86
+ else:
87
+ cn_model_dir_old = Path(ext_path, "sd-webui-controlnet", "models")
88
+ ext_dir1 = shared.opts.data.get("control_net_models_path", "")
89
+ ext_dir2 = shared.opts.data.get("controlnet_dir", "")
90
+
91
+ dirs = [cn_model_dir, cn_model_dir_old]
92
+ for ext_dir in [ext_dir1, ext_dir2]:
93
+ if ext_dir:
94
+ dirs.append(Path(ext_dir))
95
+
96
+ return dirs
97
+
98
+
99
+ @lru_cache
100
+ def _get_cn_inpaint_models() -> list[str]:
101
+ """
102
+ Since we can't import ControlNet, we use a function that does something like
103
+ controlnet's `list(global_state.cn_models_names.values())`.
104
+ """
105
+ cn_model_exts = (".pt", ".pth", ".ckpt", ".safetensors")
106
+ dirs = get_cn_model_dirs()
107
+ name_filter = shared.opts.data.get("control_net_models_name_filter", "")
108
+ name_filter = name_filter.strip(" ").lower()
109
+
110
+ model_paths = []
111
+
112
+ for base in dirs:
113
+ if not base.exists():
114
+ continue
115
+
116
+ for p in base.rglob("*"):
117
+ if p.is_file() and p.suffix in cn_model_exts and controlnet_model_regex.match(p.name):
118
+ if name_filter and name_filter not in p.name.lower():
119
+ continue
120
+ model_paths.append(p)
121
+ model_paths.sort(key=lambda p: p.name)
122
+
123
+ models = []
124
+ for p in model_paths:
125
+ model_hash = sd_models.model_hash(p)
126
+ name = f"{p.stem} [{model_hash}]"
127
+ models.append(name)
128
+ return models
129
+
130
+
131
+ def get_cn_inpaint_models() -> list[str]:
132
+ if controlnet_exists:
133
+ return _get_cn_inpaint_models()
134
+ return []
controlnet_ext/restore.py ADDED
@@ -0,0 +1,49 @@
1
+ from __future__ import annotations
2
+
3
+ from contextlib import contextmanager
4
+
5
+ from modules import img2img, processing, shared
6
+
7
+
8
+ def cn_restore_unet_hook(p, cn_latest_network):
9
+ if cn_latest_network is not None:
10
+ unet = p.sd_model.model.diffusion_model
11
+ cn_latest_network.restore(unet)
12
+
13
+
14
+ class CNHijackRestore:
15
+ def __init__(self):
16
+ self.process = hasattr(processing, "__controlnet_original_process_images_inner")
17
+ self.img2img = hasattr(img2img, "__controlnet_original_process_batch")
18
+
19
+ def __enter__(self):
20
+ if self.process:
21
+ self.orig_process = processing.process_images_inner
22
+ processing.process_images_inner = getattr(
23
+ processing, "__controlnet_original_process_images_inner"
24
+ )
25
+ if self.img2img:
26
+ self.orig_img2img = img2img.process_batch
27
+ img2img.process_batch = getattr(
28
+ img2img, "__controlnet_original_process_batch"
29
+ )
30
+
31
+ def __exit__(self, *args, **kwargs):
32
+ if self.process:
33
+ processing.process_images_inner = self.orig_process
34
+ if self.img2img:
35
+ img2img.process_batch = self.orig_img2img
36
+
37
+
38
+ @contextmanager
39
+ def cn_allow_script_control():
40
+ orig = False
41
+ if "control_net_allow_script_control" in shared.opts.data:
42
+ try:
43
+ orig = shared.opts.data["control_net_allow_script_control"]
44
+ shared.opts.data["control_net_allow_script_control"] = True
45
+ yield
46
+ finally:
47
+ shared.opts.data["control_net_allow_script_control"] = orig
48
+ else:
49
+ yield
install.py ADDED
@@ -0,0 +1,80 @@
1
+ from __future__ import annotations
2
+
3
+ import importlib.util
4
+ import subprocess
5
+ import sys
6
+ from importlib.metadata import version # python >= 3.8
7
+
8
+ from packaging.version import parse
9
+
10
+
11
+ def is_installed(
12
+ package: str, min_version: str | None = None, max_version: str | None = None
13
+ ):
14
+ try:
15
+ spec = importlib.util.find_spec(package)
16
+ except ModuleNotFoundError:
17
+ return False
18
+
19
+ if spec is None:
20
+ return False
21
+
22
+ if not min_version and not max_version:
23
+ return True
24
+
25
+ if not min_version:
26
+ min_version = "0.0.0"
27
+ if not max_version:
28
+ max_version = "99999999.99999999.99999999"
29
+
30
+ if package == "google.protobuf":
31
+ package = "protobuf"
32
+
33
+ try:
34
+ pkg_version = version(package)
35
+ return parse(min_version) <= parse(pkg_version) <= parse(max_version)
36
+ except Exception:
37
+ return False
38
+
39
+
40
+ def run_pip(*args):
41
+ subprocess.run([sys.executable, "-m", "pip", "install", *args])
42
+
43
+
44
+ def install():
45
+ deps = [
46
+ # requirements
47
+ ("ultralytics", "8.0.97", None),
48
+ ("mediapipe", "0.10.0", None),
49
+ ("huggingface_hub", None, None),
50
+ ("pydantic", None, None),
51
+ # mediapipe
52
+ ("protobuf", "3.20.0", "3.20.9999"),
53
+ ]
54
+
55
+ for pkg, low, high in deps:
56
+ # https://github.com/protocolbuffers/protobuf/tree/main/python
57
+ name = "google.protobuf" if pkg == "protobuf" else pkg
58
+
59
+ if not is_installed(name, low, high):
60
+ if low and high:
61
+ cmd = f"{pkg}>={low},<={high}"
62
+ elif low:
63
+ cmd = f"{pkg}>={low}"
64
+ elif high:
65
+ cmd = f"{pkg}<={high}"
66
+ else:
67
+ cmd = pkg
68
+
69
+ run_pip("-U", cmd)
70
+
71
+
72
+ try:
73
+ import launch
74
+
75
+ skip_install = launch.args.skip_install
76
+ except Exception:
77
+ skip_install = False
78
+
79
+ if not skip_install:
80
+ install()
preload.py ADDED
@@ -0,0 +1,9 @@
1
+ import argparse
2
+
3
+
4
+ def preload(parser: argparse.ArgumentParser):
5
+ parser.add_argument(
6
+ "--ad-no-huggingface",
7
+ action="store_true",
8
+ help="Don't use adetailer models from huggingface",
9
+ )
pyproject.toml ADDED
@@ -0,0 +1,26 @@
 
1
+ [project]
2
+ name = "adetailer"
3
+ description = "An object detection and auto-mask extension for stable diffusion webui."
4
+ authors = [
5
+ {name = "dowon", email = "ks2515@naver.com"},
6
+ ]
7
+ requires-python = ">=3.8,<3.12"
8
+ readme = "README.md"
9
+ license = {text = "AGPL-3.0"}
10
+
11
+ [project.urls]
12
+ repository = "https://github.com/Bing-su/adetailer"
13
+
14
+ [tool.isort]
15
+ profile = "black"
16
+ known_first_party = ["launch", "modules"]
17
+
18
+ [tool.ruff]
19
+ select = ["A", "B", "C4", "E", "F", "I001", "ISC", "N", "PIE", "PT", "RET", "SIM", "UP", "W"]
20
+ ignore = ["B008", "B905", "E501", "F401", "UP007"]
21
+
22
+ [tool.ruff.isort]
23
+ known-first-party = ["launch", "modules"]
24
+
25
+ [tool.ruff.per-file-ignores]
26
+ "sd_webui/*.py" = ["B027", "F403"]
scripts/!adetailer.py ADDED
@@ -0,0 +1,630 @@
1
+ from __future__ import annotations
2
+
3
+ import os
4
+ import platform
5
+ import re
6
+ import sys
7
+ import traceback
8
+ from contextlib import contextmanager, suppress
9
+ from copy import copy, deepcopy
10
+ from pathlib import Path
11
+ from textwrap import dedent
12
+ from typing import Any
13
+
14
+ import gradio as gr
15
+ import torch
16
+
17
+ import modules # noqa: F401
18
+ from adetailer import (
19
+ AFTER_DETAILER,
20
+ __version__,
21
+ get_models,
22
+ mediapipe_predict,
23
+ ultralytics_predict,
24
+ )
25
+ from adetailer.args import ALL_ARGS, BBOX_SORTBY, ADetailerArgs, EnableChecker
26
+ from adetailer.common import PredictOutput
27
+ from adetailer.mask import filter_by_ratio, mask_preprocess, sort_bboxes
28
+ from adetailer.ui import adui, ordinal, suffix
29
+ from controlnet_ext import ControlNetExt, controlnet_exists
30
+ from controlnet_ext.restore import (
31
+ CNHijackRestore,
32
+ cn_allow_script_control,
33
+ cn_restore_unet_hook,
34
+ )
35
+ from sd_webui import images, safe, script_callbacks, scripts, shared
36
+ from sd_webui.paths import data_path, models_path
37
+ from sd_webui.processing import (
38
+ StableDiffusionProcessingImg2Img,
39
+ create_infotext,
40
+ process_images,
41
+ )
42
+ from sd_webui.shared import cmd_opts, opts, state
43
+
44
+ with suppress(ImportError):
45
+ from rich import print
46
+
47
+
48
+ no_huggingface = getattr(cmd_opts, "ad_no_huggingface", False)
49
+ adetailer_dir = Path(models_path, "adetailer")
50
+ model_mapping = get_models(adetailer_dir, huggingface=not no_huggingface)
51
+ txt2img_submit_button = img2img_submit_button = None
52
+ SCRIPT_DEFAULT = "dynamic_prompting,dynamic_thresholding,wildcard_recursive,wildcards"
53
+
54
+ if (
55
+ not adetailer_dir.exists()
56
+ and adetailer_dir.parent.exists()
57
+ and os.access(adetailer_dir.parent, os.W_OK)
58
+ ):
59
+ adetailer_dir.mkdir()
60
+
61
+ print(
62
+ f"[-] ADetailer initialized. version: {__version__}, num models: {len(model_mapping)}"
63
+ )
64
+
65
+
66
+ @contextmanager
67
+ def change_torch_load():
68
+ orig = torch.load
69
+ try:
70
+ torch.load = safe.unsafe_torch_load
71
+ yield
72
+ finally:
73
+ torch.load = orig
74
+
75
+
76
+ @contextmanager
77
+ def pause_total_tqdm():
78
+ orig = opts.data.get("multiple_tqdm", True)
79
+ try:
80
+ opts.data["multiple_tqdm"] = False
81
+ yield
82
+ finally:
83
+ opts.data["multiple_tqdm"] = orig
84
+
85
+
86
+ class AfterDetailerScript(scripts.Script):
87
+ def __init__(self):
88
+ super().__init__()
89
+ self.ultralytics_device = self.get_ultralytics_device()
90
+
91
+ self.controlnet_ext = None
92
+ self.cn_script = None
93
+ self.cn_latest_network = None
94
+
95
+ def title(self):
96
+ return AFTER_DETAILER
97
+
98
+ def show(self, is_img2img):
99
+ return scripts.AlwaysVisible
100
+
101
+ def ui(self, is_img2img):
102
+ num_models = opts.data.get("ad_max_models", 2)
103
+ model_list = list(model_mapping.keys())
104
+
105
+ components, infotext_fields = adui(
106
+ num_models,
107
+ is_img2img,
108
+ model_list,
109
+ txt2img_submit_button,
110
+ img2img_submit_button,
111
+ )
112
+
113
+ self.infotext_fields = infotext_fields
114
+ return components
115
+
116
+ def init_controlnet_ext(self) -> None:
117
+ if self.controlnet_ext is not None:
118
+ return
119
+ self.controlnet_ext = ControlNetExt()
120
+
121
+ if controlnet_exists:
122
+ try:
123
+ self.controlnet_ext.init_controlnet()
124
+ except ImportError:
125
+ error = traceback.format_exc()
126
+ print(
127
+ f"[-] ADetailer: ControlNetExt init failed:\n{error}",
128
+ file=sys.stderr,
129
+ )
130
+
131
+ def update_controlnet_args(self, p, args: ADetailerArgs) -> None:
132
+ if self.controlnet_ext is None:
133
+ self.init_controlnet_ext()
134
+
135
+ if (
136
+ self.controlnet_ext is not None
137
+ and self.controlnet_ext.cn_available
138
+ and args.ad_controlnet_model != "None"
139
+ ):
140
+ self.controlnet_ext.update_scripts_args(
141
+ p, args.ad_controlnet_model, args.ad_controlnet_weight, args.ad_controlnet_guidance_end
142
+ )
143
+
144
+ def is_ad_enabled(self, *args_) -> bool:
145
+ if len(args_) == 0 or (len(args_) == 1 and isinstance(args_[0], bool)):
146
+ message = f"""
147
+ [-] ADetailer: Not enough arguments passed to ADetailer.
148
+ input: {args_!r}
149
+ """
150
+ raise ValueError(dedent(message))
151
+ a0 = args_[0]
152
+ a1 = args_[1] if len(args_) > 1 else None
153
+ checker = EnableChecker(a0=a0, a1=a1)
154
+ return checker.is_enabled()
155
+
156
+ def get_args(self, *args_) -> list[ADetailerArgs]:
157
+ """
158
+ `args_` is at least 1 in length by `is_ad_enabled` immediately above
159
+ """
160
+ args = [arg for arg in args_ if isinstance(arg, dict)]
161
+
162
+ if not args:
163
+ message = f"[-] ADetailer: Invalid arguments passed to ADetailer: {args_!r}"
164
+ raise ValueError(message)
165
+
166
+ all_inputs = []
167
+
168
+ for n, arg_dict in enumerate(args, 1):
169
+ try:
170
+ inp = ADetailerArgs(**arg_dict)
171
+ except ValueError as e:
172
+ msgs = [
173
+ f"[-] ADetailer: ValidationError when validating {ordinal(n)} arguments: {e}\n"
174
+ ]
175
+ for attr in ALL_ARGS.attrs:
176
+ arg = arg_dict.get(attr)
177
+ dtype = type(arg)
178
+ arg = "DEFAULT" if arg is None else repr(arg)
179
+ msgs.append(f" {attr}: {arg} ({dtype})")
180
+ raise ValueError("\n".join(msgs)) from e
181
+
182
+ all_inputs.append(inp)
183
+
184
+ return all_inputs
185
+
186
+ def extra_params(self, arg_list: list[ADetailerArgs]) -> dict:
187
+ params = {}
188
+ for n, args in enumerate(arg_list):
189
+ params.update(args.extra_params(suffix=suffix(n)))
190
+ params["ADetailer version"] = __version__
191
+ return params
192
+
193
+ @staticmethod
194
+ def get_ultralytics_device() -> str:
195
+ '`device = ""` means autodetect'
196
+ device = ""
197
+ if platform.system() == "Darwin":
198
+ return device
199
+
200
+ if any(getattr(cmd_opts, vram, False) for vram in ["lowvram", "medvram"]):
201
+ device = "cpu"
202
+
203
+ return device
204
+
205
+ def prompt_blank_replacement(
206
+ self, all_prompts: list[str], i: int, default: str
207
+ ) -> str:
208
+ if not all_prompts:
209
+ return default
210
+ if i < len(all_prompts):
211
+ return all_prompts[i]
212
+ j = i % len(all_prompts)
213
+ return all_prompts[j]
214
+
215
+ def _get_prompt(
216
+ self, ad_prompt: str, all_prompts: list[str], i: int, default: str
217
+ ) -> list[str]:
218
+ prompts = re.split(r"\s*\[SEP\]\s*", ad_prompt)
219
+ blank_replacement = self.prompt_blank_replacement(all_prompts, i, default)
220
+ for n in range(len(prompts)):
221
+ if not prompts[n]:
222
+ prompts[n] = blank_replacement
223
+ return prompts
224
+
225
+ def get_prompt(self, p, args: ADetailerArgs) -> tuple[list[str], list[str]]:
226
+ i = p._idx
227
+
228
+ prompt = self._get_prompt(args.ad_prompt, p.all_prompts, i, p.prompt)
229
+ negative_prompt = self._get_prompt(
230
+ args.ad_negative_prompt, p.all_negative_prompts, i, p.negative_prompt
231
+ )
232
+
233
+ return prompt, negative_prompt
234
+
235
+ def get_seed(self, p) -> tuple[int, int]:
236
+ i = p._idx
237
+
238
+ if not p.all_seeds:
239
+ seed = p.seed
240
+ elif i < len(p.all_seeds):
241
+ seed = p.all_seeds[i]
242
+ else:
243
+ j = i % len(p.all_seeds)
244
+ seed = p.all_seeds[j]
245
+
246
+ if not p.all_subseeds:
247
+ subseed = p.subseed
248
+ elif i < len(p.all_subseeds):
249
+ subseed = p.all_subseeds[i]
250
+ else:
251
+ j = i % len(p.all_subseeds)
252
+ subseed = p.all_subseeds[j]
253
+
254
+ return seed, subseed
255
+
256
+ def get_width_height(self, p, args: ADetailerArgs) -> tuple[int, int]:
257
+ if args.ad_use_inpaint_width_height:
258
+ width = args.ad_inpaint_width
259
+ height = args.ad_inpaint_height
260
+ else:
261
+ width = p.width
262
+ height = p.height
263
+
264
+ return width, height
265
+
266
+ def get_steps(self, p, args: ADetailerArgs) -> int:
267
+ if args.ad_use_steps:
268
+ return args.ad_steps
269
+ return p.steps
270
+
271
+ def get_cfg_scale(self, p, args: ADetailerArgs) -> float:
272
+ if args.ad_use_cfg_scale:
273
+ return args.ad_cfg_scale
274
+ return p.cfg_scale
275
+
276
+ def infotext(self, p) -> str:
277
+ return create_infotext(
278
+ p, p.all_prompts, p.all_seeds, p.all_subseeds, None, 0, 0
279
+ )
280
+
281
+ def write_params_txt(self, p) -> None:
282
+ infotext = self.infotext(p)
283
+ params_txt = Path(data_path, "params.txt")
284
+ params_txt.write_text(infotext, encoding="utf-8")
285
+
286
+ def script_filter(self, p, args: ADetailerArgs):
287
+ script_runner = copy(p.scripts)
288
+ script_args = deepcopy(p.script_args)
289
+ self.disable_controlnet_units(script_args)
290
+
291
+ ad_only_seleted_scripts = opts.data.get("ad_only_seleted_scripts", True)
292
+ if not ad_only_seleted_scripts:
293
+ return script_runner, script_args
294
+
295
+ ad_script_names = opts.data.get("ad_script_names", SCRIPT_DEFAULT)
296
+ script_names_set = {
297
+ name
298
+ for script_name in ad_script_names.split(",")
299
+ for name in (script_name, script_name.strip())
300
+ }
301
+
302
+ if args.ad_controlnet_model != "None":
303
+ script_names_set.add("controlnet")
304
+
305
+ filtered_alwayson = []
306
+ for script_object in script_runner.alwayson_scripts:
307
+ filepath = script_object.filename
308
+ filename = Path(filepath).stem
309
+ if filename in script_names_set:
310
+ filtered_alwayson.append(script_object)
311
+ if filename == "controlnet":
312
+ self.cn_script = script_object
313
+ self.cn_latest_network = script_object.latest_network
314
+
315
+ script_runner.alwayson_scripts = filtered_alwayson
316
+ return script_runner, script_args
317
+
318
+ def disable_controlnet_units(self, script_args: list[Any]) -> None:
319
+ for obj in script_args:
320
+ if "controlnet" in obj.__class__.__name__.lower():
321
+ if hasattr(obj, "enabled"):
322
+ obj.enabled = False
323
+ if hasattr(obj, "input_mode"):
324
+ obj.input_mode = getattr(obj.input_mode, "SIMPLE", "simple")
325
+
326
+ elif isinstance(obj, dict) and "module" in obj:
327
+ obj["enabled"] = False
328
+
329
+ def get_i2i_p(self, p, args: ADetailerArgs, image):
+ seed, subseed = self.get_seed(p)
+ width, height = self.get_width_height(p, args)
+ steps = self.get_steps(p, args)
+ cfg_scale = self.get_cfg_scale(p, args)
+
+ sampler_name = p.sampler_name
+ if sampler_name in ["PLMS", "UniPC"]:
+ sampler_name = "Euler"
+
+ i2i = StableDiffusionProcessingImg2Img(
+ init_images=[image],
+ resize_mode=0,
+ denoising_strength=args.ad_denoising_strength,
+ mask=None,
+ mask_blur=args.ad_mask_blur,
+ inpainting_fill=1,
+ inpaint_full_res=args.ad_inpaint_only_masked,
+ inpaint_full_res_padding=args.ad_inpaint_only_masked_padding,
+ inpainting_mask_invert=0,
+ sd_model=p.sd_model,
+ outpath_samples=p.outpath_samples,
+ outpath_grids=p.outpath_grids,
+ prompt="", # replace later
+ negative_prompt="",
+ styles=p.styles,
+ seed=seed,
+ subseed=subseed,
+ subseed_strength=p.subseed_strength,
+ seed_resize_from_h=p.seed_resize_from_h,
+ seed_resize_from_w=p.seed_resize_from_w,
+ sampler_name=sampler_name,
+ batch_size=1,
+ n_iter=1,
+ steps=steps,
+ cfg_scale=cfg_scale,
+ width=width,
+ height=height,
+ restore_faces=args.ad_restore_face,
+ tiling=p.tiling,
+ extra_generation_params=p.extra_generation_params,
+ do_not_save_samples=True,
+ do_not_save_grid=True,
+ )
+
+ i2i.scripts, i2i.script_args = self.script_filter(p, args)
+ i2i._disable_adetailer = True
+
+ if args.ad_controlnet_model != "None":
+ self.update_controlnet_args(i2i, args)
+ else:
+ i2i.control_net_enabled = False
+
+ return i2i
+
+ def save_image(self, p, image, *, condition: str, suffix: str) -> None:
+ i = p._idx
+ seed, _ = self.get_seed(p)
+
+ if opts.data.get(condition, False):
+ images.save_image(
+ image=image,
+ path=p.outpath_samples,
+ basename="",
+ seed=seed,
+ prompt=p.all_prompts[i] if i < len(p.all_prompts) else p.prompt,
+ extension=opts.samples_format,
+ info=self.infotext(p),
+ p=p,
+ suffix=suffix,
+ )
+
+ def get_ad_model(self, name: str):
+ if name not in model_mapping:
+ msg = f"[-] ADetailer: Model {name!r} not found. Available models: {list(model_mapping.keys())}"
+ raise ValueError(msg)
+ return model_mapping[name]
+
+ def sort_bboxes(self, pred: PredictOutput) -> PredictOutput:
+ sortby = opts.data.get("ad_bbox_sortby", BBOX_SORTBY[0])
+ sortby_idx = BBOX_SORTBY.index(sortby)
+ pred = sort_bboxes(pred, sortby_idx)
+ return pred
+
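+ # Mask pipeline: drop detections outside the configured area-ratio range, sort the
+ # remaining boxes, then dilate/erode, offset, and optionally merge or invert the masks.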
+ def pred_preprocessing(self, pred: PredictOutput, args: ADetailerArgs):
+ pred = filter_by_ratio(
+ pred, low=args.ad_mask_min_ratio, high=args.ad_mask_max_ratio
+ )
+ pred = self.sort_bboxes(pred)
+ return mask_preprocess(
+ pred.masks,
+ kernel=args.ad_dilate_erode,
+ x_offset=args.ad_x_offset,
+ y_offset=args.ad_y_offset,
+ merge_invert=args.ad_mask_merge_invert,
+ )
+
+ def i2i_prompts_replace(
+ self, i2i, prompts: list[str], negative_prompts: list[str], j: int
+ ) -> None:
+ i1 = min(j, len(prompts) - 1)
+ i2 = min(j, len(negative_prompts) - 1)
+ prompt = prompts[i1]
+ negative_prompt = negative_prompts[i2]
+ i2i.prompt = prompt
+ i2i.negative_prompt = negative_prompt
+
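+ # True at the last image of a batch, except when it is also the last image overall;
+ # used below to call the ControlNet script's process() again.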
+ def is_need_call_process(self, p) -> bool:
+ i = p._idx
+ n_iter = p.iteration
+ bs = p.batch_size
+ return (i == (n_iter + 1) * bs - 1) and (i != len(p.all_prompts) - 1)
+
+ def process(self, p, *args_):
+ if getattr(p, "_disable_adetailer", False):
+ return
+
+ if self.is_ad_enabled(*args_):
+ arg_list = self.get_args(*args_)
+ extra_params = self.extra_params(arg_list)
+ p.extra_generation_params.update(extra_params)
+
+ p._idx = -1
+
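+ # Core per-image routine: run the detector (mediapipe or ultralytics) on the current
+ # image, turn the detections into masks, then inpaint one mask at a time, feeding
+ # each result into the next pass.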
+ def _postprocess_image(self, p, pp, args: ADetailerArgs, *, n: int = 0) -> bool:
+ """
+ Returns
+ -------
+ bool
+
+ `True` if image was processed, `False` otherwise.
+ """
+ if state.interrupted:
+ return False
+
+ i = p._idx
+
+ i2i = self.get_i2i_p(p, args, pp.image)
+ seed, subseed = self.get_seed(p)
+ ad_prompts, ad_negatives = self.get_prompt(p, args)
+
+ is_mediapipe = args.ad_model.lower().startswith("mediapipe")
+
+ kwargs = {}
+ if is_mediapipe:
+ predictor = mediapipe_predict
+ ad_model = args.ad_model
+ else:
+ predictor = ultralytics_predict
+ ad_model = self.get_ad_model(args.ad_model)
+ kwargs["device"] = self.ultralytics_device
+
+ with change_torch_load():
+ pred = predictor(ad_model, pp.image, args.ad_confidence, **kwargs)
+
+ masks = self.pred_preprocessing(pred, args)
+
+ if not masks:
+ print(
+ f"[-] ADetailer: nothing detected on image {i + 1} with {ordinal(n + 1)} settings."
+ )
+ return False
+
+ self.save_image(
+ p,
+ pred.preview,
+ condition="ad_save_previews",
+ suffix="-ad-preview" + suffix(n, "-"),
+ )
+
+ steps = len(masks)
+ processed = None
+ state.job_count += steps
+
+ if is_mediapipe:
+ print(f"mediapipe: {steps} detected.")
+
+ p2 = copy(i2i)
+ for j in range(steps):
+ p2.image_mask = masks[j]
+ self.i2i_prompts_replace(p2, ad_prompts, ad_negatives, j)
+
+ if not re.match(r"^\s*\[SKIP\]\s*$", p2.prompt):
+ if args.ad_controlnet_model == "None":
+ cn_restore_unet_hook(p2, self.cn_latest_network)
+ processed = process_images(p2)
+
+ p2 = copy(i2i)
+ p2.init_images = [processed.images[0]]
+
+ p2.seed = seed + j + 1
+ p2.subseed = subseed + j + 1
+
+ if processed is not None:
+ pp.image = processed.images[0]
+ return True
+
+ return False
+
+ def postprocess_image(self, p, pp, *args_):
+ if getattr(p, "_disable_adetailer", False):
+ return
+
+ if not self.is_ad_enabled(*args_):
+ return
+
+ p._idx = getattr(p, "_idx", -1) + 1
+ init_image = copy(pp.image)
+ arg_list = self.get_args(*args_)
+
+ is_processed = False
+ with CNHijackRestore(), pause_total_tqdm(), cn_allow_script_control():
+ for n, args in enumerate(arg_list):
+ if args.ad_model == "None":
+ continue
+ is_processed |= self._postprocess_image(p, pp, args, n=n)
+
+ if is_processed:
+ self.save_image(
+ p, init_image, condition="ad_save_images_before", suffix="-ad-before"
+ )
+
+ if self.cn_script is not None and self.is_need_call_process(p):
+ self.cn_script.process(p)
+
+ try:
+ if p._idx == len(p.all_prompts) - 1:
+ self.write_params_txt(p)
+ except Exception:
+ pass
+
+
+ def on_after_component(component, **_kwargs):
+ global txt2img_submit_button, img2img_submit_button
+ if getattr(component, "elem_id", None) == "txt2img_generate":
+ txt2img_submit_button = component
+ return
+
+ if getattr(component, "elem_id", None) == "img2img_generate":
+ img2img_submit_button = component
+
+
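+ # Options registered under the "ADetailer" section of the WebUI Settings tab.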
+ def on_ui_settings():
+ section = ("ADetailer", AFTER_DETAILER)
+ shared.opts.add_option(
+ "ad_max_models",
+ shared.OptionInfo(
+ default=2,
+ label="Max models",
+ component=gr.Slider,
+ component_args={"minimum": 1, "maximum": 5, "step": 1},
+ section=section,
+ ),
+ )
+
+ shared.opts.add_option(
+ "ad_save_previews",
+ shared.OptionInfo(False, "Save mask previews", section=section),
+ )
+
+ shared.opts.add_option(
+ "ad_save_images_before",
+ shared.OptionInfo(False, "Save images before ADetailer", section=section),
+ )
+
+ shared.opts.add_option(
+ "ad_only_seleted_scripts",
+ shared.OptionInfo(
+ True, "Apply only selected scripts to ADetailer", section=section
+ ),
+ )
+
+ textbox_args = {
+ "placeholder": "comma-separated list of script names",
+ "interactive": True,
+ }
+
+ shared.opts.add_option(
+ "ad_script_names",
+ shared.OptionInfo(
+ default=SCRIPT_DEFAULT,
+ label="Script names to apply to ADetailer (separated by comma)",
+ component=gr.Textbox,
+ component_args=textbox_args,
+ section=section,
+ ),
+ )
+
+ shared.opts.add_option(
+ "ad_bbox_sortby",
+ shared.OptionInfo(
+ default="None",
+ label="Sort bounding boxes by",
+ component=gr.Radio,
+ component_args={"choices": BBOX_SORTBY},
+ section=section,
+ ),
+ )
+
+
+ script_callbacks.on_ui_settings(on_ui_settings)
+ script_callbacks.on_after_component(on_after_component)
sd_webui/__init__.py ADDED
File without changes
sd_webui/images.py ADDED
@@ -0,0 +1,62 @@
+ from __future__ import annotations
+
+ from typing import TYPE_CHECKING
+
+ if TYPE_CHECKING:
+ from PIL import Image, PngImagePlugin
+
+ from sd_webui.processing import StableDiffusionProcessing
+
+ def save_image(
+ image: Image.Image,
+ path: str,
+ basename: str,
+ seed: int | None = None,
+ prompt: str = "",
+ extension: str = "png",
+ info: str | PngImagePlugin.iTXt = "",
+ short_filename: bool = False,
+ no_prompt: bool = False,
+ grid: bool = False,
+ pnginfo_section_name: str = "parameters",
+ p: StableDiffusionProcessing | None = None,
+ existing_info: dict | None = None,
+ forced_filename: str | None = None,
+ suffix: str = "",
+ save_to_dirs: bool = False,
+ ) -> tuple[str, str | None]:
+ """Save an image.
+
+ Args:
+ image (`PIL.Image`):
+ The image to be saved.
+ path (`str`):
+ The directory to save the image in. Note: the `save_to_dirs` option saves the image into a subdirectory.
+ basename (`str`):
+ The base filename which will be applied to `filename pattern`.
+ seed, prompt, short_filename,
+ extension (`str`):
+ Image file extension, default is `png`.
+ pnginfo_section_name (`str`):
+ Specify the name of the section in which `info` will be saved.
+ info (`str` or `PngImagePlugin.iTXt`):
+ PNG info chunks.
+ existing_info (`dict`):
+ Additional PNG info. `existing_info == {pnginfo_section_name: info, ...}`
+ no_prompt:
+ TODO I don't know its meaning.
+ p (`StableDiffusionProcessing`)
+ forced_filename (`str`):
+ If specified, `basename` and filename pattern will be ignored.
+ save_to_dirs (`bool`):
+ If true, the image will be saved into a subdirectory of `path`.
+
+ Returns: (fullfn, txt_fullfn)
+ fullfn (`str`):
+ The full path of the saved image.
+ txt_fullfn (`str` or None):
+ If a text file is saved for this image, this will be its full path. Otherwise None.
+ """
+
+ else:
+ from modules.images import save_image
sd_webui/paths.py ADDED
@@ -0,0 +1,14 @@
+ from __future__ import annotations
+
+ from typing import TYPE_CHECKING
+
+ if TYPE_CHECKING:
+ import os
+
+ models_path = os.path.join(os.path.dirname(__file__), "1")
+ script_path = os.path.join(os.path.dirname(__file__), "2")
+ data_path = os.path.join(os.path.dirname(__file__), "3")
+ extensions_dir = os.path.join(os.path.dirname(__file__), "4")
+ extensions_builtin_dir = os.path.join(os.path.dirname(__file__), "5")
+ else:
+ from modules.paths import data_path, models_path, script_path
sd_webui/processing.py ADDED
@@ -0,0 +1,172 @@
+ from __future__ import annotations
+
+ from typing import TYPE_CHECKING
+
+ if TYPE_CHECKING:
+ from dataclasses import dataclass, field
+ from typing import Any, Callable
+
+ import numpy as np
+ import torch
+ from PIL import Image
+
+ def _image():
+ return Image.new("L", (512, 512))
+
+ @dataclass
+ class StableDiffusionProcessing:
+ sd_model: torch.nn.Module = field(default_factory=lambda: torch.nn.Linear(1, 1))
+ outpath_samples: str = ""
+ outpath_grids: str = ""
+ prompt: str = ""
+ prompt_for_display: str = ""
+ negative_prompt: str = ""
+ styles: list[str] = field(default_factory=list)
+ seed: int = -1
+ subseed: int = -1
+ subseed_strength: float = 0.0
+ seed_resize_from_h: int = -1
+ seed_resize_from_w: int = -1
+ sampler_name: str | None = None
+ batch_size: int = 1
+ n_iter: int = 1
+ steps: int = 50
+ cfg_scale: float = 7.0
+ width: int = 512
+ height: int = 512
+ restore_faces: bool = False
+ tiling: bool = False
+ do_not_save_samples: bool = False
+ do_not_save_grid: bool = False
+ extra_generation_params: dict[str, Any] = field(default_factory=dict)
+ overlay_images: list[Image.Image] = field(default_factory=list)
+ eta: float = 0.0
+ do_not_reload_embeddings: bool = False
+ paste_to: tuple[int | float, ...] = (0, 0, 0, 0)
+ color_corrections: list[np.ndarray] = field(default_factory=list)
+ denoising_strength: float = 0.0
+ sampler_noise_scheduler_override: Callable | None = None
+ ddim_discretize: str = ""
+ s_min_uncond: float = 0.0
+ s_churn: float = 0.0
+ s_tmin: float = 0.0
+ s_tmax: float = 0.0
+ s_noise: float = 0.0
+ override_settings: dict[str, Any] = field(default_factory=dict)
+ override_settings_restore_afterwards: bool = False
+ is_using_inpainting_conditioning: bool = False
+ disable_extra_networks: bool = False
+ scripts: Any = None
+ script_args: list[Any] = field(default_factory=list)
+ all_prompts: list[str] = field(default_factory=list)
+ all_negative_prompts: list[str] = field(default_factory=list)
+ all_seeds: list[int] = field(default_factory=list)
+ all_subseeds: list[int] = field(default_factory=list)
+ iteration: int = 1
+ is_hr_pass: bool = False
+
+ @dataclass
+ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
+ sampler: Callable | None = None
+ enable_hr: bool = False
+ denoising_strength: float = 0.75
+ hr_scale: float = 2.0
+ hr_upscaler: str = ""
+ hr_second_pass_steps: int = 0
+ hr_resize_x: int = 0
+ hr_resize_y: int = 0
+ hr_upscale_to_x: int = 0
+ hr_upscale_to_y: int = 0
+ width: int = 512
+ height: int = 512
+ truncate_x: int = 512
+ truncate_y: int = 512
+ applied_old_hires_behavior_to: tuple[int, int] = (512, 512)
+
+ @dataclass
+ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing):
+ sampler: Callable | None = None
+ init_images: list[Image.Image] = field(default_factory=list)
+ resize_mode: int = 0
+ denoising_strength: float = 0.75
+ image_cfg_scale: float | None = None
+ init_latent: torch.Tensor | None = None
+ image_mask: Image.Image = field(default_factory=_image)
+ latent_mask: Image.Image = field(default_factory=_image)
+ mask_for_overlay: Image.Image = field(default_factory=_image)
+ mask_blur: int = 4
+ inpainting_fill: int = 0
+ inpaint_full_res: bool = True
+ inpaint_full_res_padding: int = 0
+ inpainting_mask_invert: int | bool = 0
+ initial_noise_multiplier: float = 1.0
+ mask: torch.Tensor | None = None
+ nmask: torch.Tensor | None = None
+ image_conditioning: torch.Tensor | None = None
+
+ @dataclass
+ class Processed:
+ images: list[Image.Image] = field(default_factory=list)
+ prompt: list[str] = field(default_factory=list)
+ negative_prompt: list[str] = field(default_factory=list)
+ seed: list[int] = field(default_factory=list)
+ subseed: list[int] = field(default_factory=list)
+ subseed_strength: float = 0.0
+ info: str = ""
+ comments: str = ""
+ width: int = 512
+ height: int = 512
+ sampler_name: str = ""
+ cfg_scale: float = 7.0
+ image_cfg_scale: float | None = None
+ steps: int = 50
+ batch_size: int = 1
+ restore_faces: bool = False
+ face_restoration_model: str | None = None
+ sd_model_hash: str = ""
+ seed_resize_from_w: int = -1
+ seed_resize_from_h: int = -1
+ denoising_strength: float = 0.0
+ extra_generation_params: dict[str, Any] = field(default_factory=dict)
+ index_of_first_image: int = 0
+ styles: list[str] = field(default_factory=list)
+ job_timestamp: str = ""
+ clip_skip: int = 1
+ eta: float = 0.0
+ ddim_discretize: str = ""
+ s_churn: float = 0.0
+ s_tmin: float = 0.0
+ s_tmax: float = 0.0
+ s_noise: float = 0.0
+ sampler_noise_scheduler_override: Callable | None = None
+ is_using_inpainting_conditioning: bool = False
+ all_prompts: list[str] = field(default_factory=list)
+ all_negative_prompts: list[str] = field(default_factory=list)
+ all_seeds: list[int] = field(default_factory=list)
+ all_subseeds: list[int] = field(default_factory=list)
+ infotexts: list[str] = field(default_factory=list)
+
+ def create_infotext(
+ p: StableDiffusionProcessingTxt2Img | StableDiffusionProcessingImg2Img,
+ all_prompts: list[str],
+ all_seeds: list[int],
+ all_subseeds: list[int],
+ comments: Any,
+ iteration: int = 0,
+ position_in_batch: int = 0,
+ ) -> str:
+ pass
+
+ def process_images(
+ p: StableDiffusionProcessingTxt2Img | StableDiffusionProcessingImg2Img,
+ ) -> Processed:
+ pass
+
+ else:
+ from modules.processing import (
+ StableDiffusionProcessing,
+ StableDiffusionProcessingImg2Img,
+ StableDiffusionProcessingTxt2Img,
+ create_infotext,
+ process_images,
+ )
sd_webui/safe.py ADDED
@@ -0,0 +1,10 @@
+ from __future__ import annotations
+
+ from typing import TYPE_CHECKING
+
+ if TYPE_CHECKING:
+ import torch
+
+ unsafe_torch_load = torch.load
+ else:
+ from modules.safe import unsafe_torch_load
sd_webui/script_callbacks.py ADDED
@@ -0,0 +1,15 @@
+ from __future__ import annotations
+
+ from typing import TYPE_CHECKING
+
+ if TYPE_CHECKING:
+ from typing import Callable
+
+ def on_ui_settings(callback: Callable):
+ pass
+
+ def on_after_component(callback: Callable):
+ pass
+
+ else:
+ from modules.script_callbacks import on_after_component, on_ui_settings
sd_webui/scripts.py ADDED
@@ -0,0 +1,83 @@
+ from __future__ import annotations
+
+ from typing import TYPE_CHECKING
+
+ if TYPE_CHECKING:
+ from abc import ABC, abstractmethod
+ from dataclasses import dataclass
+ from typing import Any
+
+ import gradio as gr
+ from PIL import Image
+
+ from sd_webui.processing import (
+ Processed,
+ StableDiffusionProcessingImg2Img,
+ StableDiffusionProcessingTxt2Img,
+ )
+
+ SDPType = StableDiffusionProcessingImg2Img | StableDiffusionProcessingTxt2Img
+ AlwaysVisible = object()
+
+ @dataclass
+ class PostprocessImageArgs:
+ image: Image.Image
+
+ class Script(ABC):
+ filename: str
+ args_from: int
+ args_to: int
+ alwayson: bool
+
+ is_txt2img: bool
+ is_img2img: bool
+
+ group: gr.Group
+ infotext_fields: list[tuple[str, str]]
+ paste_field_names: list[str]
+
+ @abstractmethod
+ def title(self):
+ raise NotImplementedError
+
+ def ui(self, is_img2img: bool):
+ pass
+
+ def show(self, is_img2img: bool):
+ return True
+
+ def run(self, p: SDPType, *args):
+ pass
+
+ def process(self, p: SDPType, *args):
+ pass
+
+ def before_process_batch(self, p: SDPType, *args, **kwargs):
+ pass
+
+ def process_batch(self, p: SDPType, *args, **kwargs):
+ pass
+
+ def postprocess_batch(self, p: SDPType, *args, **kwargs):
+ pass
+
+ def postprocess_image(self, p: SDPType, pp: PostprocessImageArgs, *args):
+ pass
+
+ def postprocess(self, p: SDPType, processed: Processed, *args):
+ pass
+
+ def before_component(self, component, **kwargs):
+ pass
+
+ def after_component(self, component, **kwargs):
+ pass
+
+ def describe(self):
+ return ""
+
+ def elem_id(self, item_id: Any) -> str:
+ pass
+
+ else:
+ from modules.scripts import AlwaysVisible, PostprocessImageArgs, Script
sd_webui/shared.py ADDED
@@ -0,0 +1,66 @@
+ from __future__ import annotations
+
+ from typing import TYPE_CHECKING
+
+ if TYPE_CHECKING:
+ import argparse
+ from dataclasses import dataclass
+ from typing import Any, Callable
+
+ import torch
+ from PIL import Image
+
+ @dataclass
+ class State:
+ skipped: bool = False
+ interrupted: bool = False
+ job: str = ""
+ job_no: int = 0
+ job_count: int = 0
+ processing_has_refined_job_count: bool = False
+ job_timestamp: str = "0"
+ sampling_step: int = 0
+ sampling_steps: int = 0
+ current_latent: torch.Tensor | None = None
+ current_image: Image.Image | None = None
+ current_image_sampling_step: int = 0
+ id_live_preview: int = 0
+ textinfo: str | None = None
+ time_start: float | None = None
+ need_restart: bool = False
+ server_start: float | None = None
+
+ @dataclass
+ class OptionInfo:
+ default: Any = None
+ label: str = ""
+ component: Any = None
+ component_args: Callable[[], dict] | dict[str, Any] | None = None
+ onchange: Callable[[], None] | None = None
+ section: tuple[str, str] | None = None
+ refresh: Callable[[], None] | None = None
+
+ class Option:
+ data_labels: dict[str, OptionInfo]
+
+ def __init__(self):
+ self.data: dict[str, Any] = {}
+
+ def add_option(self, key: str, info: OptionInfo):
+ pass
+
+ def __getattr__(self, item: str):
+ if self.data is not None and item in self.data:
+ return self.data[item]
+
+ if item in self.data_labels:
+ return self.data_labels[item].default
+
+ return super().__getattribute__(item)
+
+ opts = Option()
+ cmd_opts = argparse.Namespace()
+ state = State()
+
+ else:
+ from modules.shared import OptionInfo, cmd_opts, opts, state