crystantine committed
Commit: 821e6d6
1 Parent(s): f822d87
Upload 67 files

This view is limited to 50 files because it contains too many changes.
- comfy_mtb/.dockerignore +34 -0
- comfy_mtb/.github/CODEOWNERS +5 -0
- comfy_mtb/.github/FUNDING.yml +14 -0
- comfy_mtb/.github/ISSUE_TEMPLATE/bug_report.yml +67 -0
- comfy_mtb/.github/ISSUE_TEMPLATE/config.yml +1 -0
- comfy_mtb/.github/ISSUE_TEMPLATE/feature_request.yml +35 -0
- comfy_mtb/.github/workflows/package_wheels.yml +79 -0
- comfy_mtb/.github/workflows/release.yml +115 -0
- comfy_mtb/.github/workflows/test_embedded.yml +71 -0
- comfy_mtb/.gitignore +5 -0
- comfy_mtb/.gitmodules +9 -0
- comfy_mtb/.prettierrc +6 -0
- comfy_mtb/.release_ignore +4 -0
- comfy_mtb/INSTALL-CN.md +93 -0
- comfy_mtb/INSTALL-JP.md +93 -0
- comfy_mtb/INSTALL.md +91 -0
- comfy_mtb/LICENSE +21 -0
- comfy_mtb/README-CN.md +99 -0
- comfy_mtb/README-JP.md +96 -0
- comfy_mtb/README.md +105 -0
- comfy_mtb/__init__.py +320 -0
- comfy_mtb/endpoint.py +185 -0
- comfy_mtb/examples/01-faceswap.json +1048 -0
- comfy_mtb/examples/02-film_interpolation.json +1327 -0
- comfy_mtb/examples/03-animation_builder-condition-lerp.json +1 -0
- comfy_mtb/examples/04-animation_builder-deforum.json +1 -0
- comfy_mtb/examples/README.md +12 -0
- comfy_mtb/font.ttf +0 -0
- comfy_mtb/html/style.css +133 -0
- comfy_mtb/install.py +632 -0
- comfy_mtb/log.py +79 -0
- comfy_mtb/node_list.json +46 -0
- comfy_mtb/nodes/__init__.py +0 -0
- comfy_mtb/nodes/animation.py +44 -0
- comfy_mtb/nodes/conditions.py +181 -0
- comfy_mtb/nodes/crop.py +288 -0
- comfy_mtb/nodes/debug.py +121 -0
- comfy_mtb/nodes/deep_bump.py +302 -0
- comfy_mtb/nodes/faceenhance.py +262 -0
- comfy_mtb/nodes/faceswap.py +233 -0
- comfy_mtb/nodes/generate.py +286 -0
- comfy_mtb/nodes/graph_utils.py +244 -0
- comfy_mtb/nodes/image_interpolation.py +154 -0
- comfy_mtb/nodes/image_processing.py +617 -0
- comfy_mtb/nodes/io.py +195 -0
- comfy_mtb/nodes/latent_processing.py +33 -0
- comfy_mtb/nodes/mask.py +105 -0
- comfy_mtb/nodes/number.py +87 -0
- comfy_mtb/nodes/transform.py +110 -0
- comfy_mtb/nodes/video.py +251 -0
comfy_mtb/.dockerignore
ADDED
@@ -0,0 +1,34 @@
+# Include any files or directories that you don't want to be copied to your
+# container here (e.g., local build artifacts, temporary files, etc.).
+#
+# For more help, visit the .dockerignore file reference guide at
+# https://docs.docker.com/engine/reference/builder/#dockerignore-file
+
+**/.DS_Store
+**/__pycache__
+**/.venv
+**/.classpath
+**/.dockerignore
+**/.env
+**/.git
+**/.gitignore
+**/.project
+**/.settings
+**/.toolstarget
+**/.vs
+**/.vscode
+**/*.*proj.user
+**/*.dbmdl
+**/*.jfm
+**/bin
+**/charts
+**/docker-compose*
+**/compose*
+**/Dockerfile*
+**/node_modules
+**/npm-debug.log
+**/obj
+**/secrets.dev.yaml
+**/values.dev.yaml
+LICENSE
+README.md
comfy_mtb/.github/CODEOWNERS
ADDED
@@ -0,0 +1,5 @@
+* @melMass
+extern/GFPGAN/* @TencentARC
+extern/SadTalker/* @OpenTalker
+nodes/deep_bump.py @HugoTini
+web/imageFeed.js @pythongosssss @melMass
comfy_mtb/.github/FUNDING.yml
ADDED
@@ -0,0 +1,14 @@
+# These are supported funding model platforms
+
+github: [melMass]
+custom: ["https://www.buymeacoffee.com/melmass"]
+patreon: # Replace with a single Patreon username
+open_collective: # Replace with a single Open Collective username
+ko_fi: # Replace with a single Ko-fi username
+tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel
+community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry
+liberapay: # Replace with a single Liberapay username
+issuehunt: # Replace with a single IssueHunt username
+otechie: # Replace with a single Otechie username
+lfx_crowdfunding: # Replace with a single LFX Crowdfunding project-name e.g., cloud-foundry
+
comfy_mtb/.github/ISSUE_TEMPLATE/bug_report.yml
ADDED
@@ -0,0 +1,67 @@
+name: 🐞 Bug Report
+title: "[bug] "
+description: Report a bug
+labels: ["type: 🐛 bug", "status: 🧹 needs triage"]
+
+body:
+  - type: markdown
+    attributes:
+      value: |
+        ## Before submitting an issue
+        - Make sure to read the README & INSTALL instructions.
+        - Please search for [existing issues](https://github.com/melMass/comfy_mtb/issues?q=is%3Aissue) around your problem before filing a report.
+
+        ### Try using the debug mode to get more info
+
+        If you use the env variable `MTB_DEBUG=true`, debug messages from the extension will appear in the terminal.
+
+  - type: textarea
+    id: description
+    attributes:
+      label: Describe the bug
+      description: A clear description of what the bug is. Include screenshots if applicable.
+      placeholder: Bug description
+    validations:
+      required: true
+
+  - type: textarea
+    id: reproduction
+    attributes:
+      label: Reproduction
+      description: Steps to reproduce the behavior.
+      placeholder: |
+        1. Add node xxx ...
+        2. Connect to xxx ...
+        3. See error
+
+  - type: textarea
+    id: expected-behavior
+    attributes:
+      label: Expected behavior
+      description: A clear description of what you expected to happen.
+
+  - type: textarea
+    id: info
+    attributes:
+      label: Platform and versions
+      description: "Information about the environment you run Comfy in"
+      render: sh
+      placeholder: |
+        - OS: [e.g. Linux]
+        - Comfy Mode [e.g. custom env, standalone, google colab]
+
+    validations:
+      required: true
+
+  - type: textarea
+    id: logs
+    attributes:
+      label: Console output
+      description: Paste the console output without backticks
+      render: sh
+
+  - type: textarea
+    id: context
+    attributes:
+      label: Additional context
+      description: Add any other context about the problem here.
comfy_mtb/.github/ISSUE_TEMPLATE/config.yml
ADDED
@@ -0,0 +1 @@
+blank_issues_enabled: false
comfy_mtb/.github/ISSUE_TEMPLATE/feature_request.yml
ADDED
@@ -0,0 +1,35 @@
+name: 💡 Feature Request
+title: "[feat] "
+description: Suggest an idea
+labels: ["type: 🤚 feature request"]
+
+body:
+  - type: textarea
+    id: problem
+    attributes:
+      label: Describe the problem
+      description: A clear description of the problem this feature would solve
+      placeholder: "I'm always frustrated when..."
+    validations:
+      required: true
+
+  - type: textarea
+    id: solution
+    attributes:
+      label: "Describe the solution you'd like"
+      description: A clear description of what change you would like
+      placeholder: "I would like to..."
+    validations:
+      required: true
+
+  - type: textarea
+    id: alternatives
+    attributes:
+      label: Alternatives considered
+      description: "Any alternative solutions you've considered"
+
+  - type: textarea
+    id: context
+    attributes:
+      label: Additional context
+      description: Add any other context about the problem here.
comfy_mtb/.github/workflows/package_wheels.yml
ADDED
@@ -0,0 +1,79 @@
+name: 📦 Building wheels
+
+on:
+  workflow_dispatch:
+  # push:
+  #   tags:
+  #     - "v*"
+
+jobs:
+  build:
+    strategy:
+      fail-fast: false
+      matrix:
+        include:
+          - os: windows-latest
+            platform: win32
+            arch: x64
+          # - os: macos-latest
+          #   platform: darwin
+          #   arch: arm64
+          - os: ubuntu-latest
+            platform: linux
+            arch: x64
+    runs-on: ${{ matrix.os }}
+    env:
+      archive_name: deps-wheels_${{ matrix.os }}-${{ matrix.platform }}-${{ matrix.arch }}
+    steps:
+      - name: ♻️ Checking out the repository
+        uses: actions/checkout@v3
+      - name: '🐍 Setting up Python'
+        uses: actions/setup-python@v4
+        with:
+          python-version: '3.10.9'
+
+      - name: 📦 Building and Bundling wheels
+        shell: bash
+        run: |
+          python -m pip wheel --no-cache-dir -r reqs.txt -w ./wheels 2>&1 | tee build.log
+
+          # find source wheels
+          packages=$(cat build.log | awk -F 'Building wheels for collected packages: ' '{print $2}')
+          packages=$(echo "$packages" | tr -d '[:space:]')
+
+          IFS=', ' read -r -a package_array <<< "$packages"
+
+          # Save reversed package_array to wheel_order.txt
+          reversed_array=()
+          for ((idx=${#package_array[@]}-1; idx>=0; idx--)); do
+            reversed_array+=("${package_array[idx]}")
+          done
+          printf '%s\n' "${reversed_array[@]}" > ./wheels/wheel_order.txt
+
+          printf "Autodetect this source package: \e[32m%s\e[0m\n" "${package_array[@]}"
+
+          # Iterate through the wheel files and remove those that are not source built
+          for wheel_file in ./wheels/*.whl; do
+            # Extract the package name from the wheel filename
+            package_name=$(basename "$wheel_file" .whl | awk -F '-' '{print $1}')
+
+            printf "Checking package: %s\n" "$package_name"
+
+            # Check if the package is not in the array of source built packages
+            if [[ ! " ${package_array[@]} " =~ " ${package_name} " ]]; then
+              echo "Removing $wheel_file"
+              rm "$wheel_file"
+            fi
+          done
+
+
+          if [ "$RUNNER_OS" == "Windows" ]; then
+            "C:/Program Files/7-Zip/7z.exe" a -mfb=258 -tzip ${{ env.archive_name }}.zip wheels
+          else
+            zip -r -9 -y -m ${{ env.archive_name }}.zip wheels
+          fi
+      - name: 💾 Store cache
+        uses: actions/cache/save@v3
+        with:
+          path: ${{ env.archive_name }}.zip
+          key: ${{ env.archive_name }}-${{ hashFiles('reqs.txt') }}
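The bash step above keeps only the wheels that pip actually built from source (it parses pip's `Building wheels for collected packages:` log line) and records their reversed order in `wheels/wheel_order.txt`. For readers who find the awk/regex pipeline hard to follow, here is a rough Python sketch of the same filtering; the `build.log` and `wheels/` names match the workflow, everything else is illustrative and not part of the uploaded file.

```python
# Illustrative sketch only, not part of the committed workflow.
import re
from pathlib import Path

wheels_dir = Path("wheels")
log_text = Path("build.log").read_text()

# pip prints e.g. "Building wheels for collected packages: insightface, basicsr"
match = re.search(r"Building wheels for collected packages: (.+)", log_text)
source_built = [name.strip() for name in match.group(1).split(",")] if match else []

# same reversed ordering the workflow writes to wheel_order.txt
(wheels_dir / "wheel_order.txt").write_text("\n".join(reversed(source_built)) + "\n")

for wheel in wheels_dir.glob("*.whl"):
    package_name = wheel.name.split("-")[0]  # wheel filenames start with the package name
    if package_name not in source_built:
        print(f"Removing {wheel}")
        wheel.unlink()
```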
comfy_mtb/.github/workflows/release.yml
ADDED
@@ -0,0 +1,115 @@
+name: 📦 Release
+
+on:
+  workflow_dispatch:
+    inputs:
+      name:
+        description: Release tag / name ?
+        required: true
+        default: 'latest'
+        type: string
+      environment:
+        description: Environment to run tests against
+        type: environment
+        required: false
+  # push:
+  #   tags:
+  #     - "v*"
+
+jobs:
+  release:
+    permissions:
+      contents: write
+    runs-on: ubuntu-latest
+    env:
+      repo_name: ${{ github.event.repository.name }}
+    steps:
+      - name: ♻️ Checking out the repository
+        uses: actions/checkout@v3
+        with:
+          submodules: 'recursive'
+          path: ${{ env.repo_name }}
+
+      # - name: 📝 Prepare file with paths to remove
+      #   run: |
+      #     find ${{ env.repo_name }} -type f -size +10M > .release_ignore
+      #     find ${{ env.repo_name }} -type d -empty >> .release_ignore
+      #   shell: bash
+
+      - name: 🗑️ Remove files and directories listed in .release_ignore
+        shell: bash
+        run: |
+          release_ignore="${{ env.repo_name }}/.release_ignore"
+          if [ -f "$release_ignore" ]; then
+            while IFS= read -r entry || [ -n "$entry" ]; do
+              target="${{ env.repo_name }}/$entry"
+              if [ -e "$target" ]; then
+                if [ -f "$target" ]; then
+                  rm "$target"
+                elif [ -d "$target" ]; then
+                  rm -r "$target"
+                fi
+              else
+                echo "Warning: $entry does not exist in the repository. Skipping removal."
+              fi
+            done < "$release_ignore"
+          else
+            echo "No .release_ignore file found. Skipping removal of files and directories."
+          fi
+
+      - name: 📦 Building custom comfy nodes
+        shell: bash
+        run: |
+          if [ "$RUNNER_OS" == "Windows" ]; then
+            "C:/Program Files/7-Zip/7z.exe" a -mfb=258 -tzip ${{ env.repo_name }}-${{ inputs.name }}.zip ${{ env.repo_name }}
+          else
+            zip -r -9 -y -m ${{ env.repo_name }}-${{ inputs.name }}.zip ${{ env.repo_name }}
+          fi
+
+      - name: ✅ Create release
+        uses: softprops/action-gh-release@v1
+        with:
+          tag_name: ${{ inputs.name }}
+          files: |
+            ${{ env.repo_name }}-${{ inputs.name }}.zip
+
+  release-wheels:
+    permissions:
+      contents: write
+    strategy:
+      fail-fast: true
+      matrix:
+        include:
+          - os: windows-latest
+            platform: win32
+            arch: x64
+          # - os: macos-latest
+          #   platform: darwin
+          #   arch: arm64
+          - os: ubuntu-latest
+            platform: linux
+            arch: x64
+    runs-on: ${{ matrix.os }}
+    env:
+      archive_name: deps-wheels_${{ matrix.os }}-${{ matrix.platform }}-${{ matrix.arch }}
+    steps:
+      - name: 💾 Restore cache
+        uses: actions/cache/restore@v3
+        id: cache
+        with:
+          path: ${{ env.archive_name }}.zip
+          key: ${{ env.archive_name }}-${{ hashFiles('reqs.txt') }}
+      - name: 📦 Unzip wheels
+        shell: bash
+        run: |
+          mkdir -p wheels
+          unzip -j ${{ env.archive_name }}.zip "**/*.whl" -d wheels
+          unzip -j ${{ env.archive_name }}.zip "**/*.txt" -d wheels
+        if: success()
+      - name: ✅ Add wheels to release
+        uses: softprops/action-gh-release@v1
+        with:
+          tag_name: ${{ inputs.name }}
+          files: |
+            wheels/*.whl
+            wheels/wheel_order.txt
comfy_mtb/.github/workflows/test_embedded.yml
ADDED
@@ -0,0 +1,71 @@
+name: 🧪 Test Comfy Portable
+
+on: workflow_dispatch
+jobs:
+  install-comfy:
+    runs-on: windows-latest
+    env:
+      repo_name: ${{ github.event.repository.name }}
+    steps:
+      - name: ⚡️ Restore Cache if Available
+        id: cache-comfy
+        uses: actions/cache/restore@v3
+        with:
+          path: ComfyUI_windows_portable
+          key: ${{ runner.os }}-comfy-env
+
+      - name: 🚡 Download and Extract Comfy
+        id: download-extract-comfy
+        if: steps.cache-comfy.outputs.cache-hit != 'true'
+        shell: bash
+        run: |
+          mkdir comfy_temp
+          curl -L -o comfy_temp/comfyui.7z https://github.com/comfyanonymous/ComfyUI/releases/download/latest/ComfyUI_windows_portable_nvidia_cu118_or_cpu.7z
+
+          7z x comfy_temp/comfyui.7z -o./comfy_temp
+
+
+          # mv comfy_temp/ComfyUI_windows_portable/python_embeded .
+          # mv comfy_temp/ComfyUI_windows_portable/ComfyUI .
+          # mv comfy_temp/ComfyUI_windows_portable/update .
+          ls
+          mv comfy_temp/ComfyUI_windows_portable .
+
+      - name: 💾 Store cache
+        uses: actions/cache/save@v3
+        if: steps.cache-comfy.outputs.cache-hit != 'true'
+        with:
+          path: ComfyUI_windows_portable
+          key: ${{ runner.os }}-comfy-env
+      - name: ⏬ Install other extensions
+        shell: bash
+        run: |
+          export COMFY_PYTHON="${GITHUB_WORKSPACE}/ComfyUI_windows_portable/python_embeded/python.exe"
+          cd "${GITHUB_WORKSPACE}/ComfyUI_windows_portable/ComfyUI/custom_nodes"
+
+          git clone https://github.com/Fannovel16/comfy_controlnet_preprocessors
+          cd comfy_controlnet_preprocessors
+          $COMFY_PYTHON -m pip install -r requirements.txt
+
+      - name: ♻️ Checking out comfy_mtb to custom_nodes
+        uses: actions/checkout@v3
+        with:
+          submodules: 'recursive'
+          path: ComfyUI_windows_portable/ComfyUI/custom_nodes/${{ env.repo_name }}
+
+      - name: 📦 Install mtb nodes
+        shell: bash
+        run: |
+          # run install
+          export COMFY_PYTHON="${GITHUB_WORKSPACE}/ComfyUI_windows_portable/python_embeded/python.exe"
+          cd "${GITHUB_WORKSPACE}/ComfyUI_windows_portable/ComfyUI/custom_nodes"
+          $COMFY_PYTHON ${{ env.repo_name }}/install.py -w
+
+      - name: ⏬ Import mtb_nodes
+        shell: bash
+        run: |
+          export COMFY_PYTHON="${GITHUB_WORKSPACE}/ComfyUI_windows_portable/python_embeded/python.exe"
+          cd "${GITHUB_WORKSPACE}/ComfyUI_windows_portable/ComfyUI"
+          $COMFY_PYTHON -s main.py --quick-test-for-ci --cpu
+
+          $COMFY_PYTHON -m pip freeze
comfy_mtb/.gitignore
ADDED
@@ -0,0 +1,5 @@
+__pycache__
+*.py[cod]
+*.onnx
+wheels/
+node_modules/
comfy_mtb/.gitmodules
ADDED
@@ -0,0 +1,9 @@
+[submodule "extern/google-FILM"]
+	path = extern/frame_interpolation
+	url = https://github.com/google-research/frame-interpolation
+[submodule "extern/GFPGAN"]
+	path = extern/GFPGAN
+	url = https://github.com/TencentARC/GFPGAN.git
+[submodule "extern/frame_interpolation"]
+	path = extern/frame_interpolation
+	url = https://github.com/google-research/frame-interpolation
comfy_mtb/.prettierrc
ADDED
@@ -0,0 +1,6 @@
+{
+  "semi": false,
+  "singleQuote": true,
+  "tabWidth": 2,
+  "useTabs": false
+}
comfy_mtb/.release_ignore
ADDED
@@ -0,0 +1,4 @@
+extern/frame_interpolation/moment.gif
+extern/frame_interpolation/photos
+extern/GFPGAN/inputs
+.git
comfy_mtb/INSTALL-CN.md
ADDED
@@ -0,0 +1,93 @@
+# 安装
+- [安装](#安装)
+- [自动安装(推荐)](#自动安装推荐)
+- [ComfyUI 管理器](#comfyui-管理器)
+- [虚拟环境](#虚拟环境)
+- [模型下载](#模型下载)
+- [网络扩展](#网络扩展)
+- [旧的安装方法 (MANUAL)](#旧的安装方法-manual)
+- [依赖关系](#依赖关系)
+### 自动安装(推荐)
+
+### ComfyUI 管理器
+
+从 0.1.0 版开始,该扩展将使用 [ComfyUI-Manager](https://github.com/ltdrdata/ComfyUI-Manager) 进行安装,这对处理各种环境下的各种安装问题大有帮助。
+
+### 虚拟环境
+还有一种试验性的单行安装方法,即在 ComfyUI 根目录下使用以下命令进行安装。它将下载代码、安装依赖项并运行安装脚本:
+
+
+```bash
+curl -sSL "https://raw.githubusercontent.com/username/repo/main/install.py" | python3 -
+```
+
+## 模型下载
+某些节点需要下载额外的模型,您可以使用与上述相同的 python 环境以交互方式完成下载:
+
+```bash
+python scripts/download_models.py
+```
+
+然后根据提示或直接按回车键下载每个模型。
+
+> **Note**
+> 您可以使用以下方法下载所有模型,无需提示:
+```bash
+python scripts/download_models.py -y
+```
+
+#### 网络扩展
+
+首次运行时,脚本会尝试将 [网络扩展](https://github.com/melMass/comfy_mtb/tree/main/web)链接到你的 `web/extensions` 文件夹,[请参阅](https://github.com/melMass/comfy_mtb/blob/d982b69a58c05ccead9c49370764beaa4549992a/__init__.py#L45-L61)。
+
+<img alt="color widget preview" src="https://github.com/melMass/comfy_mtb/assets/7041726/cff7e66a-4cc4-4866-b35b-10af0bb2d110" width=450>
+
+### 旧的安装方法 (MANUAL)
+### 依赖关系
+<details><summary><h4>Custom Virtualenv(我主要用这个)</h4></summary>
+
+1. 确保您处于用于 ComfyUI 的 Python 环境中。
+2. 运行以下命令安装所需的依赖项:
+```bash
+pip install -r comfy_mtb/reqs.txt
+```
+
+</details>
+
+<details><summary><h4>Comfy 便携式/单机版(来自 ComfyUI 版本)</h4></summary>
+
+如果您使用 ComfyUI 单机版中的 `python-embeded`,那么当二进制文件没有轮子时,您就无法使用 pip 安装二进制文件的依赖项,在这种情况下,请查看最近的 [发布](https://github.com/melMass/comfy_mtb/releases),那里有一个预编译轮子的 linux 和 windows 捆绑包(只有那些需要从源代码编译的轮子),请查看 [此问题 (#1)](https://github.com/melMass/comfy_mtb/issues/1) 以获取更多信息。
+![image](https://github.com/melMass/comfy_mtb/assets/7041726/2934fa14-3725-427c-8b9e-2b4f60ba1b7b)
+
+
+</details>
+
+<details><summary><h4>Google Colab</h4></summary>
+
+在 **Run ComfyUI with localtunnel (Recommended Way)** 标题之后(代码单元格之前)添加一个新的代码单元格
+
+![preview of where to add it on colab](https://github.com/melMass/comfy_mtb/assets/7041726/35df2ef1-14f9-44cd-aa65-353829188cd7)
+
+
+```python
+# download the nodes
+!git clone --recursive https://github.com/melMass/comfy_mtb.git custom_nodes/comfy_mtb
+
+# download all models
+!python custom_nodes/comfy_mtb/scripts/download_models.py -y
+
+# install the dependencies
+!pip install -r custom_nodes/comfy_mtb/reqs.txt -f https://download.openmmlab.com/mmcv/dist/cu118/torch2.0/index.html
+```
+
+如果运行后 colab 抱怨需要重新启动运行时,请重新启动,然后不要重新运行之前的单元格,只运行运行本地隧道的单元格。(可能需要先添加一个包含 `%cd ComfyUI` 的单元格)
+
+
+> **Note**:
+> If you don't need all models, remove the `-y` as colab actually supports user input: ![image](https://github.com/melMass/comfy_mtb/assets/7041726/40fc3602-f1d4-432a-98fd-ce2240f5ad06)
+
+> **Preview**
+> ![image](https://github.com/melMass/comfy_mtb/assets/7041726/b5b2b2d9-f1e8-4c43-b1db-7dfc5e07be86)
+
+</details>
+
comfy_mtb/INSTALL-JP.md
ADDED
@@ -0,0 +1,93 @@
+# インストール
+
+- [インストール](#インストール)
+- [自動インストール (推奨)](#自動インストール-推奨)
+- [ComfyUI マネージャ](#comfyui-マネージャ)
+- [仮想環境](#仮想環境)
+- [モデルのダウンロード](#モデルのダウンロード)
+- [ウェブ拡張機能](#ウェブ拡張機能)
+- [旧インストール方法 (MANUAL)](#旧インストール方法-manual)
+- [依存関係](#依存関係)
+
+
+## 自動インストール (推奨)
+
+### ComfyUI マネージャ
+
+バージョン0.1.0では、この拡張機能は[ComfyUI-Manager](https://github.com/ltdrdata/ComfyUI-Manager)と一緒にインストールすることを想定しています。これは、様々な環境で直面する様々なインストール問題を処理するのに非常に役立ちます。
+
+### 仮想環境
+また、ComfyUIのルートから以下のコマンドを使用する実験的なワンライナー・インストールもあります。これはコードをダウンロードし、依存関係をインストールし、インストールスクリプトを実行します:
+
+```bash
+curl -sSL "https://raw.githubusercontent.com/username/repo/main/install.py" | python3 -
+```
+
+## モデルのダウンロード
+ノードによっては、追加モデルのダウンロードが必要な場合があるので、上記と同じ python 環境を使って対話的に行うことができる:
+```bash
+python scripts/download_models.py
+```
+
+プロンプトに従うか、Enterを押すだけで全てのモデルをダウンロードできます。
+
+
+> **Note**
+> プロンプトを出さずに全てのモデルをダウンロードするには、以下のようにします:
+```bash
+python scripts/download_models.py -y
+```
+
+### ウェブ拡張機能
+
+初回実行時にスクリプトは[web extensions](https://github.com/melMass/comfy_mtb/tree/main/web)をあなたの快適な `web/extensions` フォルダに[シンボリックリンク](https://github.com/melMass/comfy_mtb/blob/d982b69a58c05ccead9c49370764beaa4549992a/__init__.py#L45-L61)しようとします。万が一失敗した場合は、mtbフォルダを手動で`ComfyUI/web/extensions`にコピーしてください:
+
+<img alt="color widget preview" src="https://github.com/melMass/comfy_mtb/assets/7041726/cff7e66a-4cc4-4866-b35b-10af0bb2d110" width=450>
+
+## 旧インストール方法 (MANUAL)
+### 依存関係
+
+<details><summary><h4>カスタム Virtualenv (私は主にこれを使っています)</h4></summary>
+
+1. ComfyUIで使用しているPython環境であることを確認してください。
+2. 以下のコマンドを実行して、必要な依存関係をインストールします:
+```bash
+pip install -r comfy_mtb/reqs.txt
+```
+
+</details>
+
+<details><summary><h4>Comfy-portable / standalone (ComfyUI リリースより)</h4></summary>
+
+もしあなたがComfyUIスタンドアロンから`python-embeded`を使用している場合、バイナリがホイールを持っていない場合、依存関係をpipでインストールすることができません。この場合、最後の[リリース](https://github.com/melMass/comfy_mtb/releases)をチェックしてください。(ソースからのビルドが必要なもののみ)あらかじめビルドされたホイールがあるlinuxとwindows用のバンドルがあります。詳細は[この問題(#1)](https://github.com/melMass/comfy_mtb/issues/1)をチェックしてください。
+
+![image](https://github.com/melMass/comfy_mtb/assets/7041726/2934fa14-3725-427c-8b9e-2b4f60ba1b7b)
+
+</details>
+
+<details><summary><h4>Google Colab</h4></summary>
+
+**Run ComfyUI with localtunnel (Recommended Way)**ヘッダーのすぐ後(コードセルの前)に、新しいコードセルを追加してください。
+![colabに追加する場所のプレビュー](https://github.com/melMass/comfy_mtb/assets/7041726/35df2ef1-14f9-44cd-aa65-353829188cd7)
+
+```python
+# download the nodes
+!git clone --recursive https://github.com/melMass/comfy_mtb.git custom_nodes/comfy_mtb
+
+# download all models
+!python custom_nodes/comfy_mtb/scripts/download_models.py -y
+
+# install the dependencies
+!pip install -r custom_nodes/comfy_mtb/reqs.txt -f https://download.openmmlab.com/mmcv/dist/cu118/torch2.0/index.html
+```
+これを実行した後、colabがランタイムを再起動する必要があると文句を言ったら、それを実行し、それ以前のセルは再実行せず、localtunnelを実行するセルだけを再実行してください。(最初に`%cd ComfyUI`のセルを追加する必要があるかもしれません...)
+
+
+> **Note**:
+> すべてのモデルが必要でない場合は、`-y`を削除してください : ![image](https://github.com/melMass/comfy_mtb/assets/7041726/40fc3602-f1d4-432a-98fd-ce2240f5ad06)
+
+> **プレビュー**
+> ![image](https://github.com/melMass/comfy_mtb/assets/7041726/b5b2b2d9-f1e8-4c43-b1db-7dfc5e07be86)
+
+</details>
+
comfy_mtb/INSTALL.md
ADDED
@@ -0,0 +1,91 @@
+# Installation
+- [Installation](#installation)
+- [Automatic Install (Recommended)](#automatic-install-recommended)
+- [ComfyUI Manager](#comfyui-manager)
+- [Virtual Env](#virtual-env)
+- [Models Download](#models-download)
+- [Web Extensions](#web-extensions)
+- [Old installation method (MANUAL)](#old-installation-method-manual)
+- [Dependencies](#dependencies)
+
+## Automatic Install (Recommended)
+
+### ComfyUI Manager
+
+As of version 0.1.0, this extension is meant to be installed with the [ComfyUI-Manager](https://github.com/ltdrdata/ComfyUI-Manager), which helps a lot with handling the various install issues faced by various environments.
+
+### Virtual Env
+There is also an experimental one-liner install using the following command from ComfyUI's root. It will download the code, install the dependencies and run the install script:
+
+```bash
+curl -sSL "https://raw.githubusercontent.com/username/repo/main/install.py" | python3 -
+```
+
+## Models Download
+Some nodes require extra models to be downloaded; you can do it interactively using the same python environment as above:
+```bash
+python scripts/download_models.py
+```
+
+then follow the prompt, or just press enter to download every model.
+
+> **Note**
+> You can use the following to download all models without a prompt:
+```bash
+python scripts/download_models.py -y
+```
+
+### Web Extensions
+
+On first run the script [tries to symlink](https://github.com/melMass/comfy_mtb/blob/d982b69a58c05ccead9c49370764beaa4549992a/__init__.py#L45-L61) the [web extensions](https://github.com/melMass/comfy_mtb/tree/main/web) to your comfy `web/extensions` folder. In case it fails you can manually copy the mtb folder to `ComfyUI/web/extensions`; it only provides a color widget for now, shared by a few nodes:
+
+<img alt="color widget preview" src="https://github.com/melMass/comfy_mtb/assets/7041726/cff7e66a-4cc4-4866-b35b-10af0bb2d110" width=450>
+
+## Old installation method (MANUAL)
+### Dependencies
+<details><summary><h4>Custom Virtualenv (I use this mainly)</h4></summary>
+
+1. Make sure you are in the Python environment you use for ComfyUI.
+2. Install the required dependencies by running the following command:
+```bash
+pip install -r comfy_mtb/reqs.txt
+```
+
+</details>
+
+<details><summary><h4>Comfy-portable / standalone (from ComfyUI releases)</h4></summary>
+
+If you use the `python-embeded` from ComfyUI standalone then you are not able to pip install dependencies with binaries when they don't have wheels. In this case check the last [release](https://github.com/melMass/comfy_mtb/releases): there is a bundle for linux and windows with prebuilt wheels (only the ones that require building from source). Check [this issue (#1)](https://github.com/melMass/comfy_mtb/issues/1) for more info.
+![image](https://github.com/melMass/comfy_mtb/assets/7041726/2934fa14-3725-427c-8b9e-2b4f60ba1b7b)
+
+
+
+</details>
+
+<details><summary><h4>Google Colab</h4></summary>
+
+Add a new code cell just after the **Run ComfyUI with localtunnel (Recommended Way)** header (before the code cell)
+![preview of where to add it on colab](https://github.com/melMass/comfy_mtb/assets/7041726/35df2ef1-14f9-44cd-aa65-353829188cd7)
+
+
+```python
+# download the nodes
+!git clone --recursive https://github.com/melMass/comfy_mtb.git custom_nodes/comfy_mtb
+
+# download all models
+!python custom_nodes/comfy_mtb/scripts/download_models.py -y
+
+# install the dependencies
+!pip install -r custom_nodes/comfy_mtb/reqs.txt -f https://download.openmmlab.com/mmcv/dist/cu118/torch2.0/index.html
+```
+If after running this, colab complains about needing to restart the runtime, do it, and then do not rerun the earlier cells, just the one that runs the localtunnel. (you might have to add a cell with `%cd ComfyUI` first...)
+
+
+> **Note**:
+> If you don't need all models, remove the `-y` as colab actually supports user input: ![image](https://github.com/melMass/comfy_mtb/assets/7041726/40fc3602-f1d4-432a-98fd-ce2240f5ad06)
+
+> **Preview**
+> ![image](https://github.com/melMass/comfy_mtb/assets/7041726/b5b2b2d9-f1e8-4c43-b1db-7dfc5e07be86)
+
+</details>
+
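The "Web Extensions" step described above boils down to: create a symlink from the extension's `web` folder into ComfyUI's `web/extensions`, and fall back to a plain copy when the link cannot be created (the committed `__init__.py` further down in this diff does exactly this, using an NTFS junction on Windows). A minimal sketch of that behaviour, with placeholder paths rather than the real configuration:

```python
# Minimal sketch of the symlink-then-copy fallback described above.
# Both paths are placeholders, not the extension's actual setup.
import os
import shutil
from pathlib import Path

mtb_web = Path("custom_nodes/comfy_mtb/web")   # the extension's web folder
target = Path("web/extensions") / "mtb"        # inside ComfyUI's web/extensions

if not target.exists():
    try:
        os.symlink(mtb_web.resolve(), target, target_is_directory=True)
    except OSError:
        # e.g. missing privileges for symlinks: copy the folder instead
        shutil.copytree(mtb_web, target)
```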
comfy_mtb/LICENSE
ADDED
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2023 Mel Massadian
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
comfy_mtb/README-CN.md
ADDED
@@ -0,0 +1,99 @@
+# MTB Nodes
+
+<a href="https://www.buymeacoffee.com/melmass" target="_blank"><img src="https://www.buymeacoffee.com/assets/img/custom_images/orange_img.png" alt="Buy Me A Coffee" style="height: 32px !important;width: 140px !important;box-shadow: 0px 3px 2px 0px rgba(190, 190, 190, 0.5) !important;-webkit-box-shadow: 0px 3px 2px 0px rgba(190, 190, 190, 0.5) !important;" ></a>
+
+[**安装指南**](./INSTALL-CN.md) | [**示例**](https://github.com/melMass/comfy_mtb/wiki/Examples)
+
+欢迎使用 MTB Nodes 项目!这个代码库是开放的,您可以自由地探索和利用。它的主要目的是构建用于 [MLOPs](https://github.com/Bismuth-Consultancy-BV/MLOPs) 中的概念验证(POCs)。该项目中的许多节点都是受到现有社区贡献或内置功能的启发而创建的。
+
+在继续之前,请注意与此项目中使用的某些库相关的许可证。例如,`deepbump` 库采用 [GPLv3](https://github.com/HugoTini/DeepBump/blob/master/LICENSE) 许可证。
+
+- [节点列表](#节点列表)
+- [bbox](#bbox)
+- [colors](#colors)
+- [人脸检测/交换](#人脸检测交换)
+- [图像插值(动画)](#图像插值动画)
+- [图像操作](#图像操作)
+- [潜在变量工具](#潜在变量工具)
+- [其他工具](#其他工具)
+- [纹理](#纹理)
+- [Comfy 资源](#comfy-资源)
+
+
+
+
+# 节点列表
+
+## bbox
+- `Bounding Box`: BBox 构造函数(自定义类型)
+- `BBox From Mask`: 从遮罩中提取边界框
+- `Crop`: 根据边界框裁剪图像
+- `Uncrop`: 根据边界框还原图像
+
+## colors
+- `Colored Image`: 给定尺寸的纯色图像
+- `RGB to HSV`: -
+- `HSV to RGB`: -
+- `Color Correct`: 基本颜色校正工具
+<img src="https://github.com/melMass/comfy_mtb/assets/7041726/7c20ac83-31ff-40ea-a1a0-06c2acefb2ef" width=345/>
+
+## 人脸检测/交换
+- `Face Swap`: 使用 deepinsight/insightface 模型进行人脸交换(该节点在早期版本中称为 `Roop`,功能相同,`Roop` 只是使用这些模型的应用程序)
+> **注意**
+> 人脸索引允许您选择要替换的人脸,如下所示:
+<img src="https://github.com/melMass/comfy_mtb/assets/7041726/2e9d6066-c466-4a01-bd6c-315f7f1e8b42" width=320/>
+- `Load Face Swap Model`: 加载 insightface 模型用于人脸交换
+- `Restore Face`: 使用 [GFPGan](https://github.com/TencentARC/GFPGAN) 还原人脸,与 `Face Swap` 配合使用效果很好,并支持 `bg_upscaler` 的 Comfy 原生放大器
+
+## 图像插值(动画)
+- `Load Film Model`: 加载 [FILM](https://github.com/google-research/frame-interpolation) 模型
+- `Film Interpolation`: 使用 [FILM](https://github.com/google-research/frame-interpolation) 处理输入帧
+<img src="https://github.com/melMass/comfy_mtb/assets/7041726/3afd1647-6634-4b92-a34b-51432e6a9834" width=400/>
+- `Export to Prores (experimental)`: 将输入帧导出为 ProRes 4444 mov 文件。这使用 ffmpeg stdin 发送原始的 NumPy 数组,与 `Film Interpolation` 一起使用,目前很简单,但可以进一步扩展。
+
+## 图像操作
+- `Blur`: 使用高斯滤波器对图像进行模糊处理。
+- `Deglaze Image`: 从 [FN16](https://github.com/Fannovel16/FN16-ComfyUI-nodes/blob/main/DeglazeImage.py) 中提取
+- `Denoise`: 对输入图像进行降噪处理
+- `Image Compare`: 比较两个图像并返回差异图像
+- `Image Premultiply`: 使用掩码对图像进行预乘处理
+- `Image Remove Background Rembg`: 使用 [RemBG](https://github.com/danielgatis/rembg) 进行背景去除
+<img src="https://github.com/melMass/comfy_mtb/assets/7041726/e69253b4-c03c-45e9-92b5-aa46fb887be8" width=320/>
+- `Image Resize Factor`: 大部分提取自 [WAS Node Suite](https://github.com/WASasquatch/was-node-suite-comfyui),经过一些编辑(特别是支持多个图像)和较少的功能。
+- `Mask To Image`: 将遮罩(Alpha)转换为带有颜色和背景的 RGB 图像
+- `Save Image Grid`: 将输入批次中的所有图像保存为图像网格。
+
+## 潜在变量工具
+- `Latent Lerp`: 两个潜在变量之间的线性插值(混合)
+
+
+## 其他工具
+- `Concat Images`: 接受两个图像流,并将它们合并为其他 Comfy 管道支持的图像批次。
+- `Image Resize Factor`: **已弃用**,因为我后来发现了内置的图像调整大小功能。
+- `Text To Image`: 使用字体将文本转换为图像的工具
+- `Styles Loader`: 加载 csv 文件并从行中填充下拉列表(类似于 A111)
+<img src="https://github.com/melMass/comfy_mtb/assets/7041726/02fe3211-18ee-4e54-a029-931388f5fde8" width=320/>
+- `Smart Step`: 一个非常基本的节点,用于获取在 KSampler 高级中使用的步骤百分比
+- `Qr Code`: 基本的 QR Code 生成器
+- `Save Tensors`: 调试节点,将来可能会被删除
+- `Int to Number`: 用于 WASSuite 数字节点的补充
+- `Smart Step`: 使用百分比来控制 `KAdvancedSampler` 的步骤(开始/停止)
+
+## 纹理
+
+- `DeepBump`: 从单张图片生成法线图和高度图
+
+# Comfy 资源
+
+**指南**:
+- [官方示例(英文)](https://comfyanonymous.github.io/ComfyUI_examples/)
+- @BlenderNeko 的[ComfyUI 社区手册(英文)](https://blenderneko.github.io/ComfyUI-docs/)
+
+- @tjhayasaka 的[Tomoaki 个人 Wiki(日文)](https://comfyui.creamlab.net/guides/)
+
+**扩展和自定义节点**:
+- @WASasquatch 的[Comfy 列表插件(英文)](https://github.com/WASasquatch/comfyui-plugins)
+
+- [CivitAI 上的 ComfyUI 标签(英文)](https://civitai.com/tag/comfyui)
comfy_mtb/README-JP.md
ADDED
@@ -0,0 +1,96 @@
+# MTB Nodes
+
+<a href="https://www.buymeacoffee.com/melmass" target="_blank"><img src="https://www.buymeacoffee.com/assets/img/custom_images/orange_img.png" alt="Buy Me A Coffee" style="height: 32px !important;width: 140px !important;box-shadow: 0px 3px 2px 0px rgba(190, 190, 190, 0.5) !important;-webkit-box-shadow: 0px 3px 2px 0px rgba(190, 190, 190, 0.5) !important;" ></a>
+
+[**インストールガイド**](./INSTALL-JP.md) | [**サンプル**](https://github.com/melMass/comfy_mtb/wiki/Examples)
+
+MTB Nodesプロジェクトへようこそ!このコードベースは、自由に探索し、利用することができます。主な目的は、[MLOPs](https://github.com/Bismuth-Consultancy-BV/MLOPs)の実装のための概念実証(POC)を構築することです。このプロジェクトの多くのノードは、既存のコミュニティの貢献や組み込みの機能に触発されています。
+
+続行する前に、このプロジェクトで使用されている特定のライブラリに関連するライセンスに注意してください。たとえば、「deepbump」ライブラリは、[GPLv3](https://github.com/HugoTini/DeepBump/blob/master/LICENSE)の下でライセンスされています。
+
+- [ノードリスト](#ノードリスト)
+- [bbox](#bbox)
+- [colors](#colors)
+- [顔検出 / スワッピング](#顔検出--スワッピング)
+- [画像補間(アニメーション)](#画像補間アニメーション)
+- [画像操作](#画像操作)
+- [潜在的なユーティリティ](#潜在的なユーティリティ)
+- [その他のユーティリティ](#その他のユーティリティ)
+- [テクスチャ](#テクスチャ)
+- [Comfyリソース](#comfyリソース)
+
+
+# ノードリスト
+
+## bbox
+- `Bounding Box`: BBoxコンストラクタ(カスタムタイプ)
+- `BBox From Mask`: マスクからバウンディングボックスを抽出
+- `Crop`: BBoxから画像を切り抜く
+- `Uncrop`: BBoxから画像を元に戻す
+
+## colors
+- `Colored Image`: 指定されたサイズの一定の色の画像
+- `RGB to HSV`: -
+- `HSV to RGB`: -
+- `Color Correct`: 基本的なカラーコレクションツール
+<img src="https://github.com/melMass/comfy_mtb/assets/7041726/7c20ac83-31ff-40ea-a1a0-06c2acefb2ef" width=345/>
+
+## 顔検出 / スワッピング
+- `Face Swap`: deepinsight/insightfaceモデルを使用した顔の入れ替え(このノードは初期バージョンでは「Roop」と呼ばれていましたが、同じ機能を提供します。Roopは単にこれらのモデルを使用するアプリです)
+> **注意**
+> 顔のインデックスを使用して置き換える顔を選択できます。以下を参照してください:
+<img src="https://github.com/melMass/comfy_mtb/assets/7041726/2e9d6066-c466-4a01-bd6c-315f7f1e8b42" width=320/>
+- `Load Face Swap Model`: 顔の交換のためのinsightfaceモデルを読み込む
+- `Restore Face`: [GFPGan](https://github.com/TencentARC/GFPGAN)を使用して顔を復元し、`Face Swap`と組み合わせて使用すると非常に効果的であり、`bg_upscaler`のComfyネイティブアップスケーラーもサポートしています。
+
+## 画像補間(アニメーション)
+- `Load Film Model`: [FILM](https://github.com/google-research/frame-interpolation)モデルを読み込む
+- `Film Interpolation`: [FILM](https://github.com/google-research/frame-interpolation)を使用して入力フレームを処理する
+<img src="https://github.com/melMass/comfy_mtb/assets/7041726/3afd1647-6634-4b92-a34b-51432e6a9834" width=400/>
+- `Export to Prores (experimental)`: 入力フレームをProRes 4444 movファイルにエクスポートします。これは現在は単純なものですが、`Film Interpolation`と組み合わせて使用するためのffmpegのstdinを使用して生のNumPy配列を送信するもので、拡張することもできます。
+
+## 画像操作
+- `Blur`: ガウスフィルタを使用して画像をぼかす
+- `Deglaze Image`: [FN16](https://github.com/Fannovel16/FN16-ComfyUI-nodes/blob/main/DeglazeImage.py)から取得
+- `Denoise`: 入力画像のノイズを除去する
+- `Image Compare`: 2つの画像を比較し、差分画像を返す
+- `Image Premultiply`: 画像をマスクで乗算
+- `Image Remove Background Rembg`: [RemBG](https://github.com/danielgatis/rembg)を使用した背景除去
+<img src="https://github.com/melMass/comfy_mtb/assets/7041726/e69253b4-c03c-45e9-92b5-aa46fb887be8" width=320/>
+- `Image Resize Factor`: [WAS Node Suite](https://github.com/WASasquatch/was-node-suite-comfyui)から抽出され、いくつかの編集(特に複数の画像のサポート)と機能の削減が行われました。
+- `Mask To Image`: マスク(アルファ)をカラーと背景を持つRGBイメージに変換します。
+- `Save Image Grid`: 入力バッチのすべての画像を画像グリッドとして保存します。
+
+## 潜在的なユーティリティ
+- `Latent Lerp`: 2つの潜在的なベクトルの間の線形補間(ブレンド)
+
+## その他のユーティリティ
+- `Concat Images`: 2つの画像ストリームを取り、他のComfyパイプラインでサポートされている画像のバッチとしてマージします。
+- `Image Resize Factor`: **非推奨**。組み込みの画像リサイズ機能を発見したため、削除される予定です。
+- `Text To Image`: フォントを使用してテキストを画像に変換するためのユーティリティ
+- `Styles Loader`: csvファイルをロードし、行からドロップダウンを作成します(A111のようなもの)
+<img src="https://github.com/melMass/comfy_mtb/assets/7041726/02fe3211-18ee-4e54-a029-931388f5fde8" width=320/>
+- `Smart Step`: KSamplerの高度な使用に使用するステップパーセントを取得する非常に基本的なノード
+- `Qr Code`: 基本的なQRコード生成器
+- `Save Tensors`: 将来的に削除される可能性のあるデバッグノード
+- `Int to Number`: WASSuiteの数値ノードの補完
+- `Smart Step`: `KAdvancedSampler`のステップ(開始/停止)を制御するための非常に基本的なツールで、パーセンテージを使用します。
+
+## テクスチャ
+
+- `DeepBump`: 1枚の画像から法線マップと高さマップを生成します。
+
+# Comfyリソース
+
+**ガイド**:
+- [公式の例(英語)](https://comfyanonymous.github.io/ComfyUI_examples/)
+- @BlenderNekoによる[ComfyUIコミュニティマニュアル(英語)](https://blenderneko.github.io/ComfyUI-docs/)
+
+- @tjhayasakaによる[Tomoakiの個人Wiki(日本語)](https://comfyui.creamlab.net/guides/)
+
+**拡張機能とカスタムノード**:
+- @WASasquatchによる[Comfyリスト用のプラグイン(英語)](https://github.com/WASasquatch/comfyui-plugins)
+
+- [CivitAIのComfyUIタグ(英語)](https://civitai.com/tag/comfyui)
comfy_mtb/README.md
ADDED
@@ -0,0 +1,105 @@
+# MTB Nodes
+
+[![embedded test](https://github.com/melMass/comfy_mtb/actions/workflows/test_embedded.yml/badge.svg)](https://github.com/melMass/comfy_mtb/actions/workflows/test_embedded.yml)
+
+<!-- omit in toc -->
+
+**Translated Readme (using DeepTranslate, PRs are welcome)**:
+![image](https://github.com/melMass/comfy_mtb/assets/7041726/f8429c14-3521-4e28-82a3-863d781976c0)
+[日本語による説明](./README-JP.md)
+![image](https://github.com/melMass/comfy_mtb/assets/7041726/d5cc1fdd-2820-4a5c-b2d7-482f1c222063)
+[中文说明](./README-CN.md)
+
+<a href="https://www.buymeacoffee.com/melmass" target="_blank"><img src="https://www.buymeacoffee.com/assets/img/custom_images/orange_img.png" alt="Buy Me A Coffee" style="height: 32px !important;width: 140px !important;box-shadow: 0px 3px 2px 0px rgba(190, 190, 190, 0.5) !important;-webkit-box-shadow: 0px 3px 2px 0px rgba(190, 190, 190, 0.5) !important;" ></a>
+
+[**Install Guide**](./INSTALL.md) | [**Examples**](https://github.com/melMass/comfy_mtb/wiki/Examples)
+
+Welcome to the MTB Nodes project! This codebase is open for you to explore and utilize as you wish. Its primary purpose is to build proof-of-concepts (POCs) for implementation in [MLOPs](https://github.com/Bismuth-Consultancy-BV/MLOPs). Many nodes in this project are inspired by existing community contributions or built-in functionalities.
+
+Before proceeding, please be aware of the licenses associated with certain libraries used in this project. For example, the `deepbump` library is licensed under [GPLv3](https://github.com/HugoTini/DeepBump/blob/master/LICENSE).
+
+- [Node List](#node-list)
+- [bbox](#bbox)
+- [colors](#colors)
+- [face detection / swapping](#face-detection--swapping)
+- [image interpolation (animation)](#image-interpolation-animation)
+- [image ops](#image-ops)
+- [latent utils](#latent-utils)
+- [misc utils](#misc-utils)
+- [textures](#textures)
+- [Comfy Resources](#comfy-resources)
+
+
+# Node List
+
+## bbox
+- `Bounding Box`: BBox constructor (custom type)
+- `BBox From Mask`: From a mask extract the bounding box
+- `Crop`: Crop image from BBox
+- `Uncrop`: Uncrop image from BBox
+
+## colors
+- `Colored Image`: Constant color image of given size
+- `RGB to HSV`: -
+- `HSV to RGB`: -
+- `Color Correct`: Basic color correction tools
+<img src="https://github.com/melMass/comfy_mtb/assets/7041726/7c20ac83-31ff-40ea-a1a0-06c2acefb2ef" width=345/>
+
+## face detection / swapping
+- `Face Swap`: Face swap using deepinsight/insightface models (this node used to be called `Roop` in early versions; it does the same thing, roop is *just* an app that uses those models)
+> **Note**
+> The face index allows you to choose which face to replace, as you can see here:
+<img src="https://github.com/melMass/comfy_mtb/assets/7041726/2e9d6066-c466-4a01-bd6c-315f7f1e8b42" width=320/>
+- `Load Face Swap Model`: Load an insightface model for face swapping
+- `Restore Face`: Using [GFPGan](https://github.com/TencentARC/GFPGAN) to restore faces, works great in conjunction with `Face Swap` and supports Comfy native upscalers for the `bg_upscaler`
+
+## image interpolation (animation)
+- `Load Film Model`: Loads a [FILM](https://github.com/google-research/frame-interpolation) model
+- `Film Interpolation`: Process input frames using [FILM](https://github.com/google-research/frame-interpolation)
+<img src="https://github.com/melMass/comfy_mtb/assets/7041726/3afd1647-6634-4b92-a34b-51432e6a9834" width=400/>
+- `Export to Prores (experimental)`: Exports the input frames to a ProRes 4444 mov file. This uses ffmpeg stdin to send raw numpy arrays, is used with `Film Interpolation`, and is very simple for now but could be expanded upon.
+
+## image ops
+- `Blur`: Blur an image using a Gaussian filter.
+- `Deglaze Image`: taken from [FN16](https://github.com/Fannovel16/FN16-ComfyUI-nodes/blob/main/DeglazeImage.py)
+- `Denoise`: Denoise the input image
+- `Image Compare`: Compare two images and return a difference image
+- `Image Premultiply`: Premultiply image with mask
+- `Image Remove Background Rembg`: [RemBG](https://github.com/danielgatis/rembg) powered background removal.
+<img src="https://github.com/melMass/comfy_mtb/assets/7041726/e69253b4-c03c-45e9-92b5-aa46fb887be8" width=320/>
+- `Image Resize Factor`: Extracted mostly from [WAS Node Suite](https://github.com/WASasquatch/was-node-suite-comfyui), with a few edits (most notably multiple image support) and fewer features.
+- `Mask To Image`: Converts a mask (alpha) to an RGB image with a color and background
+- `Save Image Grid`: Save all the images in the input batch as a grid of images.
+
+## latent utils
+- `Latent Lerp`: Linear interpolation (blend) between two latents
+
+
+## misc utils
+- `Concat Images`: Takes two image streams and merges them into a batch of images supported by other Comfy pipelines.
+- `Image Resize Factor`: **Deprecated**, I since discovered the builtin image resize.
+- `Text To Image`: Utils to convert text to an image using a font
+- `Styles Loader`: Load csv files and populate a dropdown from the rows (à la A111)
+<img src="https://github.com/melMass/comfy_mtb/assets/7041726/02fe3211-18ee-4e54-a029-931388f5fde8" width=320/>
+- `Smart Step`: A very basic node to get the step percent to use in KSampler advanced
+- `Qr Code`: Basic QR Code generator
+- `Save Tensors`: Debug node that will probably be removed in the future
+- `Int to Number`: Supplement for WASSuite number nodes
+- `Smart Step`: A very basic tool to control the steps (start/stop) of the `KAdvancedSampler` using percentages
+
+## textures
+
+- `DeepBump`: Normal & height maps generation from single pictures
+
+# Comfy Resources
+
+**Guides**:
+- [Official Examples (eng)](https://comfyanonymous.github.io/ComfyUI_examples/)
+- [ComfyUI Community Manual (eng)](https://blenderneko.github.io/ComfyUI-docs/) by @BlenderNeko
+
+- [Tomoaki's personal Wiki (jap)](https://comfyui.creamlab.net/guides/) by @tjhayasaka
+
+**Extensions and Custom Nodes**:
+- [Plugins for Comfy List (eng)](https://github.com/WASasquatch/comfyui-plugins) by @WASasquatch
+
+- [ComfyUI tag on CivitAI (eng)](https://civitai.com/tag/comfyui)
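As an aside on the `Latent Lerp` entry above: the "linear interpolation (blend)" it mentions is the usual lerp formula applied to the two latent tensors. A hypothetical torch sketch of that idea (not the node's actual implementation):

```python
# Hypothetical sketch of a latent lerp, not the node's actual code.
import torch

def latent_lerp(a: torch.Tensor, b: torch.Tensor, t: float) -> torch.Tensor:
    """Blend two latents of the same shape: t=0 returns a, t=1 returns b."""
    return a * (1.0 - t) + b * t

a = torch.randn(1, 4, 64, 64)  # dummy latent tensors
b = torch.randn(1, 4, 64, 64)
half = latent_lerp(a, b, 0.5)  # an even blend of the two
```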
comfy_mtb/__init__.py
ADDED
@@ -0,0 +1,320 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
###
# File: __init__.py
# Project: comfy_mtb
# Author: Mel Massadian
# Copyright (c) 2023 Mel Massadian
#
###
import os

# todo: don't override this if the user has that setup already
os.environ["TF_FORCE_GPU_ALLOW_GROWTH"] = "true"
os.environ["TF_GPU_ALLOCATOR"] = "cuda_malloc_async"

import traceback
from .log import log, blue_text, cyan_text, get_summary, get_label
from .utils import here
from .utils import comfy_dir
import importlib
import os
import ast
import json

NODE_CLASS_MAPPINGS = {}
NODE_DISPLAY_NAME_MAPPINGS = {}
NODE_CLASS_MAPPINGS_DEBUG = {}

__version__ = "0.1.4"


def extract_nodes_from_source(filename):
    source_code = ""

    with open(filename, "r") as file:
        source_code = file.read()

    nodes = []

    try:
        parsed = ast.parse(source_code)
        for node in ast.walk(parsed):
            if isinstance(node, ast.Assign) and len(node.targets) == 1:
                target = node.targets[0]
                if isinstance(target, ast.Name) and target.id == "__nodes__":
                    value = ast.get_source_segment(source_code, node.value)
                    node_value = ast.parse(value).body[0].value
                    if isinstance(node_value, (ast.List, ast.Tuple)):
                        nodes.extend(
                            element.id
                            for element in node_value.elts
                            if isinstance(element, ast.Name)
                        )
                    break
    except SyntaxError:
        log.error("Failed to parse")
    return nodes


def load_nodes():
    errors = []
    nodes = []
    nodes_failed = []

    for filename in (here / "nodes").iterdir():
        if filename.suffix == ".py":
            module_name = filename.stem

            try:
                module = importlib.import_module(
                    f".nodes.{module_name}", package=__package__
                )
                _nodes = getattr(module, "__nodes__")
                nodes.extend(_nodes)
                log.debug(f"Imported {module_name} nodes")

            except AttributeError:
                pass  # wip nodes
            except Exception:
                error_message = traceback.format_exc().splitlines()[-1]
                errors.append(
                    f"Failed to import module {module_name} because {error_message}"
                )
                # Read __nodes__ variable from the source file
                nodes_failed.extend(extract_nodes_from_source(filename))

    if errors:
        log.info(
            f"Some nodes failed to load:\n\t"
            + "\n\t".join(errors)
            + "\n\n"
            + "Check that you properly installed the dependencies.\n"
            + "If you think this is a bug, please report it on the github page (https://github.com/melMass/comfy_mtb/issues)"
        )

    return (nodes, nodes_failed)


# - REGISTER WEB EXTENSIONS
web_extensions_root = comfy_dir / "web" / "extensions"
web_mtb = web_extensions_root / "mtb"

if web_mtb.exists():
    log.debug(f"Web extensions folder found at {web_mtb}")
    if not os.path.islink(web_mtb.as_posix()):
        log.warn(
            f"Web extensions folder at {web_mtb} is not a symlink, if updating please delete it before"
        )


elif web_extensions_root.exists():
    web_tgt = here / "web"
    src = web_tgt.as_posix()
    dst = web_mtb.as_posix()
    try:
        if os.name == "nt":
            import _winapi

            _winapi.CreateJunction(src, dst)
        else:
            os.symlink(web_tgt.as_posix(), web_mtb.as_posix())

    except OSError:
        log.warn(f"Failed to create symlink to {web_mtb}, trying to copy it")
        try:
            import shutil

            shutil.copytree(web_tgt, web_mtb)
            log.info(f"Successfully copied {web_tgt} to {web_mtb}")
        except Exception as e:
            log.warn(
                f"Failed to symlink and copy {web_tgt} to {web_mtb}. Please copy the folder manually."
            )
            log.warn(e)

    except Exception as e:
        log.warn(
            f"Failed to create symlink to {web_mtb}. Please copy the folder manually."
        )
        log.warn(e)
else:
    log.warn(
        f"Comfy root probably not found automatically, please copy the folder {web_mtb} manually in the web/extensions folder of ComfyUI"
    )

# - REGISTER NODES
nodes, failed = load_nodes()
for node_class in nodes:
    class_name = node_class.__name__
    node_label = f"{get_label(class_name)} (mtb)"
    NODE_CLASS_MAPPINGS[node_label] = node_class
    NODE_DISPLAY_NAME_MAPPINGS[class_name] = node_label
    NODE_CLASS_MAPPINGS_DEBUG[node_label] = node_class.__doc__
    # TODO: I removed this, I find it more convenient to write without spaces, but it breaks every of my workflows
    # TODO (cont): and until I find a way to automate the conversion, I'll leave it like this

if os.environ.get("MTB_EXPORT"):
    with open(here / "node_list.json", "w") as f:
        f.write(
            json.dumps(
                {
                    k: NODE_CLASS_MAPPINGS_DEBUG[k]
                    for k in sorted(NODE_CLASS_MAPPINGS_DEBUG.keys())
                },
                indent=4,
            )
        )

log.info(
    f"Loaded the following nodes:\n\t"
    + "\n\t".join(
        f"{cyan_text(k)}: {blue_text(get_summary(doc)) if doc else '-'}"
        for k, doc in NODE_CLASS_MAPPINGS_DEBUG.items()
    )
)

# - ENDPOINT
from server import PromptServer
from .log import log
from aiohttp import web
from importlib import reload
import logging
from .endpoint import endlog

if hasattr(PromptServer, "instance"):
    restore_deps = ["basicsr"]
    swap_deps = ["insightface", "onnxruntime"]

    node_dependency_mapping = {
        "FaceSwap": swap_deps,
        "LoadFaceSwapModel": swap_deps,
        "LoadFaceAnalysisModel": restore_deps,
    }

    @PromptServer.instance.routes.get("/mtb/status")
    async def get_full_library(request):
        from . import endpoint

        reload(endpoint)

        endlog.debug("Getting node registration status")
        # Check if the request prefers HTML content
        if "text/html" in request.headers.get("Accept", ""):
            # # Return an HTML page
            html_response = endpoint.render_table(
                NODE_CLASS_MAPPINGS_DEBUG, title="Registered"
            )
            html_response += endpoint.render_table(
                {
                    k: {"dependencies": node_dependency_mapping.get(k)}
                    if node_dependency_mapping.get(k)
                    else "-"
                    for k in failed
                },
                title="Failed to load",
            )

            return web.Response(
                text=endpoint.render_base_template("MTB", html_response),
                content_type="text/html",
            )

        return web.json_response(
            {
                "registered": NODE_CLASS_MAPPINGS_DEBUG,
                "failed": failed,
            }
        )

    @PromptServer.instance.routes.post("/mtb/debug")
    async def set_debug(request):
        json_data = await request.json()
        enabled = json_data.get("enabled")
        if enabled:
            os.environ["MTB_DEBUG"] = "true"
            log.setLevel(logging.DEBUG)
            log.debug("Debug mode set from API (/mtb/debug POST route)")

        elif "MTB_DEBUG" in os.environ:
            # del os.environ["MTB_DEBUG"]
            os.environ.pop("MTB_DEBUG")
            log.setLevel(logging.INFO)

        return web.json_response(
            {"message": f"Debug mode {'set' if enabled else 'unset'}"}
        )

    @PromptServer.instance.routes.get("/mtb")
    async def get_home(request):
        from . import endpoint

        reload(endpoint)
        # Check if the request prefers HTML content
        if "text/html" in request.headers.get("Accept", ""):
            # # Return an HTML page
            html_response = """
            <div class="flex-container menu">
                <a href="/mtb/debug">debug</a>
                <a href="/mtb/status">status</a>
            </div>
            """
            return web.Response(
                text=endpoint.render_base_template("MTB", html_response),
                content_type="text/html",
            )

        # Return JSON for other requests
        return web.json_response({"message": "Welcome to MTB!"})

    @PromptServer.instance.routes.get("/mtb/debug")
    async def get_debug(request):
        from . import endpoint

        reload(endpoint)
        enabled = "MTB_DEBUG" in os.environ
        # Check if the request prefers HTML content
        if "text/html" in request.headers.get("Accept", ""):
            # # Return an HTML page
            html_response = f"""
            <h1>MTB Debug Status: {'Enabled' if enabled else 'Disabled'}</h1>
            """
            return web.Response(
                text=endpoint.render_base_template("Debug", html_response),
                content_type="text/html",
            )

        # Return JSON for other requests
        return web.json_response({"enabled": enabled})

    @PromptServer.instance.routes.get("/mtb/actions")
    async def no_route(request):
        from . import endpoint

        if "text/html" in request.headers.get("Accept", ""):
            html_response = """
            <h1>Actions has no get for now...</h1>
            """
            return web.Response(
                text=endpoint.render_base_template("Actions", html_response),
                content_type="text/html",
            )
        return web.json_response({"message": "actions has no get for now"})

    @PromptServer.instance.routes.post("/mtb/actions")
    async def do_action(request):
        from . import endpoint

        reload(endpoint)

        return await endpoint.do_action(request)


# - WAS Dictionary
MANIFEST = {
    "name": "MTB Nodes",  # The title that will be displayed on Node Class menu,. and Node Class view
    "version": (0, 1, 0),  # Version of the custom_node or sub module
    "author": "Mel Massadian",  # Author or organization of the custom_node or sub module
    "project": "https://github.com/melMass/comfy_mtb",  # The address that the `name` value will link to on Node Class Views
    "description": "Set of nodes that enhance your animation workflow and provide a range of useful tools including features such as manipulating bounding boxes, perform color corrections, swap faces in images, interpolate frames for smooth animation, export to ProRes format, apply various image operations, work with latent spaces, generate QR codes, and create normal and height maps for textures.",
}
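
A note on the registration flow above: load_nodes() only picks up classes that a module under nodes/ exports through a module-level __nodes__ list, and each class docstring is what NODE_CLASS_MAPPINGS_DEBUG stores (and what node_list.json contains when MTB_EXPORT is set). The sketch below is a hypothetical node module written only to illustrate that convention; the class name, category and parameters are assumptions of mine and are not part of this commit, though the INPUT_TYPES / RETURN_TYPES / FUNCTION / CATEGORY attributes follow the usual ComfyUI custom-node pattern.

# hypothetical nodes/example.py -- illustrates the __nodes__ convention only
class ExampleGain:
    """Multiplies an image tensor by a constant gain."""  # docstring becomes the node description

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "image": ("IMAGE",),
                "gain": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0}),
            }
        }

    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "apply"
    CATEGORY = "mtb/examples"

    def apply(self, image, gain):
        # IMAGE tensors are batched float tensors in ComfyUI, so a plain multiply is enough here
        return (image * gain,)


# load_nodes() reads this attribute after import; extract_nodes_from_source()
# recovers the same names with ast when the import itself fails.
__nodes__ = [ExampleGain]
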
comfy_mtb/endpoint.py
ADDED
@@ -0,0 +1,185 @@
from .utils import here, run_command, comfy_mode
from aiohttp import web
from .log import mklog
import sys

endlog = mklog("mtb endpoint")

# - ACTIONS
import requirements


def ACTIONS_installDependency(dependency_names=None):
    if dependency_names is None:
        return {"error": "No dependency name provided"}
    endlog.debug(f"Received Install Dependency request for {dependency_names}")
    reqs = []
    if comfy_mode == "embeded":
        reqs = list(requirements.parse((here / "reqs_portable.txt").read_text()))
    else:
        reqs = list(requirements.parse((here / "reqs.txt").read_text()))
    print([x.specs for x in reqs])
    print(
        "\n".join([f"{x.line} {''.join(x.specs[0] if x.specs else '')}" for x in reqs])
    )
    for dependency_name in dependency_names:
        for req in reqs:
            if req.name == dependency_name:
                endlog.debug(f"Dependency {dependency_name} installed")
                break
    return {"success": True}


def ACTIONS_getStyles(style_name=None):
    from .nodes.conditions import StylesLoader

    styles = StylesLoader.options
    match_list = ["name"]
    if styles:
        filtered_styles = {
            key: value
            for key, value in styles.items()
            if not key.startswith("__") and key not in match_list
        }
        if style_name:
            return filtered_styles.get(style_name, {"error": "Style not found"})
        return filtered_styles
    return {"error": "No styles found"}


async def do_action(request) -> web.Response:
    endlog.debug("Init action request")
    request_data = await request.json()
    name = request_data.get("name")
    args = request_data.get("args")

    endlog.debug(f"Received action request: {name} {args}")

    method_name = f"ACTIONS_{name}"
    method = globals().get(method_name)

    if callable(method):
        result = method(args) if args else method()
        endlog.debug(f"Action result: {result}")
        return web.json_response({"result": result})

    available_methods = [
        attr[len("ACTIONS_") :] for attr in globals() if attr.startswith("ACTIONS_")
    ]

    return web.json_response(
        {"error": "Invalid method name.", "available_methods": available_methods}
    )


# - HTML UTILS


def dependencies_button(name, dependencies):
    deps = ",".join([f"'{x}'" for x in dependencies])
    return f"""
    <button class="dependency-button" onclick="window.mtb_action('installDependency',[{deps}])">Install {name} deps</button>
    """


def render_table(table_dict, sort=True, title=None):
    table_dict = sorted(
        table_dict.items(), key=lambda item: item[0]
    )  # Sort the dictionary by keys

    table_rows = ""
    for name, item in table_dict:
        if isinstance(item, dict):
            if "dependencies" in item:
                table_rows += f"<tr><td>{name}</td><td>"
                table_rows += f"{dependencies_button(name,item['dependencies'])}"

                table_rows += "</td></tr>"
            else:
                table_rows += f"<tr><td>{name}</td><td>{render_table(item)}</td></tr>"
        # elif isinstance(item, str):
        #     table_rows += f"<tr><td>{name}</td><td>{item}</td></tr>"
        else:
            table_rows += f"<tr><td>{name}</td><td>{item}</td></tr>"

    return f"""
    <div class="table-container">
    {"" if title is None else f"<h1>{title}</h1>"}
    <table>
        <thead>
            <tr>
                <th>Name</th>
                <th>Description</th>
            </tr>
        </thead>
        <tbody>
            {table_rows}
        </tbody>
    </table>
    </div>
    """


def render_base_template(title, content):
    css_content = ""
    css_path = here / "html" / "style.css"
    if css_path:
        with open(css_path, "r") as css_file:
            css_content = css_file.read()

    github_icon_svg = """<svg xmlns="http://www.w3.org/2000/svg" fill="whitesmoke" height="3em" viewBox="0 0 496 512"><path d="M165.9 397.4c0 2-2.3 3.6-5.2 3.6-3.3.3-5.6-1.3-5.6-3.6 0-2 2.3-3.6 5.2-3.6 3-.3 5.6 1.3 5.6 3.6zm-31.1-4.5c-.7 2 1.3 4.3 4.3 4.9 2.6 1 5.6 0 6.2-2s-1.3-4.3-4.3-5.2c-2.6-.7-5.5.3-6.2 2.3zm44.2-1.7c-2.9.7-4.9 2.6-4.6 4.9.3 2 2.9 3.3 5.9 2.6 2.9-.7 4.9-2.6 4.6-4.6-.3-1.9-3-3.2-5.9-2.9zM244.8 8C106.1 8 0 113.3 0 252c0 110.9 69.8 205.8 169.5 239.2 12.8 2.3 17.3-5.6 17.3-12.1 0-6.2-.3-40.4-.3-61.4 0 0-70 15-84.7-29.8 0 0-11.4-29.1-27.8-36.6 0 0-22.9-15.7 1.6-15.4 0 0 24.9 2 38.6 25.8 21.9 38.6 58.6 27.5 72.9 20.9 2.3-16 8.8-27.1 16-33.7-55.9-6.2-112.3-14.3-112.3-110.5 0-27.5 7.6-41.3 23.6-58.9-2.6-6.5-11.1-33.3 2.6-67.9 20.9-6.5 69 27 69 27 20-5.6 41.5-8.5 62.8-8.5s42.8 2.9 62.8 8.5c0 0 48.1-33.6 69-27 13.7 34.7 5.2 61.4 2.6 67.9 16 17.7 25.8 31.5 25.8 58.9 0 96.5-58.9 104.2-114.8 110.5 9.2 7.9 17 22.9 17 46.4 0 33.7-.3 75.4-.3 83.6 0 6.5 4.6 14.4 17.3 12.1C428.2 457.8 496 362.9 496 252 496 113.3 383.5 8 244.8 8zM97.2 352.9c-1.3 1-1 3.3.7 5.2 1.6 1.6 3.9 2.3 5.2 1 1.3-1 1-3.3-.7-5.2-1.6-1.6-3.9-2.3-5.2-1zm-10.8-8.1c-.7 1.3.3 2.9 2.3 3.9 1.6 1 3.6.7 4.3-.7.7-1.3-.3-2.9-2.3-3.9-2-.6-3.6-.3-4.3.7zm32.4 35.6c-1.6 1.3-1 4.3 1.3 6.2 2.3 2.3 5.2 2.6 6.5 1 1.3-1.3.7-4.3-1.3-6.2-2.2-2.3-5.2-2.6-6.5-1zm-11.4-14.7c-1.6 1-1.6 3.6 0 5.9 1.6 2.3 4.3 3.3 5.6 2.3 1.6-1.3 1.6-3.9 0-6.2-1.4-2.3-4-3.3-5.6-2z"/></svg>"""
    return f"""
    <!DOCTYPE html>
    <html>
    <head>
        <title>{title}</title>
        <style>
            {css_content}
        </style>
    </head>
    <script type="module">
        import {{ api }} from '/scripts/api.js'
        const mtb_action = async (action, args) =>{{
            console.log(`Sending ${{action}} with args: ${{args}}`)
        }}
        window.mtb_action = async (action, args) =>{{
            console.log(`Sending ${{action}} with args: ${{args}} to the API`)
            const res = await api.fetchApi('/actions', {{
                method: 'POST',
                body: JSON.stringify({{
                    name: action,
                    args,
                }}),
            }})

            const output = await res.json()
            console.debug(`Received ${{action}} response:`, output)
            if (output?.result?.error){{
                alert(`An error occured: {{output?.result?.error}}`)
            }}
            return output?.result
        }}
    </script>
    <body>
        <header>
            <a href="/">Back to Comfy</a>
            <div class="mtb_logo">
                <img src="https://repository-images.githubusercontent.com/649047066/a3eef9a7-20dd-4ef9-b839-884502d4e873" alt="Comfy MTB Logo" height="70" width="128">
                <span class="title">Comfy MTB</span></div>
            <a style="width:128px;text-align:center" href="https://www.github.com/melmass/comfy_mtb">
                {github_icon_svg}
            </a>
        </header>

        <main>
            {content}
        </main>

        <footer>
            <!-- Shared footer content here -->
        </footer>
    </body>

    </html>
    """
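
Taken together, __init__.py registers the routes and endpoint.py implements the dispatch: a POST to /mtb/actions with a JSON body of the form {"name": ..., "args": ...} is resolved to an ACTIONS_<name> function through globals(), while /mtb/status and /mtb/debug answer either HTML or JSON depending on the Accept header. The snippet below is a rough sketch of exercising those routes with plain urllib; the host and port (ComfyUI's common default of 127.0.0.1:8188) and the assumption that a server with comfy_mtb loaded is running are mine, not part of this commit.

# sketch: poke the mtb endpoints of a locally running ComfyUI instance (assumed address)
import json
from urllib import request

BASE = "http://127.0.0.1:8188"  # assumed default ComfyUI address


def call(path, payload=None):
    """GET when payload is None, otherwise POST the payload as JSON."""
    data = json.dumps(payload).encode() if payload is not None else None
    req = request.Request(
        BASE + path,
        data=data,
        headers={"Content-Type": "application/json", "Accept": "application/json"},
    )
    with request.urlopen(req) as resp:
        return json.loads(resp.read())


print(call("/mtb/status"))                          # registered + failed node mappings
print(call("/mtb/debug", {"enabled": True}))        # flips MTB_DEBUG and the log level
print(call("/mtb/actions", {"name": "getStyles"}))  # dispatched to ACTIONS_getStyles()
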
comfy_mtb/examples/01-faceswap.json
ADDED
@@ -0,0 +1,1048 @@
1 |
+
{
|
2 |
+
"last_node_id": 86,
|
3 |
+
"last_link_id": 178,
|
4 |
+
"nodes": [
|
5 |
+
{
|
6 |
+
"id": 59,
|
7 |
+
"type": "Reroute",
|
8 |
+
"pos": [
|
9 |
+
-150.35178124999982,
|
10 |
+
644.4360633544919
|
11 |
+
],
|
12 |
+
"size": [
|
13 |
+
75,
|
14 |
+
26
|
15 |
+
],
|
16 |
+
"flags": {},
|
17 |
+
"order": 13,
|
18 |
+
"mode": 0,
|
19 |
+
"inputs": [
|
20 |
+
{
|
21 |
+
"name": "",
|
22 |
+
"type": "*",
|
23 |
+
"link": 124
|
24 |
+
}
|
25 |
+
],
|
26 |
+
"outputs": [
|
27 |
+
{
|
28 |
+
"name": "",
|
29 |
+
"type": "VAE",
|
30 |
+
"links": [
|
31 |
+
139
|
32 |
+
],
|
33 |
+
"slot_index": 0
|
34 |
+
}
|
35 |
+
],
|
36 |
+
"properties": {
|
37 |
+
"showOutputText": false,
|
38 |
+
"horizontal": false
|
39 |
+
}
|
40 |
+
},
|
41 |
+
{
|
42 |
+
"id": 56,
|
43 |
+
"type": "Reroute",
|
44 |
+
"pos": [
|
45 |
+
-1580.8297949218763,
|
46 |
+
644.7740239257807
|
47 |
+
],
|
48 |
+
"size": [
|
49 |
+
75,
|
50 |
+
26
|
51 |
+
],
|
52 |
+
"flags": {},
|
53 |
+
"order": 9,
|
54 |
+
"mode": 0,
|
55 |
+
"inputs": [
|
56 |
+
{
|
57 |
+
"name": "",
|
58 |
+
"type": "*",
|
59 |
+
"link": 117
|
60 |
+
}
|
61 |
+
],
|
62 |
+
"outputs": [
|
63 |
+
{
|
64 |
+
"name": "",
|
65 |
+
"type": "VAE",
|
66 |
+
"links": [
|
67 |
+
124
|
68 |
+
]
|
69 |
+
}
|
70 |
+
],
|
71 |
+
"properties": {
|
72 |
+
"showOutputText": false,
|
73 |
+
"horizontal": false
|
74 |
+
}
|
75 |
+
},
|
76 |
+
{
|
77 |
+
"id": 57,
|
78 |
+
"type": "Reroute",
|
79 |
+
"pos": [
|
80 |
+
-673.8297949218747,
|
81 |
+
-185.22597607421872
|
82 |
+
],
|
83 |
+
"size": [
|
84 |
+
75,
|
85 |
+
26
|
86 |
+
],
|
87 |
+
"flags": {},
|
88 |
+
"order": 12,
|
89 |
+
"mode": 0,
|
90 |
+
"inputs": [
|
91 |
+
{
|
92 |
+
"name": "",
|
93 |
+
"type": "*",
|
94 |
+
"link": 135
|
95 |
+
}
|
96 |
+
],
|
97 |
+
"outputs": [
|
98 |
+
{
|
99 |
+
"name": "",
|
100 |
+
"type": "MODEL",
|
101 |
+
"links": [
|
102 |
+
120
|
103 |
+
],
|
104 |
+
"slot_index": 0
|
105 |
+
}
|
106 |
+
],
|
107 |
+
"properties": {
|
108 |
+
"showOutputText": false,
|
109 |
+
"horizontal": false
|
110 |
+
}
|
111 |
+
},
|
112 |
+
{
|
113 |
+
"id": 65,
|
114 |
+
"type": "Reroute",
|
115 |
+
"pos": [
|
116 |
+
-1512.8297949218763,
|
117 |
+
-181.22597607421872
|
118 |
+
],
|
119 |
+
"size": [
|
120 |
+
75,
|
121 |
+
26
|
122 |
+
],
|
123 |
+
"flags": {},
|
124 |
+
"order": 7,
|
125 |
+
"mode": 0,
|
126 |
+
"inputs": [
|
127 |
+
{
|
128 |
+
"name": "",
|
129 |
+
"type": "*",
|
130 |
+
"link": 134
|
131 |
+
}
|
132 |
+
],
|
133 |
+
"outputs": [
|
134 |
+
{
|
135 |
+
"name": "",
|
136 |
+
"type": "MODEL",
|
137 |
+
"links": [
|
138 |
+
135
|
139 |
+
]
|
140 |
+
}
|
141 |
+
],
|
142 |
+
"properties": {
|
143 |
+
"showOutputText": false,
|
144 |
+
"horizontal": false
|
145 |
+
}
|
146 |
+
},
|
147 |
+
{
|
148 |
+
"id": 7,
|
149 |
+
"type": "CLIPTextEncode",
|
150 |
+
"pos": [
|
151 |
+
-834.8297949218747,
|
152 |
+
206.77402392578134
|
153 |
+
],
|
154 |
+
"size": [
|
155 |
+
210,
|
156 |
+
54
|
157 |
+
],
|
158 |
+
"flags": {},
|
159 |
+
"order": 10,
|
160 |
+
"mode": 0,
|
161 |
+
"inputs": [
|
162 |
+
{
|
163 |
+
"name": "clip",
|
164 |
+
"type": "CLIP",
|
165 |
+
"link": 142
|
166 |
+
},
|
167 |
+
{
|
168 |
+
"name": "text",
|
169 |
+
"type": "STRING",
|
170 |
+
"link": 164,
|
171 |
+
"widget": {
|
172 |
+
"name": "text",
|
173 |
+
"config": [
|
174 |
+
"STRING",
|
175 |
+
{
|
176 |
+
"multiline": true
|
177 |
+
}
|
178 |
+
]
|
179 |
+
},
|
180 |
+
"slot_index": 1
|
181 |
+
}
|
182 |
+
],
|
183 |
+
"outputs": [
|
184 |
+
{
|
185 |
+
"name": "CONDITIONING",
|
186 |
+
"type": "CONDITIONING",
|
187 |
+
"links": [
|
188 |
+
6
|
189 |
+
],
|
190 |
+
"slot_index": 0
|
191 |
+
}
|
192 |
+
],
|
193 |
+
"properties": {
|
194 |
+
"Node name for S&R": "CLIPTextEncode"
|
195 |
+
},
|
196 |
+
"widgets_values": [
|
197 |
+
"worst quality, hands, embedding:EasyNegative,"
|
198 |
+
]
|
199 |
+
},
|
200 |
+
{
|
201 |
+
"id": 5,
|
202 |
+
"type": "EmptyLatentImage",
|
203 |
+
"pos": [
|
204 |
+
-913.8297949218747,
|
205 |
+
326.77402392578125
|
206 |
+
],
|
207 |
+
"size": [
|
208 |
+
315,
|
209 |
+
106
|
210 |
+
],
|
211 |
+
"flags": {},
|
212 |
+
"order": 0,
|
213 |
+
"mode": 0,
|
214 |
+
"outputs": [
|
215 |
+
{
|
216 |
+
"name": "LATENT",
|
217 |
+
"type": "LATENT",
|
218 |
+
"links": [
|
219 |
+
2
|
220 |
+
],
|
221 |
+
"slot_index": 0
|
222 |
+
}
|
223 |
+
],
|
224 |
+
"properties": {
|
225 |
+
"Node name for S&R": "EmptyLatentImage"
|
226 |
+
},
|
227 |
+
"widgets_values": [
|
228 |
+
768,
|
229 |
+
512,
|
230 |
+
1
|
231 |
+
]
|
232 |
+
},
|
233 |
+
{
|
234 |
+
"id": 6,
|
235 |
+
"type": "CLIPTextEncode",
|
236 |
+
"pos": [
|
237 |
+
-1040,
|
238 |
+
-2
|
239 |
+
],
|
240 |
+
"size": [
|
241 |
+
422.84503173828125,
|
242 |
+
164.31304931640625
|
243 |
+
],
|
244 |
+
"flags": {},
|
245 |
+
"order": 8,
|
246 |
+
"mode": 0,
|
247 |
+
"inputs": [
|
248 |
+
{
|
249 |
+
"name": "clip",
|
250 |
+
"type": "CLIP",
|
251 |
+
"link": 141
|
252 |
+
}
|
253 |
+
],
|
254 |
+
"outputs": [
|
255 |
+
{
|
256 |
+
"name": "CONDITIONING",
|
257 |
+
"type": "CONDITIONING",
|
258 |
+
"links": [
|
259 |
+
4
|
260 |
+
],
|
261 |
+
"slot_index": 0
|
262 |
+
}
|
263 |
+
],
|
264 |
+
"properties": {
|
265 |
+
"Node name for S&R": "CLIPTextEncode"
|
266 |
+
},
|
267 |
+
"widgets_values": [
|
268 |
+
"Medium cinematic shot of an old Caucasian man smiling, (NYC 1995), trench coat, golden ring, brown eyes, (with a blue light saber)"
|
269 |
+
]
|
270 |
+
},
|
271 |
+
{
|
272 |
+
"id": 16,
|
273 |
+
"type": "CheckpointLoaderSimple",
|
274 |
+
"pos": [
|
275 |
+
-2001,
|
276 |
+
193
|
277 |
+
],
|
278 |
+
"size": [
|
279 |
+
315,
|
280 |
+
98
|
281 |
+
],
|
282 |
+
"flags": {},
|
283 |
+
"order": 1,
|
284 |
+
"mode": 0,
|
285 |
+
"outputs": [
|
286 |
+
{
|
287 |
+
"name": "MODEL",
|
288 |
+
"type": "MODEL",
|
289 |
+
"links": [
|
290 |
+
134
|
291 |
+
],
|
292 |
+
"slot_index": 0
|
293 |
+
},
|
294 |
+
{
|
295 |
+
"name": "CLIP",
|
296 |
+
"type": "CLIP",
|
297 |
+
"links": [
|
298 |
+
141,
|
299 |
+
142
|
300 |
+
],
|
301 |
+
"slot_index": 1
|
302 |
+
},
|
303 |
+
{
|
304 |
+
"name": "VAE",
|
305 |
+
"type": "VAE",
|
306 |
+
"links": [
|
307 |
+
117
|
308 |
+
],
|
309 |
+
"slot_index": 2
|
310 |
+
}
|
311 |
+
],
|
312 |
+
"properties": {
|
313 |
+
"Node name for S&R": "CheckpointLoaderSimple"
|
314 |
+
},
|
315 |
+
"widgets_values": [
|
316 |
+
"revAnimated_v122.safetensors"
|
317 |
+
]
|
318 |
+
},
|
319 |
+
{
|
320 |
+
"id": 25,
|
321 |
+
"type": "PreviewImage",
|
322 |
+
"pos": [
|
323 |
+
2338.9229387812507,
|
324 |
+
-632.8275743593749
|
325 |
+
],
|
326 |
+
"size": [
|
327 |
+
726.28564453125,
|
328 |
+
475.3432312011719
|
329 |
+
],
|
330 |
+
"flags": {},
|
331 |
+
"order": 20,
|
332 |
+
"mode": 0,
|
333 |
+
"inputs": [
|
334 |
+
{
|
335 |
+
"name": "images",
|
336 |
+
"type": "IMAGE",
|
337 |
+
"link": 150
|
338 |
+
}
|
339 |
+
],
|
340 |
+
"properties": {
|
341 |
+
"Node name for S&R": "PreviewImage"
|
342 |
+
}
|
343 |
+
},
|
344 |
+
{
|
345 |
+
"id": 75,
|
346 |
+
"type": "PreviewImage",
|
347 |
+
"pos": [
|
348 |
+
1573.5946044921875,
|
349 |
+
-629.80322265625
|
350 |
+
],
|
351 |
+
"size": [
|
352 |
+
691.7459716796875,
|
353 |
+
479.6098327636719
|
354 |
+
],
|
355 |
+
"flags": {},
|
356 |
+
"order": 19,
|
357 |
+
"mode": 0,
|
358 |
+
"inputs": [
|
359 |
+
{
|
360 |
+
"name": "images",
|
361 |
+
"type": "IMAGE",
|
362 |
+
"link": 169
|
363 |
+
}
|
364 |
+
],
|
365 |
+
"properties": {
|
366 |
+
"Node name for S&R": "PreviewImage"
|
367 |
+
}
|
368 |
+
},
|
369 |
+
{
|
370 |
+
"id": 73,
|
371 |
+
"type": "PreviewImage",
|
372 |
+
"pos": [
|
373 |
+
760,
|
374 |
+
-630
|
375 |
+
],
|
376 |
+
"size": [
|
377 |
+
671.1859741210938,
|
378 |
+
483.1548156738281
|
379 |
+
],
|
380 |
+
"flags": {},
|
381 |
+
"order": 16,
|
382 |
+
"mode": 0,
|
383 |
+
"inputs": [
|
384 |
+
{
|
385 |
+
"name": "images",
|
386 |
+
"type": "IMAGE",
|
387 |
+
"link": 153
|
388 |
+
}
|
389 |
+
],
|
390 |
+
"properties": {
|
391 |
+
"Node name for S&R": "PreviewImage"
|
392 |
+
}
|
393 |
+
},
|
394 |
+
{
|
395 |
+
"id": 79,
|
396 |
+
"type": "Text box",
|
397 |
+
"pos": [
|
398 |
+
-1400,
|
399 |
+
276
|
400 |
+
],
|
401 |
+
"size": [
|
402 |
+
400,
|
403 |
+
200
|
404 |
+
],
|
405 |
+
"flags": {},
|
406 |
+
"order": 2,
|
407 |
+
"mode": 0,
|
408 |
+
"outputs": [
|
409 |
+
{
|
410 |
+
"name": "STRING",
|
411 |
+
"type": "STRING",
|
412 |
+
"links": [
|
413 |
+
164
|
414 |
+
],
|
415 |
+
"shape": 3,
|
416 |
+
"slot_index": 0
|
417 |
+
}
|
418 |
+
],
|
419 |
+
"title": "❌Mel Negatives (general) (Negative)",
|
420 |
+
"properties": {
|
421 |
+
"Node name for S&R": "Text box"
|
422 |
+
},
|
423 |
+
"widgets_values": [
|
424 |
+
"embedding:EasyNegative, embedding:EasyNegativeV2, watermark, text, deformed, disfigured, blurry"
|
425 |
+
]
|
426 |
+
},
|
427 |
+
{
|
428 |
+
"id": 66,
|
429 |
+
"type": "VAEDecodeTiled",
|
430 |
+
"pos": [
|
431 |
+
205,
|
432 |
+
-28
|
433 |
+
],
|
434 |
+
"size": [
|
435 |
+
210,
|
436 |
+
46
|
437 |
+
],
|
438 |
+
"flags": {
|
439 |
+
"collapsed": false
|
440 |
+
},
|
441 |
+
"order": 15,
|
442 |
+
"mode": 0,
|
443 |
+
"inputs": [
|
444 |
+
{
|
445 |
+
"name": "samples",
|
446 |
+
"type": "LATENT",
|
447 |
+
"link": 138
|
448 |
+
},
|
449 |
+
{
|
450 |
+
"name": "vae",
|
451 |
+
"type": "VAE",
|
452 |
+
"link": 139,
|
453 |
+
"slot_index": 1
|
454 |
+
}
|
455 |
+
],
|
456 |
+
"outputs": [
|
457 |
+
{
|
458 |
+
"name": "IMAGE",
|
459 |
+
"type": "IMAGE",
|
460 |
+
"links": [
|
461 |
+
153,
|
462 |
+
167
|
463 |
+
],
|
464 |
+
"shape": 3,
|
465 |
+
"slot_index": 0
|
466 |
+
}
|
467 |
+
],
|
468 |
+
"properties": {
|
469 |
+
"Node name for S&R": "VAEDecodeTiled"
|
470 |
+
}
|
471 |
+
},
|
472 |
+
{
|
473 |
+
"id": 70,
|
474 |
+
"type": "Restore Face (mtb)",
|
475 |
+
"pos": [
|
476 |
+
2320,
|
477 |
+
-50
|
478 |
+
],
|
479 |
+
"size": [
|
480 |
+
315,
|
481 |
+
186
|
482 |
+
],
|
483 |
+
"flags": {},
|
484 |
+
"order": 18,
|
485 |
+
"mode": 0,
|
486 |
+
"inputs": [
|
487 |
+
{
|
488 |
+
"name": "image",
|
489 |
+
"type": "IMAGE",
|
490 |
+
"link": 168
|
491 |
+
},
|
492 |
+
{
|
493 |
+
"name": "model",
|
494 |
+
"type": "FACEENHANCE_MODEL",
|
495 |
+
"link": 151,
|
496 |
+
"slot_index": 1
|
497 |
+
}
|
498 |
+
],
|
499 |
+
"outputs": [
|
500 |
+
{
|
501 |
+
"name": "IMAGE",
|
502 |
+
"type": "IMAGE",
|
503 |
+
"links": [
|
504 |
+
150
|
505 |
+
],
|
506 |
+
"shape": 3,
|
507 |
+
"slot_index": 0
|
508 |
+
}
|
509 |
+
],
|
510 |
+
"properties": {
|
511 |
+
"Node name for S&R": "Restore Face (mtb)"
|
512 |
+
},
|
513 |
+
"widgets_values": [
|
514 |
+
false,
|
515 |
+
false,
|
516 |
+
0.5,
|
517 |
+
true
|
518 |
+
]
|
519 |
+
},
|
520 |
+
{
|
521 |
+
"id": 72,
|
522 |
+
"type": "UpscaleModelLoader",
|
523 |
+
"pos": [
|
524 |
+
1715,
|
525 |
+
52
|
526 |
+
],
|
527 |
+
"size": [
|
528 |
+
260.3902282714844,
|
529 |
+
58
|
530 |
+
],
|
531 |
+
"flags": {},
|
532 |
+
"order": 3,
|
533 |
+
"mode": 0,
|
534 |
+
"outputs": [
|
535 |
+
{
|
536 |
+
"name": "UPSCALE_MODEL",
|
537 |
+
"type": "UPSCALE_MODEL",
|
538 |
+
"links": [
|
539 |
+
152
|
540 |
+
],
|
541 |
+
"shape": 3
|
542 |
+
}
|
543 |
+
],
|
544 |
+
"properties": {
|
545 |
+
"Node name for S&R": "UpscaleModelLoader"
|
546 |
+
},
|
547 |
+
"widgets_values": [
|
548 |
+
"4x-UltraSharp.pth"
|
549 |
+
]
|
550 |
+
},
|
551 |
+
{
|
552 |
+
"id": 71,
|
553 |
+
"type": "Load Face Enhance Model (mtb)",
|
554 |
+
"pos": [
|
555 |
+
2005,
|
556 |
+
40
|
557 |
+
],
|
558 |
+
"size": [
|
559 |
+
265.97601318359375,
|
560 |
+
87.31192779541016
|
561 |
+
],
|
562 |
+
"flags": {},
|
563 |
+
"order": 11,
|
564 |
+
"mode": 0,
|
565 |
+
"inputs": [
|
566 |
+
{
|
567 |
+
"name": "bg_upsampler",
|
568 |
+
"type": "UPSCALE_MODEL",
|
569 |
+
"link": 152,
|
570 |
+
"slot_index": 0
|
571 |
+
}
|
572 |
+
],
|
573 |
+
"outputs": [
|
574 |
+
{
|
575 |
+
"name": "model",
|
576 |
+
"type": "FACEENHANCE_MODEL",
|
577 |
+
"links": [
|
578 |
+
151
|
579 |
+
],
|
580 |
+
"shape": 3
|
581 |
+
}
|
582 |
+
],
|
583 |
+
"properties": {
|
584 |
+
"Node name for S&R": "Load Face Enhance Model (mtb)"
|
585 |
+
},
|
586 |
+
"widgets_values": [
|
587 |
+
"GFPGANv1.4.pth",
|
588 |
+
2
|
589 |
+
]
|
590 |
+
},
|
591 |
+
{
|
592 |
+
"id": 76,
|
593 |
+
"type": "Load Image From Url (mtb)",
|
594 |
+
"pos": [
|
595 |
+
624,
|
596 |
+
39
|
597 |
+
],
|
598 |
+
"size": [
|
599 |
+
315,
|
600 |
+
58
|
601 |
+
],
|
602 |
+
"flags": {},
|
603 |
+
"order": 4,
|
604 |
+
"mode": 0,
|
605 |
+
"outputs": [
|
606 |
+
{
|
607 |
+
"name": "IMAGE",
|
608 |
+
"type": "IMAGE",
|
609 |
+
"links": [
|
610 |
+
166
|
611 |
+
],
|
612 |
+
"shape": 3,
|
613 |
+
"slot_index": 0
|
614 |
+
}
|
615 |
+
],
|
616 |
+
"properties": {
|
617 |
+
"Node name for S&R": "Load Image From Url (mtb)"
|
618 |
+
},
|
619 |
+
"widgets_values": [
|
620 |
+
"https://lucasmuseum.org/assets/general/Lucas_Headshot_Color_web.jpg"
|
621 |
+
]
|
622 |
+
},
|
623 |
+
{
|
624 |
+
"id": 69,
|
625 |
+
"type": "Load Face Swap Model (mtb)",
|
626 |
+
"pos": [
|
627 |
+
621,
|
628 |
+
252
|
629 |
+
],
|
630 |
+
"size": [
|
631 |
+
315,
|
632 |
+
58
|
633 |
+
],
|
634 |
+
"flags": {},
|
635 |
+
"order": 5,
|
636 |
+
"mode": 0,
|
637 |
+
"outputs": [
|
638 |
+
{
|
639 |
+
"name": "FACESWAP_MODEL",
|
640 |
+
"type": "FACESWAP_MODEL",
|
641 |
+
"links": [
|
642 |
+
165
|
643 |
+
],
|
644 |
+
"shape": 3
|
645 |
+
}
|
646 |
+
],
|
647 |
+
"properties": {
|
648 |
+
"Node name for S&R": "Load Face Swap Model (mtb)"
|
649 |
+
},
|
650 |
+
"widgets_values": [
|
651 |
+
"inswapper_128.onnx"
|
652 |
+
]
|
653 |
+
},
|
654 |
+
{
|
655 |
+
"id": 81,
|
656 |
+
"type": "Load Face Analysis Model (mtb)",
|
657 |
+
"pos": [
|
658 |
+
624,
|
659 |
+
144
|
660 |
+
],
|
661 |
+
"size": [
|
662 |
+
315,
|
663 |
+
58
|
664 |
+
],
|
665 |
+
"flags": {},
|
666 |
+
"order": 6,
|
667 |
+
"mode": 0,
|
668 |
+
"outputs": [
|
669 |
+
{
|
670 |
+
"name": "FACE_ANALYSIS_MODEL",
|
671 |
+
"type": "FACE_ANALYSIS_MODEL",
|
672 |
+
"links": [
|
673 |
+
170
|
674 |
+
],
|
675 |
+
"shape": 3
|
676 |
+
}
|
677 |
+
],
|
678 |
+
"properties": {
|
679 |
+
"Node name for S&R": "Load Face Analysis Model (mtb)"
|
680 |
+
},
|
681 |
+
"widgets_values": [
|
682 |
+
"buffalo_l"
|
683 |
+
]
|
684 |
+
},
|
685 |
+
{
|
686 |
+
"id": 80,
|
687 |
+
"type": "Face Swap (mtb)",
|
688 |
+
"pos": [
|
689 |
+
1255,
|
690 |
+
-32
|
691 |
+
],
|
692 |
+
"size": [
|
693 |
+
210,
|
694 |
+
154
|
695 |
+
],
|
696 |
+
"flags": {},
|
697 |
+
"order": 17,
|
698 |
+
"mode": 0,
|
699 |
+
"inputs": [
|
700 |
+
{
|
701 |
+
"name": "image",
|
702 |
+
"type": "IMAGE",
|
703 |
+
"link": 167
|
704 |
+
},
|
705 |
+
{
|
706 |
+
"name": "reference",
|
707 |
+
"type": "IMAGE",
|
708 |
+
"link": 166
|
709 |
+
},
|
710 |
+
{
|
711 |
+
"name": "faceanalysis_model",
|
712 |
+
"type": "FACE_ANALYSIS_MODEL",
|
713 |
+
"link": 170,
|
714 |
+
"slot_index": 2
|
715 |
+
},
|
716 |
+
{
|
717 |
+
"name": "faceswap_model",
|
718 |
+
"type": "FACESWAP_MODEL",
|
719 |
+
"link": 165,
|
720 |
+
"slot_index": 3
|
721 |
+
}
|
722 |
+
],
|
723 |
+
"outputs": [
|
724 |
+
{
|
725 |
+
"name": "IMAGE",
|
726 |
+
"type": "IMAGE",
|
727 |
+
"links": [
|
728 |
+
168,
|
729 |
+
169
|
730 |
+
],
|
731 |
+
"shape": 3,
|
732 |
+
"slot_index": 0
|
733 |
+
}
|
734 |
+
],
|
735 |
+
"properties": {
|
736 |
+
"Node name for S&R": "Face Swap (mtb)"
|
737 |
+
},
|
738 |
+
"widgets_values": [
|
739 |
+
"0"
|
740 |
+
]
|
741 |
+
},
|
742 |
+
{
|
743 |
+
"id": 3,
|
744 |
+
"type": "KSampler",
|
745 |
+
"pos": [
|
746 |
+
-483,
|
747 |
+
-21
|
748 |
+
],
|
749 |
+
"size": [
|
750 |
+
315,
|
751 |
+
474
|
752 |
+
],
|
753 |
+
"flags": {},
|
754 |
+
"order": 14,
|
755 |
+
"mode": 0,
|
756 |
+
"inputs": [
|
757 |
+
{
|
758 |
+
"name": "model",
|
759 |
+
"type": "MODEL",
|
760 |
+
"link": 120
|
761 |
+
},
|
762 |
+
{
|
763 |
+
"name": "positive",
|
764 |
+
"type": "CONDITIONING",
|
765 |
+
"link": 4
|
766 |
+
},
|
767 |
+
{
|
768 |
+
"name": "negative",
|
769 |
+
"type": "CONDITIONING",
|
770 |
+
"link": 6
|
771 |
+
},
|
772 |
+
{
|
773 |
+
"name": "latent_image",
|
774 |
+
"type": "LATENT",
|
775 |
+
"link": 2
|
776 |
+
}
|
777 |
+
],
|
778 |
+
"outputs": [
|
779 |
+
{
|
780 |
+
"name": "LATENT",
|
781 |
+
"type": "LATENT",
|
782 |
+
"links": [
|
783 |
+
138
|
784 |
+
],
|
785 |
+
"slot_index": 0
|
786 |
+
}
|
787 |
+
],
|
788 |
+
"properties": {
|
789 |
+
"Node name for S&R": "KSampler"
|
790 |
+
},
|
791 |
+
"widgets_values": [
|
792 |
+
542071534529,
|
793 |
+
"fixed",
|
794 |
+
32,
|
795 |
+
9,
|
796 |
+
"dpmpp_2m",
|
797 |
+
"normal",
|
798 |
+
1
|
799 |
+
]
|
800 |
+
}
|
801 |
+
],
|
802 |
+
"links": [
|
803 |
+
[
|
804 |
+
2,
|
805 |
+
5,
|
806 |
+
0,
|
807 |
+
3,
|
808 |
+
3,
|
809 |
+
"LATENT"
|
810 |
+
],
|
811 |
+
[
|
812 |
+
4,
|
813 |
+
6,
|
814 |
+
0,
|
815 |
+
3,
|
816 |
+
1,
|
817 |
+
"CONDITIONING"
|
818 |
+
],
|
819 |
+
[
|
820 |
+
6,
|
821 |
+
7,
|
822 |
+
0,
|
823 |
+
3,
|
824 |
+
2,
|
825 |
+
"CONDITIONING"
|
826 |
+
],
|
827 |
+
[
|
828 |
+
117,
|
829 |
+
16,
|
830 |
+
2,
|
831 |
+
56,
|
832 |
+
0,
|
833 |
+
"*"
|
834 |
+
],
|
835 |
+
[
|
836 |
+
120,
|
837 |
+
57,
|
838 |
+
0,
|
839 |
+
3,
|
840 |
+
0,
|
841 |
+
"MODEL"
|
842 |
+
],
|
843 |
+
[
|
844 |
+
124,
|
845 |
+
56,
|
846 |
+
0,
|
847 |
+
59,
|
848 |
+
0,
|
849 |
+
"*"
|
850 |
+
],
|
851 |
+
[
|
852 |
+
134,
|
853 |
+
16,
|
854 |
+
0,
|
855 |
+
65,
|
856 |
+
0,
|
857 |
+
"*"
|
858 |
+
],
|
859 |
+
[
|
860 |
+
135,
|
861 |
+
65,
|
862 |
+
0,
|
863 |
+
57,
|
864 |
+
0,
|
865 |
+
"*"
|
866 |
+
],
|
867 |
+
[
|
868 |
+
138,
|
869 |
+
3,
|
870 |
+
0,
|
871 |
+
66,
|
872 |
+
0,
|
873 |
+
"LATENT"
|
874 |
+
],
|
875 |
+
[
|
876 |
+
139,
|
877 |
+
59,
|
878 |
+
0,
|
879 |
+
66,
|
880 |
+
1,
|
881 |
+
"VAE"
|
882 |
+
],
|
883 |
+
[
|
884 |
+
141,
|
885 |
+
16,
|
886 |
+
1,
|
887 |
+
6,
|
888 |
+
0,
|
889 |
+
"CLIP"
|
890 |
+
],
|
891 |
+
[
|
892 |
+
142,
|
893 |
+
16,
|
894 |
+
1,
|
895 |
+
7,
|
896 |
+
0,
|
897 |
+
"CLIP"
|
898 |
+
],
|
899 |
+
[
|
900 |
+
150,
|
901 |
+
70,
|
902 |
+
0,
|
903 |
+
25,
|
904 |
+
0,
|
905 |
+
"IMAGE"
|
906 |
+
],
|
907 |
+
[
|
908 |
+
151,
|
909 |
+
71,
|
910 |
+
0,
|
911 |
+
70,
|
912 |
+
1,
|
913 |
+
"FACEENHANCE_MODEL"
|
914 |
+
],
|
915 |
+
[
|
916 |
+
152,
|
917 |
+
72,
|
918 |
+
0,
|
919 |
+
71,
|
920 |
+
0,
|
921 |
+
"UPSCALE_MODEL"
|
922 |
+
],
|
923 |
+
[
|
924 |
+
153,
|
925 |
+
66,
|
926 |
+
0,
|
927 |
+
73,
|
928 |
+
0,
|
929 |
+
"IMAGE"
|
930 |
+
],
|
931 |
+
[
|
932 |
+
164,
|
933 |
+
79,
|
934 |
+
0,
|
935 |
+
7,
|
936 |
+
1,
|
937 |
+
"STRING"
|
938 |
+
],
|
939 |
+
[
|
940 |
+
165,
|
941 |
+
69,
|
942 |
+
0,
|
943 |
+
80,
|
944 |
+
3,
|
945 |
+
"FACESWAP_MODEL"
|
946 |
+
],
|
947 |
+
[
|
948 |
+
166,
|
949 |
+
76,
|
950 |
+
0,
|
951 |
+
80,
|
952 |
+
1,
|
953 |
+
"IMAGE"
|
954 |
+
],
|
955 |
+
[
|
956 |
+
167,
|
957 |
+
66,
|
958 |
+
0,
|
959 |
+
80,
|
960 |
+
0,
|
961 |
+
"IMAGE"
|
962 |
+
],
|
963 |
+
[
|
964 |
+
168,
|
965 |
+
80,
|
966 |
+
0,
|
967 |
+
70,
|
968 |
+
0,
|
969 |
+
"IMAGE"
|
970 |
+
],
|
971 |
+
[
|
972 |
+
169,
|
973 |
+
80,
|
974 |
+
0,
|
975 |
+
75,
|
976 |
+
0,
|
977 |
+
"IMAGE"
|
978 |
+
],
|
979 |
+
[
|
980 |
+
170,
|
981 |
+
81,
|
982 |
+
0,
|
983 |
+
80,
|
984 |
+
2,
|
985 |
+
"FACE_ANALYSIS_MODEL"
|
986 |
+
]
|
987 |
+
],
|
988 |
+
"groups": [
|
989 |
+
{
|
990 |
+
"title": "Txt2Img",
|
991 |
+
"bounding": [
|
992 |
+
-2061,
|
993 |
+
-234,
|
994 |
+
1932,
|
995 |
+
973
|
996 |
+
],
|
997 |
+
"color": "#a1309b",
|
998 |
+
"locked": false
|
999 |
+
},
|
1000 |
+
{
|
1001 |
+
"title": "Save Intermediate Image",
|
1002 |
+
"bounding": [
|
1003 |
+
147,
|
1004 |
+
-152,
|
1005 |
+
303,
|
1006 |
+
213
|
1007 |
+
],
|
1008 |
+
"color": "#3f789e",
|
1009 |
+
"locked": false
|
1010 |
+
},
|
1011 |
+
{
|
1012 |
+
"title": "SWAP & RESTORED",
|
1013 |
+
"bounding": [
|
1014 |
+
2305,
|
1015 |
+
-741,
|
1016 |
+
789,
|
1017 |
+
638
|
1018 |
+
],
|
1019 |
+
"color": "#3f789e",
|
1020 |
+
"locked": false
|
1021 |
+
},
|
1022 |
+
{
|
1023 |
+
"title": "SWAP",
|
1024 |
+
"bounding": [
|
1025 |
+
1520,
|
1026 |
+
-743,
|
1027 |
+
774,
|
1028 |
+
642
|
1029 |
+
],
|
1030 |
+
"color": "#3f789e",
|
1031 |
+
"locked": false
|
1032 |
+
},
|
1033 |
+
{
|
1034 |
+
"title": "SD OUTPUT",
|
1035 |
+
"bounding": [
|
1036 |
+
655,
|
1037 |
+
-745,
|
1038 |
+
854,
|
1039 |
+
648
|
1040 |
+
],
|
1041 |
+
"color": "#3f789e",
|
1042 |
+
"locked": false
|
1043 |
+
}
|
1044 |
+
],
|
1045 |
+
"config": {},
|
1046 |
+
"extra": {},
|
1047 |
+
"version": 0.4
|
1048 |
+
}
|
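
The example graphs in this commit (the face-swap workflow above and the FILM interpolation workflow below) are standard ComfyUI workflow exports: a flat "nodes" array where each entry carries an "id", "type", "pos", optional "inputs"/"outputs" and "widgets_values"; a "links" table whose rows read [link_id, from_node, from_slot, to_node, to_slot, type]; and optional "groups" used purely for visual organisation. A rough sketch of listing which mtb nodes such a file depends on is below; the file path is only illustrative.

# sketch: list the custom (mtb) node types an example workflow uses
import json
from pathlib import Path

workflow = json.loads(Path("comfy_mtb/examples/01-faceswap.json").read_text())
mtb_types = sorted({node["type"] for node in workflow["nodes"] if "(mtb)" in node["type"]})
print("\n".join(mtb_types))
# for the face-swap example this includes e.g. "Face Swap (mtb)",
# "Load Face Analysis Model (mtb)", "Load Face Swap Model (mtb)",
# "Load Face Enhance Model (mtb)" and "Restore Face (mtb)"
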
comfy_mtb/examples/02-film_interpolation.json
ADDED
@@ -0,0 +1,1327 @@
1 |
+
{
|
2 |
+
"last_node_id": 86,
|
3 |
+
"last_link_id": 172,
|
4 |
+
"nodes": [
|
5 |
+
{
|
6 |
+
"id": 59,
|
7 |
+
"type": "Reroute",
|
8 |
+
"pos": [
|
9 |
+
-670,
|
10 |
+
980
|
11 |
+
],
|
12 |
+
"size": [
|
13 |
+
75,
|
14 |
+
26
|
15 |
+
],
|
16 |
+
"flags": {},
|
17 |
+
"order": 13,
|
18 |
+
"mode": 0,
|
19 |
+
"inputs": [
|
20 |
+
{
|
21 |
+
"name": "",
|
22 |
+
"type": "*",
|
23 |
+
"link": 124
|
24 |
+
}
|
25 |
+
],
|
26 |
+
"outputs": [
|
27 |
+
{
|
28 |
+
"name": "",
|
29 |
+
"type": "VAE",
|
30 |
+
"links": [
|
31 |
+
139,
|
32 |
+
154
|
33 |
+
],
|
34 |
+
"slot_index": 0
|
35 |
+
}
|
36 |
+
],
|
37 |
+
"properties": {
|
38 |
+
"showOutputText": false,
|
39 |
+
"horizontal": false
|
40 |
+
}
|
41 |
+
},
|
42 |
+
{
|
43 |
+
"id": 56,
|
44 |
+
"type": "Reroute",
|
45 |
+
"pos": [
|
46 |
+
-2050,
|
47 |
+
980
|
48 |
+
],
|
49 |
+
"size": [
|
50 |
+
75,
|
51 |
+
26
|
52 |
+
],
|
53 |
+
"flags": {},
|
54 |
+
"order": 8,
|
55 |
+
"mode": 0,
|
56 |
+
"inputs": [
|
57 |
+
{
|
58 |
+
"name": "",
|
59 |
+
"type": "*",
|
60 |
+
"link": 117
|
61 |
+
}
|
62 |
+
],
|
63 |
+
"outputs": [
|
64 |
+
{
|
65 |
+
"name": "",
|
66 |
+
"type": "VAE",
|
67 |
+
"links": [
|
68 |
+
124
|
69 |
+
]
|
70 |
+
}
|
71 |
+
],
|
72 |
+
"properties": {
|
73 |
+
"showOutputText": false,
|
74 |
+
"horizontal": false
|
75 |
+
}
|
76 |
+
},
|
77 |
+
{
|
78 |
+
"id": 57,
|
79 |
+
"type": "Reroute",
|
80 |
+
"pos": [
|
81 |
+
-1200,
|
82 |
+
150
|
83 |
+
],
|
84 |
+
"size": [
|
85 |
+
75,
|
86 |
+
26
|
87 |
+
],
|
88 |
+
"flags": {},
|
89 |
+
"order": 10,
|
90 |
+
"mode": 0,
|
91 |
+
"inputs": [
|
92 |
+
{
|
93 |
+
"name": "",
|
94 |
+
"type": "*",
|
95 |
+
"link": 135
|
96 |
+
}
|
97 |
+
],
|
98 |
+
"outputs": [
|
99 |
+
{
|
100 |
+
"name": "",
|
101 |
+
"type": "MODEL",
|
102 |
+
"links": [
|
103 |
+
120,
|
104 |
+
143
|
105 |
+
],
|
106 |
+
"slot_index": 0
|
107 |
+
}
|
108 |
+
],
|
109 |
+
"properties": {
|
110 |
+
"showOutputText": false,
|
111 |
+
"horizontal": false
|
112 |
+
}
|
113 |
+
},
|
114 |
+
{
|
115 |
+
"id": 3,
|
116 |
+
"type": "KSampler",
|
117 |
+
"pos": [
|
118 |
+
-1010,
|
119 |
+
320
|
120 |
+
],
|
121 |
+
"size": [
|
122 |
+
315,
|
123 |
+
474
|
124 |
+
],
|
125 |
+
"flags": {},
|
126 |
+
"order": 15,
|
127 |
+
"mode": 0,
|
128 |
+
"inputs": [
|
129 |
+
{
|
130 |
+
"name": "model",
|
131 |
+
"type": "MODEL",
|
132 |
+
"link": 120
|
133 |
+
},
|
134 |
+
{
|
135 |
+
"name": "positive",
|
136 |
+
"type": "CONDITIONING",
|
137 |
+
"link": 4
|
138 |
+
},
|
139 |
+
{
|
140 |
+
"name": "negative",
|
141 |
+
"type": "CONDITIONING",
|
142 |
+
"link": 6
|
143 |
+
},
|
144 |
+
{
|
145 |
+
"name": "latent_image",
|
146 |
+
"type": "LATENT",
|
147 |
+
"link": 2
|
148 |
+
},
|
149 |
+
{
|
150 |
+
"name": "seed",
|
151 |
+
"type": "INT",
|
152 |
+
"link": 148,
|
153 |
+
"widget": {
|
154 |
+
"name": "seed",
|
155 |
+
"config": [
|
156 |
+
"INT",
|
157 |
+
{
|
158 |
+
"default": 0,
|
159 |
+
"min": 0,
|
160 |
+
"max": 18446744073709552000
|
161 |
+
}
|
162 |
+
]
|
163 |
+
}
|
164 |
+
}
|
165 |
+
],
|
166 |
+
"outputs": [
|
167 |
+
{
|
168 |
+
"name": "LATENT",
|
169 |
+
"type": "LATENT",
|
170 |
+
"links": [
|
171 |
+
138
|
172 |
+
],
|
173 |
+
"slot_index": 0
|
174 |
+
}
|
175 |
+
],
|
176 |
+
"properties": {
|
177 |
+
"Node name for S&R": "KSampler"
|
178 |
+
},
|
179 |
+
"widgets_values": [
|
180 |
+
1682,
|
181 |
+
"fixed",
|
182 |
+
45,
|
183 |
+
8,
|
184 |
+
"euler_ancestral",
|
185 |
+
"simple",
|
186 |
+
1
|
187 |
+
]
|
188 |
+
},
|
189 |
+
{
|
190 |
+
"id": 79,
|
191 |
+
"type": "Load Film Model (mtb)",
|
192 |
+
"pos": [
|
193 |
+
720,
|
194 |
+
590
|
195 |
+
],
|
196 |
+
"size": [
|
197 |
+
315,
|
198 |
+
58
|
199 |
+
],
|
200 |
+
"flags": {},
|
201 |
+
"order": 0,
|
202 |
+
"mode": 0,
|
203 |
+
"outputs": [
|
204 |
+
{
|
205 |
+
"name": "FILM_MODEL",
|
206 |
+
"type": "FILM_MODEL",
|
207 |
+
"links": [
|
208 |
+
161
|
209 |
+
],
|
210 |
+
"shape": 3
|
211 |
+
}
|
212 |
+
],
|
213 |
+
"properties": {
|
214 |
+
"Node name for S&R": "Load Film Model (mtb)"
|
215 |
+
},
|
216 |
+
"widgets_values": [
|
217 |
+
"Style"
|
218 |
+
]
|
219 |
+
},
|
220 |
+
{
|
221 |
+
"id": 80,
|
222 |
+
"type": "PreviewImage",
|
223 |
+
"pos": [
|
224 |
+
1120,
|
225 |
+
190
|
226 |
+
],
|
227 |
+
"size": [
|
228 |
+
210,
|
229 |
+
246
|
230 |
+
],
|
231 |
+
"flags": {},
|
232 |
+
"order": 22,
|
233 |
+
"mode": 0,
|
234 |
+
"inputs": [
|
235 |
+
{
|
236 |
+
"name": "images",
|
237 |
+
"type": "IMAGE",
|
238 |
+
"link": 163,
|
239 |
+
"slot_index": 0
|
240 |
+
}
|
241 |
+
],
|
242 |
+
"properties": {
|
243 |
+
"Node name for S&R": "PreviewImage"
|
244 |
+
}
|
245 |
+
},
|
246 |
+
{
|
247 |
+
"id": 71,
|
248 |
+
"type": "CLIPTextEncode",
|
249 |
+
"pos": [
|
250 |
+
-610,
|
251 |
+
400
|
252 |
+
],
|
253 |
+
"size": [
|
254 |
+
210,
|
255 |
+
75.28300476074219
|
256 |
+
],
|
257 |
+
"flags": {},
|
258 |
+
"order": 16,
|
259 |
+
"mode": 0,
|
260 |
+
"inputs": [
|
261 |
+
{
|
262 |
+
"name": "clip",
|
263 |
+
"type": "CLIP",
|
264 |
+
"link": 152,
|
265 |
+
"slot_index": 0
|
266 |
+
},
|
267 |
+
{
|
268 |
+
"name": "text",
|
269 |
+
"type": "STRING",
|
270 |
+
"link": 172,
|
271 |
+
"widget": {
|
272 |
+
"name": "text",
|
273 |
+
"config": [
|
274 |
+
"STRING",
|
275 |
+
{
|
276 |
+
"multiline": true
|
277 |
+
}
|
278 |
+
]
|
279 |
+
}
|
280 |
+
}
|
281 |
+
],
|
282 |
+
"outputs": [
|
283 |
+
{
|
284 |
+
"name": "CONDITIONING",
|
285 |
+
"type": "CONDITIONING",
|
286 |
+
"links": [
|
287 |
+
144
|
288 |
+
],
|
289 |
+
"slot_index": 0
|
290 |
+
}
|
291 |
+
],
|
292 |
+
"properties": {
|
293 |
+
"Node name for S&R": "CLIPTextEncode"
|
294 |
+
},
|
295 |
+
"widgets_values": [
|
296 |
+
"Face of a man (looking down), rim lighting, tokyo 1987"
|
297 |
+
]
|
298 |
+
},
|
299 |
+
{
|
300 |
+
"id": 19,
|
301 |
+
"type": "CLIPSetLastLayer",
|
302 |
+
"pos": [
|
303 |
+
-1980,
|
304 |
+
400
|
305 |
+
],
|
306 |
+
"size": [
|
307 |
+
315,
|
308 |
+
58
|
309 |
+
],
|
310 |
+
"flags": {},
|
311 |
+
"order": 7,
|
312 |
+
"mode": 0,
|
313 |
+
"inputs": [
|
314 |
+
{
|
315 |
+
"name": "clip",
|
316 |
+
"type": "CLIP",
|
317 |
+
"link": 116
|
318 |
+
}
|
319 |
+
],
|
320 |
+
"outputs": [
|
321 |
+
{
|
322 |
+
"name": "CLIP",
|
323 |
+
"type": "CLIP",
|
324 |
+
"links": [
|
325 |
+
28,
|
326 |
+
29,
|
327 |
+
152
|
328 |
+
],
|
329 |
+
"shape": 3,
|
330 |
+
"slot_index": 0
|
331 |
+
}
|
332 |
+
],
|
333 |
+
"properties": {
|
334 |
+
"Node name for S&R": "CLIPSetLastLayer"
|
335 |
+
},
|
336 |
+
"widgets_values": [
|
337 |
+
-2
|
338 |
+
]
|
339 |
+
},
|
340 |
+
{
|
341 |
+
"id": 6,
|
342 |
+
"type": "CLIPTextEncode",
|
343 |
+
"pos": [
|
344 |
+
-1320,
|
345 |
+
330
|
346 |
+
],
|
347 |
+
"size": [
|
348 |
+
210,
|
349 |
+
54
|
350 |
+
],
|
351 |
+
"flags": {},
|
352 |
+
"order": 11,
|
353 |
+
"mode": 0,
|
354 |
+
"inputs": [
|
355 |
+
{
|
356 |
+
"name": "clip",
|
357 |
+
"type": "CLIP",
|
358 |
+
"link": 28
|
359 |
+
},
|
360 |
+
{
|
361 |
+
"name": "text",
|
362 |
+
"type": "STRING",
|
363 |
+
"link": 167,
|
364 |
+
"widget": {
|
365 |
+
"name": "text",
|
366 |
+
"config": [
|
367 |
+
"STRING",
|
368 |
+
{
|
369 |
+
"multiline": true
|
370 |
+
}
|
371 |
+
]
|
372 |
+
}
|
373 |
+
}
|
374 |
+
],
|
375 |
+
"outputs": [
|
376 |
+
{
|
377 |
+
"name": "CONDITIONING",
|
378 |
+
"type": "CONDITIONING",
|
379 |
+
"links": [
|
380 |
+
4
|
381 |
+
],
|
382 |
+
"slot_index": 0
|
383 |
+
}
|
384 |
+
],
|
385 |
+
"properties": {
|
386 |
+
"Node name for S&R": "CLIPTextEncode"
|
387 |
+
},
|
388 |
+
"widgets_values": [
|
389 |
+
"Face of a man (looking down), rim lighting, tokyo 1987"
|
390 |
+
]
|
391 |
+
},
|
392 |
+
{
|
393 |
+
"id": 75,
|
394 |
+
"type": "VAEDecodeTiled",
|
395 |
+
"pos": [
|
396 |
+
220,
|
397 |
+
620
|
398 |
+
],
|
399 |
+
"size": [
|
400 |
+
210,
|
401 |
+
46
|
402 |
+
],
|
403 |
+
"flags": {
|
404 |
+
"collapsed": false
|
405 |
+
},
|
406 |
+
"order": 19,
|
407 |
+
"mode": 0,
|
408 |
+
"inputs": [
|
409 |
+
{
|
410 |
+
"name": "samples",
|
411 |
+
"type": "LATENT",
|
412 |
+
"link": 155,
|
413 |
+
"slot_index": 0
|
414 |
+
},
|
415 |
+
{
|
416 |
+
"name": "vae",
|
417 |
+
"type": "VAE",
|
418 |
+
"link": 154,
|
419 |
+
"slot_index": 1
|
420 |
+
}
|
421 |
+
],
|
422 |
+
"outputs": [
|
423 |
+
{
|
424 |
+
"name": "IMAGE",
|
425 |
+
"type": "IMAGE",
|
426 |
+
"links": [
|
427 |
+
158
|
428 |
+
],
|
429 |
+
"shape": 3,
|
430 |
+
"slot_index": 0
|
431 |
+
}
|
432 |
+
],
|
433 |
+
"properties": {
|
434 |
+
"Node name for S&R": "VAEDecodeTiled"
|
435 |
+
}
|
436 |
+
},
|
437 |
+
{
|
438 |
+
"id": 70,
|
439 |
+
"type": "KSampler",
|
440 |
+
"pos": [
|
441 |
+
-300,
|
442 |
+
380
|
443 |
+
],
|
444 |
+
"size": [
|
445 |
+
315,
|
446 |
+
474
|
447 |
+
],
|
448 |
+
"flags": {},
|
449 |
+
"order": 18,
|
450 |
+
"mode": 0,
|
451 |
+
"inputs": [
|
452 |
+
{
|
453 |
+
"name": "model",
|
454 |
+
"type": "MODEL",
|
455 |
+
"link": 143
|
456 |
+
},
|
457 |
+
{
|
458 |
+
"name": "positive",
|
459 |
+
"type": "CONDITIONING",
|
460 |
+
"link": 144
|
461 |
+
},
|
462 |
+
{
|
463 |
+
"name": "negative",
|
464 |
+
"type": "CONDITIONING",
|
465 |
+
"link": 145
|
466 |
+
},
|
467 |
+
{
|
468 |
+
"name": "latent_image",
|
469 |
+
"type": "LATENT",
|
470 |
+
"link": 153,
|
471 |
+
"slot_index": 3
|
472 |
+
},
|
473 |
+
{
|
474 |
+
"name": "seed",
|
475 |
+
"type": "INT",
|
476 |
+
"link": 150,
|
477 |
+
"widget": {
|
478 |
+
"name": "seed",
|
479 |
+
"config": [
|
480 |
+
"INT",
|
481 |
+
{
|
482 |
+
"default": 0,
|
483 |
+
"min": 0,
|
484 |
+
"max": 18446744073709552000
|
485 |
+
}
|
486 |
+
]
|
487 |
+
},
|
488 |
+
"slot_index": 4
|
489 |
+
}
|
490 |
+
],
|
491 |
+
"outputs": [
|
492 |
+
{
|
493 |
+
"name": "LATENT",
|
494 |
+
"type": "LATENT",
|
495 |
+
"links": [
|
496 |
+
155
|
497 |
+
],
|
498 |
+
"slot_index": 0
|
499 |
+
}
|
500 |
+
],
|
501 |
+
"properties": {
|
502 |
+
"Node name for S&R": "KSampler"
|
503 |
+
},
|
504 |
+
"widgets_values": [
|
505 |
+
1682,
|
506 |
+
"fixed",
|
507 |
+
45,
|
508 |
+
8,
|
509 |
+
"euler_ancestral",
|
510 |
+
"simple",
|
511 |
+
1
|
512 |
+
]
|
513 |
+
},
|
514 |
+
{
|
515 |
+
"id": 16,
|
516 |
+
"type": "CheckpointLoaderSimple",
|
517 |
+
"pos": [
|
518 |
+
-2470,
|
519 |
+
530
|
520 |
+
],
|
521 |
+
"size": [
|
522 |
+
315,
|
523 |
+
98
|
524 |
+
],
|
525 |
+
"flags": {},
|
526 |
+
"order": 1,
|
527 |
+
"mode": 0,
|
528 |
+
"outputs": [
|
529 |
+
{
|
530 |
+
"name": "MODEL",
|
531 |
+
"type": "MODEL",
|
532 |
+
"links": [
|
533 |
+
134
|
534 |
+
],
|
535 |
+
"slot_index": 0
|
536 |
+
},
|
537 |
+
{
|
538 |
+
"name": "CLIP",
|
539 |
+
"type": "CLIP",
|
540 |
+
"links": [
|
541 |
+
116
|
542 |
+
],
|
543 |
+
"slot_index": 1
|
544 |
+
},
|
545 |
+
{
|
546 |
+
"name": "VAE",
|
547 |
+
"type": "VAE",
|
548 |
+
"links": [
|
549 |
+
117
|
550 |
+
],
|
551 |
+
"slot_index": 2
|
552 |
+
}
|
553 |
+
],
|
554 |
+
"properties": {
|
555 |
+
"Node name for S&R": "CheckpointLoaderSimple"
|
556 |
+
},
|
557 |
+
"widgets_values": [
|
558 |
+
"revAnimated_v122.safetensors"
|
559 |
+
]
|
560 |
+
},
|
561 |
+
{
|
562 |
+
"id": 66,
|
563 |
+
"type": "VAEDecodeTiled",
|
564 |
+
"pos": [
|
565 |
+
231,
|
566 |
+
403
|
567 |
+
],
|
568 |
+
"size": [
|
569 |
+
210,
|
570 |
+
46
|
571 |
+
],
|
572 |
+
"flags": {
|
573 |
+
"collapsed": false
|
574 |
+
},
|
575 |
+
"order": 17,
|
576 |
+
"mode": 0,
|
577 |
+
"inputs": [
|
578 |
+
{
|
579 |
+
"name": "samples",
|
580 |
+
"type": "LATENT",
|
581 |
+
"link": 138
|
582 |
+
},
|
583 |
+
{
|
584 |
+
"name": "vae",
|
585 |
+
"type": "VAE",
|
586 |
+
"link": 139,
|
587 |
+
"slot_index": 1
|
588 |
+
}
|
589 |
+
],
|
590 |
+
"outputs": [
|
591 |
+
{
|
592 |
+
"name": "IMAGE",
|
593 |
+
"type": "IMAGE",
|
594 |
+
"links": [
|
595 |
+
157
|
596 |
+
],
|
597 |
+
"shape": 3,
|
598 |
+
"slot_index": 0
|
599 |
+
}
|
600 |
+
],
|
601 |
+
"properties": {
|
602 |
+
"Node name for S&R": "VAEDecodeTiled"
|
603 |
+
}
|
604 |
+
},
|
605 |
+
{
|
606 |
+
"id": 77,
|
607 |
+
"type": "Concat Images (mtb)",
|
608 |
+
"pos": [
|
609 |
+
661,
|
610 |
+
402
|
611 |
+
],
|
612 |
+
"size": [
|
613 |
+
210,
|
614 |
+
46
|
615 |
+
],
|
616 |
+
"flags": {},
|
617 |
+
"order": 20,
|
618 |
+
"mode": 0,
|
619 |
+
"inputs": [
|
620 |
+
{
|
621 |
+
"name": "imageA",
|
622 |
+
"type": "IMAGE",
|
623 |
+
"link": 157
|
624 |
+
},
|
625 |
+
{
|
626 |
+
"name": "imageB",
|
627 |
+
"type": "IMAGE",
|
628 |
+
"link": 158
|
629 |
+
}
|
630 |
+
],
|
631 |
+
"outputs": [
|
632 |
+
{
|
633 |
+
"name": "IMAGE",
|
634 |
+
"type": "IMAGE",
|
635 |
+
"links": [
|
636 |
+
160,
|
637 |
+
163
|
638 |
+
],
|
639 |
+
"shape": 3,
|
640 |
+
"slot_index": 0
|
641 |
+
}
|
642 |
+
],
|
643 |
+
"properties": {
|
644 |
+
"Node name for S&R": "Concat Images (mtb)"
|
645 |
+
}
|
646 |
+
},
|
647 |
+
{
|
648 |
+
"id": 65,
|
649 |
+
"type": "Reroute",
|
650 |
+
"pos": [
|
651 |
+
-1810,
|
652 |
+
148
|
653 |
+
],
|
654 |
+
"size": [
|
655 |
+
75,
|
656 |
+
26
|
657 |
+
],
|
658 |
+
"flags": {},
|
659 |
+
"order": 6,
|
660 |
+
"mode": 0,
|
661 |
+
"inputs": [
|
662 |
+
{
|
663 |
+
"name": "",
|
664 |
+
"type": "*",
|
665 |
+
"link": 134
|
666 |
+
}
|
667 |
+
],
|
668 |
+
"outputs": [
|
669 |
+
{
|
670 |
+
"name": "",
|
671 |
+
"type": "MODEL",
|
672 |
+
"links": [
|
673 |
+
135
|
674 |
+
]
|
675 |
+
}
|
676 |
+
],
|
677 |
+
"properties": {
|
678 |
+
"showOutputText": false,
|
679 |
+
"horizontal": false
|
680 |
+
}
|
681 |
+
},
|
682 |
+
{
|
683 |
+
"id": 81,
|
684 |
+
"type": "Reroute",
|
685 |
+
"pos": [
|
686 |
+
-1821,
|
687 |
+
1
|
688 |
+
],
|
689 |
+
"size": [
|
690 |
+
75,
|
691 |
+
26
|
692 |
+
],
|
693 |
+
"flags": {},
|
694 |
+
"order": 9,
|
695 |
+
"mode": 0,
|
696 |
+
"inputs": [
|
697 |
+
{
|
698 |
+
"name": "",
|
699 |
+
"type": "*",
|
700 |
+
"link": 166
|
701 |
+
}
|
702 |
+
],
|
703 |
+
"outputs": [
|
704 |
+
{
|
705 |
+
"name": "",
|
706 |
+
"type": "STRING",
|
707 |
+
"links": [
|
708 |
+
165
|
709 |
+
]
|
710 |
+
}
|
711 |
+
],
|
712 |
+
"properties": {
|
713 |
+
"showOutputText": false,
|
714 |
+
"horizontal": false
|
715 |
+
}
|
716 |
+
},
|
717 |
+
{
|
718 |
+
"id": 84,
|
719 |
+
"type": "Text box",
|
720 |
+
"pos": [
|
721 |
+
-1669,
|
722 |
+
498
|
723 |
+
],
|
724 |
+
"size": [
|
725 |
+
294,
|
726 |
+
104.4554214477539
|
727 |
+
],
|
728 |
+
"flags": {},
|
729 |
+
"order": 2,
|
730 |
+
"mode": 0,
|
731 |
+
"outputs": [
|
732 |
+
{
|
733 |
+
"name": "STRING",
|
734 |
+
"type": "STRING",
|
735 |
+
"links": [
|
736 |
+
168
|
737 |
+
],
|
738 |
+
"shape": 3,
|
739 |
+
"slot_index": 0
|
740 |
+
}
|
741 |
+
],
|
742 |
+
"title": "❌Mel Negatives (general) (Negative)",
|
743 |
+
"properties": {
|
744 |
+
"Node name for S&R": "Text box"
|
745 |
+
},
|
746 |
+
"widgets_values": [
|
747 |
+
"embedding:EasyNegative, embedding:EasyNegativeV2, watermark, text, deformed, bad anatomy, disfigured"
|
748 |
+
]
|
749 |
+
},
|
750 |
+
{
|
751 |
+
"id": 7,
|
752 |
+
"type": "CLIPTextEncode",
|
753 |
+
"pos": [
|
754 |
+
-1317,
|
755 |
+
424
|
756 |
+
],
|
757 |
+
"size": [
|
758 |
+
210,
|
759 |
+
54
|
760 |
+
],
|
761 |
+
"flags": {},
|
762 |
+
"order": 12,
|
763 |
+
"mode": 0,
|
764 |
+
"inputs": [
|
765 |
+
{
|
766 |
+
"name": "clip",
|
767 |
+
"type": "CLIP",
|
768 |
+
"link": 29
|
769 |
+
},
|
770 |
+
{
|
771 |
+
"name": "text",
|
772 |
+
"type": "STRING",
|
773 |
+
"link": 168,
|
774 |
+
"widget": {
|
775 |
+
"name": "text",
|
776 |
+
"config": [
|
777 |
+
"STRING",
|
778 |
+
{
|
779 |
+
"multiline": true
|
780 |
+
}
|
781 |
+
]
|
782 |
+
},
|
783 |
+
"slot_index": 1
|
784 |
+
}
|
785 |
+
],
|
786 |
+
"outputs": [
|
787 |
+
{
|
788 |
+
"name": "CONDITIONING",
|
789 |
+
"type": "CONDITIONING",
|
790 |
+
"links": [
|
791 |
+
6,
|
792 |
+
145
|
793 |
+
],
|
794 |
+
"slot_index": 0
|
795 |
+
}
|
796 |
+
],
|
797 |
+
"properties": {
|
798 |
+
"Node name for S&R": "CLIPTextEncode"
|
799 |
+
},
|
800 |
+
"widgets_values": [
|
801 |
+
"worst quality, hands, embedding:EasyNegative,"
|
802 |
+
]
|
803 |
+
},
|
804 |
+
{
|
805 |
+
"id": 78,
|
806 |
+
"type": "Film Interpolation (mtb)",
|
807 |
+
"pos": [
|
808 |
+
1140,
|
809 |
+
520
|
810 |
+
],
|
811 |
+
"size": [
|
812 |
+
315,
|
813 |
+
78
|
814 |
+
],
|
815 |
+
"flags": {},
|
816 |
+
"order": 21,
|
817 |
+
"mode": 0,
|
818 |
+
"inputs": [
|
819 |
+
{
|
820 |
+
"name": "images",
|
821 |
+
"type": "IMAGE",
|
822 |
+
"link": 160,
|
823 |
+
"slot_index": 0
|
824 |
+
},
|
825 |
+
{
|
826 |
+
"name": "film_model",
|
827 |
+
"type": "FILM_MODEL",
|
828 |
+
"link": 161,
|
829 |
+
"slot_index": 1
|
830 |
+
}
|
831 |
+
],
|
832 |
+
"outputs": [
|
833 |
+
{
|
834 |
+
"name": "IMAGE",
|
835 |
+
"type": "IMAGE",
|
836 |
+
"links": [
|
837 |
+
169
|
838 |
+
],
|
839 |
+
"shape": 3,
|
840 |
+
"slot_index": 0
|
841 |
+
}
|
842 |
+
],
|
843 |
+
"properties": {
|
844 |
+
"Node name for S&R": "Film Interpolation (mtb)"
|
845 |
+
},
|
846 |
+
"widgets_values": [
|
847 |
+
4
|
848 |
+
]
|
849 |
+
},
|
850 |
+
{
|
851 |
+
"id": 83,
|
852 |
+
"type": "Text box",
|
853 |
+
"pos": [
|
854 |
+
-2456,
|
855 |
+
236
|
856 |
+
],
|
857 |
+
"size": [
|
858 |
+
400,
|
859 |
+
200
|
860 |
+
],
|
861 |
+
"flags": {},
|
862 |
+
"order": 3,
|
863 |
+
"mode": 0,
|
864 |
+
"outputs": [
|
865 |
+
{
|
866 |
+
"name": "STRING",
|
867 |
+
"type": "STRING",
|
868 |
+
"links": [
|
869 |
+
166,
|
870 |
+
167
|
871 |
+
],
|
872 |
+
"shape": 3,
|
873 |
+
"slot_index": 0
|
874 |
+
}
|
875 |
+
],
|
876 |
+
"properties": {
|
877 |
+
"Node name for S&R": "Text box"
|
878 |
+
},
|
879 |
+
"widgets_values": [
|
880 |
+
"Close up photo of the face of a Caucasian young man (looking down, and frowning), rim lighting, Tokyo 1987, Bernard, over a blue sky, blue eyes shaved close"
|
881 |
+
]
|
882 |
+
},
|
883 |
+
{
|
884 |
+
"id": 72,
|
885 |
+
"type": "PrimitiveNode",
|
886 |
+
"pos": [
|
887 |
+
-1290,
|
888 |
+
880
|
889 |
+
],
|
890 |
+
"size": [
|
891 |
+
210,
|
892 |
+
82
|
893 |
+
],
|
894 |
+
"flags": {},
|
895 |
+
"order": 4,
|
896 |
+
"mode": 0,
|
897 |
+
"outputs": [
|
898 |
+
{
|
899 |
+
"name": "INT",
|
900 |
+
"type": "INT",
|
901 |
+
"links": [
|
902 |
+
148,
|
903 |
+
150
|
904 |
+
],
|
905 |
+
"widget": {
|
906 |
+
"name": "seed",
|
907 |
+
"config": [
|
908 |
+
"INT",
|
909 |
+
{
|
910 |
+
"default": 0,
|
911 |
+
"min": 0,
|
912 |
+
"max": 18446744073709552000
|
913 |
+
}
|
914 |
+
]
|
915 |
+
},
|
916 |
+
"slot_index": 0
|
917 |
+
}
|
918 |
+
],
|
919 |
+
"title": "seed",
|
920 |
+
"properties": {},
|
921 |
+
"widgets_values": [
|
922 |
+
1682,
|
923 |
+
"fixed"
|
924 |
+
]
|
925 |
+
},
|
926 |
+
{
|
927 |
+
"id": 69,
|
928 |
+
"type": "String Replace (mtb)",
|
929 |
+
"pos": [
|
930 |
+
-1529,
|
931 |
+
-3
|
932 |
+
],
|
933 |
+
"size": [
|
934 |
+
315,
|
935 |
+
82
|
936 |
+
],
|
937 |
+
"flags": {},
|
938 |
+
"order": 14,
|
939 |
+
"mode": 0,
|
940 |
+
"inputs": [
|
941 |
+
{
|
942 |
+
"name": "string",
|
943 |
+
"type": "STRING",
|
944 |
+
"link": 165
|
945 |
+
}
|
946 |
+
],
|
947 |
+
"outputs": [
|
948 |
+
{
|
949 |
+
"name": "STRING",
|
950 |
+
"type": "STRING",
|
951 |
+
"links": [
|
952 |
+
172
|
953 |
+
],
|
954 |
+
"shape": 3,
|
955 |
+
"slot_index": 0
|
956 |
+
}
|
957 |
+
],
|
958 |
+
"properties": {
|
959 |
+
"Node name for S&R": "String Replace (mtb)"
|
960 |
+
},
|
961 |
+
"widgets_values": [
|
962 |
+
"a Caucasian young",
|
963 |
+
"an African old"
|
964 |
+
]
|
965 |
+
},
|
966 |
+
{
|
967 |
+
"id": 5,
|
968 |
+
"type": "EmptyLatentImage",
|
969 |
+
"pos": [
|
970 |
+
-1410,
|
971 |
+
660
|
972 |
+
],
|
973 |
+
"size": [
|
974 |
+
315,
|
975 |
+
106
|
976 |
+
],
|
977 |
+
"flags": {},
|
978 |
+
"order": 5,
|
979 |
+
"mode": 0,
|
980 |
+
"outputs": [
|
981 |
+
{
|
982 |
+
"name": "LATENT",
|
983 |
+
"type": "LATENT",
|
984 |
+
"links": [
|
985 |
+
2,
|
986 |
+
153
|
987 |
+
],
|
988 |
+
"slot_index": 0
|
989 |
+
}
|
990 |
+
],
|
991 |
+
"properties": {
|
992 |
+
"Node name for S&R": "EmptyLatentImage"
|
993 |
+
},
|
994 |
+
"widgets_values": [
|
995 |
+
768,
|
996 |
+
320,
|
997 |
+
1
|
998 |
+
]
|
999 |
+
},
|
1000 |
+
{
|
1001 |
+
"id": 85,
|
1002 |
+
"type": "Save Gif (mtb)",
|
1003 |
+
"pos": [
|
1004 |
+
1519,
|
1005 |
+
364
|
1006 |
+
],
|
1007 |
+
"size": [
|
1008 |
+
862.6054045703117,
|
1009 |
+
496.63413712402314
|
1010 |
+
],
|
1011 |
+
"flags": {},
|
1012 |
+
"order": 23,
|
1013 |
+
"mode": 0,
|
1014 |
+
"inputs": [
|
1015 |
+
{
|
1016 |
+
"name": "image",
|
1017 |
+
"type": "IMAGE",
|
1018 |
+
"link": 169
|
1019 |
+
}
|
1020 |
+
],
|
1021 |
+
"properties": {
|
1022 |
+
"Node name for S&R": "Save Gif (mtb)"
|
1023 |
+
},
|
1024 |
+
"widgets_values": [
|
1025 |
+
12,
|
1026 |
+
0.7,
|
1027 |
+
true,
|
1028 |
+
"/view?filename=eda71478da.gif&subfolder=&type=output",
|
1029 |
+
"nearest",
|
1030 |
+
"/view?filename=421883da47.gif&subfolder=&type=output"
|
1031 |
+
]
|
1032 |
+
}
|
1033 |
+
],
|
1034 |
+
"links": [
|
1035 |
+
[
|
1036 |
+
2,
|
1037 |
+
5,
|
1038 |
+
0,
|
1039 |
+
3,
|
1040 |
+
3,
|
1041 |
+
"LATENT"
|
1042 |
+
],
|
1043 |
+
[
|
1044 |
+
4,
|
1045 |
+
6,
|
1046 |
+
0,
|
1047 |
+
3,
|
1048 |
+
1,
|
1049 |
+
"CONDITIONING"
|
1050 |
+
],
|
1051 |
+
[
|
1052 |
+
6,
|
1053 |
+
7,
|
1054 |
+
0,
|
1055 |
+
3,
|
1056 |
+
2,
|
1057 |
+
"CONDITIONING"
|
1058 |
+
],
|
1059 |
+
[
|
1060 |
+
28,
|
1061 |
+
19,
|
1062 |
+
0,
|
1063 |
+
6,
|
1064 |
+
0,
|
1065 |
+
"CLIP"
|
1066 |
+
],
|
1067 |
+
[
|
1068 |
+
29,
|
1069 |
+
19,
|
1070 |
+
0,
|
1071 |
+
7,
|
1072 |
+
0,
|
1073 |
+
"CLIP"
|
1074 |
+
],
|
1075 |
+
[
|
1076 |
+
116,
|
1077 |
+
16,
|
1078 |
+
1,
|
1079 |
+
19,
|
1080 |
+
0,
|
1081 |
+
"CLIP"
|
1082 |
+
],
|
1083 |
+
[
|
1084 |
+
117,
|
1085 |
+
16,
|
1086 |
+
2,
|
1087 |
+
56,
|
1088 |
+
0,
|
1089 |
+
"*"
|
1090 |
+
],
|
1091 |
+
[
|
1092 |
+
120,
|
1093 |
+
57,
|
1094 |
+
0,
|
1095 |
+
3,
|
1096 |
+
0,
|
1097 |
+
"MODEL"
|
1098 |
+
],
|
1099 |
+
[
|
1100 |
+
124,
|
1101 |
+
56,
|
1102 |
+
0,
|
1103 |
+
59,
|
1104 |
+
0,
|
1105 |
+
"*"
|
1106 |
+
],
|
1107 |
+
[
|
1108 |
+
134,
|
1109 |
+
16,
|
1110 |
+
0,
|
1111 |
+
65,
|
1112 |
+
0,
|
1113 |
+
"*"
|
1114 |
+
],
|
1115 |
+
[
|
1116 |
+
135,
|
1117 |
+
65,
|
1118 |
+
0,
|
1119 |
+
57,
|
1120 |
+
0,
|
1121 |
+
"*"
|
1122 |
+
],
|
1123 |
+
[
|
1124 |
+
138,
|
1125 |
+
3,
|
1126 |
+
0,
|
1127 |
+
66,
|
1128 |
+
0,
|
1129 |
+
"LATENT"
|
1130 |
+
],
|
1131 |
+
[
|
1132 |
+
139,
|
1133 |
+
59,
|
1134 |
+
0,
|
1135 |
+
66,
|
1136 |
+
1,
|
1137 |
+
"VAE"
|
1138 |
+
],
|
1139 |
+
[
|
1140 |
+
143,
|
1141 |
+
57,
|
1142 |
+
0,
|
1143 |
+
70,
|
1144 |
+
0,
|
1145 |
+
"MODEL"
|
1146 |
+
],
|
1147 |
+
[
|
1148 |
+
144,
|
1149 |
+
71,
|
1150 |
+
0,
|
1151 |
+
70,
|
1152 |
+
1,
|
1153 |
+
"CONDITIONING"
|
1154 |
+
],
|
1155 |
+
[
|
1156 |
+
145,
|
1157 |
+
7,
|
1158 |
+
0,
|
1159 |
+
70,
|
1160 |
+
2,
|
1161 |
+
"CONDITIONING"
|
1162 |
+
],
|
1163 |
+
[
|
1164 |
+
148,
|
1165 |
+
72,
|
1166 |
+
0,
|
1167 |
+
3,
|
1168 |
+
4,
|
1169 |
+
"INT"
|
1170 |
+
],
|
1171 |
+
[
|
1172 |
+
150,
|
1173 |
+
72,
|
1174 |
+
0,
|
1175 |
+
70,
|
1176 |
+
4,
|
1177 |
+
"INT"
|
1178 |
+
],
|
1179 |
+
[
|
1180 |
+
152,
|
1181 |
+
19,
|
1182 |
+
0,
|
1183 |
+
71,
|
1184 |
+
0,
|
1185 |
+
"CLIP"
|
1186 |
+
],
|
1187 |
+
[
|
1188 |
+
153,
|
1189 |
+
5,
|
1190 |
+
0,
|
1191 |
+
70,
|
1192 |
+
3,
|
1193 |
+
"LATENT"
|
1194 |
+
],
|
1195 |
+
[
|
1196 |
+
154,
|
1197 |
+
59,
|
1198 |
+
0,
|
1199 |
+
75,
|
1200 |
+
1,
|
1201 |
+
"VAE"
|
1202 |
+
],
|
1203 |
+
[
|
1204 |
+
155,
|
1205 |
+
70,
|
1206 |
+
0,
|
1207 |
+
75,
|
1208 |
+
0,
|
1209 |
+
"LATENT"
|
1210 |
+
],
|
1211 |
+
[
|
1212 |
+
157,
|
1213 |
+
66,
|
1214 |
+
0,
|
1215 |
+
77,
|
1216 |
+
0,
|
1217 |
+
"IMAGE"
|
1218 |
+
],
|
1219 |
+
[
|
1220 |
+
158,
|
1221 |
+
75,
|
1222 |
+
0,
|
1223 |
+
77,
|
1224 |
+
1,
|
1225 |
+
"IMAGE"
|
1226 |
+
],
|
1227 |
+
[
|
1228 |
+
160,
|
1229 |
+
77,
|
1230 |
+
0,
|
1231 |
+
78,
|
1232 |
+
0,
|
1233 |
+
"IMAGE"
|
1234 |
+
],
|
1235 |
+
[
|
1236 |
+
161,
|
1237 |
+
79,
|
1238 |
+
0,
|
1239 |
+
78,
|
1240 |
+
1,
|
1241 |
+
"FILM_MODEL"
|
1242 |
+
],
|
1243 |
+
[
|
1244 |
+
163,
|
1245 |
+
77,
|
1246 |
+
0,
|
1247 |
+
80,
|
1248 |
+
0,
|
1249 |
+
"IMAGE"
|
1250 |
+
],
|
1251 |
+
[
|
1252 |
+
165,
|
1253 |
+
81,
|
1254 |
+
0,
|
1255 |
+
69,
|
1256 |
+
0,
|
1257 |
+
"STRING"
|
1258 |
+
],
|
1259 |
+
[
|
1260 |
+
166,
|
1261 |
+
83,
|
1262 |
+
0,
|
1263 |
+
81,
|
1264 |
+
0,
|
1265 |
+
"*"
|
1266 |
+
],
|
1267 |
+
[
|
1268 |
+
167,
|
1269 |
+
83,
|
1270 |
+
0,
|
1271 |
+
6,
|
1272 |
+
1,
|
1273 |
+
"STRING"
|
1274 |
+
],
|
1275 |
+
[
|
1276 |
+
168,
|
1277 |
+
84,
|
1278 |
+
0,
|
1279 |
+
7,
|
1280 |
+
1,
|
1281 |
+
"STRING"
|
1282 |
+
],
|
1283 |
+
[
|
1284 |
+
169,
|
1285 |
+
78,
|
1286 |
+
0,
|
1287 |
+
85,
|
1288 |
+
0,
|
1289 |
+
"IMAGE"
|
1290 |
+
],
|
1291 |
+
[
|
1292 |
+
172,
|
1293 |
+
69,
|
1294 |
+
0,
|
1295 |
+
71,
|
1296 |
+
1,
|
1297 |
+
"STRING"
|
1298 |
+
]
|
1299 |
+
],
|
1300 |
+
"groups": [
|
1301 |
+
{
|
1302 |
+
"title": "GENERATE IMAGES",
|
1303 |
+
"bounding": [
|
1304 |
+
-2477,
|
1305 |
+
-219,
|
1306 |
+
3018,
|
1307 |
+
1350
|
1308 |
+
],
|
1309 |
+
"color": "#a1309b",
|
1310 |
+
"locked": false
|
1311 |
+
},
|
1312 |
+
{
|
1313 |
+
"title": "FILM INTERPOLATION",
|
1314 |
+
"bounding": [
|
1315 |
+
559,
|
1316 |
+
66,
|
1317 |
+
1264,
|
1318 |
+
832
|
1319 |
+
],
|
1320 |
+
"color": "#3f789e",
|
1321 |
+
"locked": false
|
1322 |
+
}
|
1323 |
+
],
|
1324 |
+
"config": {},
|
1325 |
+
"extra": {},
|
1326 |
+
"version": 0.4
|
1327 |
+
}
|
comfy_mtb/examples/03-animation_builder-condition-lerp.json
ADDED
@@ -0,0 +1 @@
1 |
+
{"last_node_id":83,"last_link_id":172,"nodes":[{"id":59,"type":"Reroute","pos":[-150.35178124999982,644.4360633544919],"size":[75,26],"flags":{},"order":14,"mode":0,"inputs":[{"name":"","type":"*","link":124}],"outputs":[{"name":"","type":"VAE","links":[139],"slot_index":0}],"properties":{"showOutputText":false,"horizontal":false}},{"id":56,"type":"Reroute","pos":[-1580.8297949218763,644.7740239257807],"size":[75,26],"flags":{},"order":8,"mode":0,"inputs":[{"name":"","type":"*","link":117}],"outputs":[{"name":"","type":"VAE","links":[124]}],"properties":{"showOutputText":false,"horizontal":false}},{"id":57,"type":"Reroute","pos":[-673.8297949218747,-185.22597607421872],"size":[75,26],"flags":{},"order":11,"mode":0,"inputs":[{"name":"","type":"*","link":135}],"outputs":[{"name":"","type":"MODEL","links":[120],"slot_index":0}],"properties":{"showOutputText":false,"horizontal":false}},{"id":65,"type":"Reroute","pos":[-1512.8297949218763,-181.22597607421872],"size":[75,26],"flags":{},"order":6,"mode":0,"inputs":[{"name":"","type":"*","link":134}],"outputs":[{"name":"","type":"MODEL","links":[135]}],"properties":{"showOutputText":false,"horizontal":false}},{"id":16,"type":"CheckpointLoaderSimple","pos":[-2000.8297949218756,192.77402392578128],"size":[315,98],"flags":{},"order":0,"mode":0,"outputs":[{"name":"MODEL","type":"MODEL","links":[134],"slot_index":0},{"name":"CLIP","type":"CLIP","links":[116],"slot_index":1},{"name":"VAE","type":"VAE","links":[117],"slot_index":2}],"properties":{"Node name for S&R":"CheckpointLoaderSimple"},"widgets_values":["revAnimated_v122.safetensors"]},{"id":19,"type":"CLIPSetLastLayer","pos":[-1597,209],"size":[315,58],"flags":{},"order":7,"mode":0,"inputs":[{"name":"clip","type":"CLIP","link":116}],"outputs":[{"name":"CLIP","type":"CLIP","links":[28,29,149],"shape":3,"slot_index":0}],"properties":{"Node name for S&R":"CLIPSetLastLayer"},"widgets_values":[-2]},{"id":7,"type":"CLIPTextEncode","pos":[-839,335],"size":[210,54],"flags":{},"order":12,"mode":0,"inputs":[{"name":"clip","type":"CLIP","link":29},{"name":"text","type":"STRING","link":163,"widget":{"name":"text","config":["STRING",{"multiline":true}]},"slot_index":1}],"outputs":[{"name":"CONDITIONING","type":"CONDITIONING","links":[6],"slot_index":0}],"properties":{"Node name for S&R":"CLIPTextEncode"},"widgets_values":["worst quality, hands, embedding:EasyNegative,"]},{"id":5,"type":"EmptyLatentImage","pos":[-969,484],"size":[315,106],"flags":{},"order":1,"mode":0,"outputs":[{"name":"LATENT","type":"LATENT","links":[2],"slot_index":0}],"properties":{"Node name for S&R":"EmptyLatentImage"},"widgets_values":[768,512,1]},{"id":80,"type":"Text box","pos":[-1397,356],"size":[294,93.11674499511719],"flags":{},"order":2,"mode":0,"outputs":[{"name":"STRING","type":"STRING","links":[163],"shape":3,"slot_index":0}],"title":"❌Mel Negatives (general) (Negative)","properties":{"Node name for S&R":"Text box"},"widgets_values":["embedding:EasyNegative, embedding:EasyNegativeV2, watermark, text, deformed"]},{"id":72,"type":"ConditioningAverage 
","pos":[-960,2],"size":[380.4000244140625,78],"flags":{},"order":17,"mode":0,"inputs":[{"name":"conditioning_to","type":"CONDITIONING","link":151},{"name":"conditioning_from","type":"CONDITIONING","link":152,"slot_index":1},{"name":"conditioning_to_strength","type":"FLOAT","link":154,"widget":{"name":"conditioning_to_strength","config":["FLOAT",{"default":1,"min":0,"max":1,"step":0.01}]},"slot_index":2}],"outputs":[{"name":"CONDITIONING","type":"CONDITIONING","links":[153],"shape":3,"slot_index":0}],"properties":{"Node name for S&R":"ConditioningAverage "},"widgets_values":[1]},{"id":6,"type":"CLIPTextEncode","pos":[-1261,-46],"size":[210,54],"flags":{},"order":15,"mode":0,"inputs":[{"name":"clip","type":"CLIP","link":28},{"name":"text","type":"STRING","link":145,"widget":{"name":"text","config":["STRING",{"multiline":true}]},"slot_index":1}],"outputs":[{"name":"CONDITIONING","type":"CONDITIONING","links":[151],"slot_index":0}],"properties":{"Node name for S&R":"CLIPTextEncode"},"widgets_values":["A majestic Lion, fur in the wind, (smirk smile)"]},{"id":70,"type":"CLIPTextEncode","pos":[-1245,87],"size":[210,54],"flags":{},"order":13,"mode":0,"inputs":[{"name":"clip","type":"CLIP","link":149,"slot_index":0},{"name":"text","type":"STRING","link":150,"widget":{"name":"text","config":["STRING",{"multiline":true}]},"slot_index":1}],"outputs":[{"name":"CONDITIONING","type":"CONDITIONING","links":[152],"slot_index":0}],"properties":{"Node name for S&R":"CLIPTextEncode"},"widgets_values":["A majestic Lion, fur in the wind, (smirk smile)"]},{"id":69,"type":"String Replace (mtb)","pos":[-1560,-70],"size":[210,82],"flags":{},"order":9,"mode":0,"inputs":[{"name":"string","type":"STRING","link":141}],"outputs":[{"name":"STRING","type":"STRING","links":[145],"shape":3,"slot_index":0}],"properties":{"Node name for S&R":"String Replace (mtb)"},"widgets_values":["blue","yellow"],"color":"#432","bgcolor":"#653"},{"id":68,"type":"Text box","pos":[-1950,10],"size":[217.2119140625,122.18632507324219],"flags":{},"order":3,"mode":0,"outputs":[{"name":"STRING","type":"STRING","links":[141,150],"shape":3,"slot_index":0}],"properties":{"Node name for S&R":"Text box"},"widgets_values":["A cinematic shot of a blue muscle car from 1974, (at high speed), (heavy smoke whisps in the air), (by night), blue neon lighting"],"color":"#432","bgcolor":"#653"},{"id":66,"type":"VAEDecodeTiled","pos":[197,-20],"size":[210,46],"flags":{"collapsed":false},"order":20,"mode":0,"inputs":[{"name":"samples","type":"LATENT","link":138},{"name":"vae","type":"VAE","link":139,"slot_index":1}],"outputs":[{"name":"IMAGE","type":"IMAGE","links":[161],"shape":3,"slot_index":0}],"properties":{"Node name for S&R":"VAEDecodeTiled"}},{"id":82,"type":"PrimitiveNode","pos":[-1663,1205],"size":[210,82],"flags":{},"order":4,"mode":0,"outputs":[{"name":"INT","type":"INT","links":[170,171],"widget":{"name":"total_frames","config":["INT",{"default":100,"min":0}]},"slot_index":0}],"title":"total_frames","properties":{},"widgets_values":[24,"fixed"]},{"id":78,"type":"SaveImage","pos":[546,-19],"size":[315,270],"flags":{},"order":21,"mode":0,"inputs":[{"name":"images","type":"IMAGE","link":161}],"properties":{},"widgets_values":["mtb_demo-conditional_blend"]},{"id":79,"type":"Save Gif (mtb)","pos":[-537,1189],"size":[210,356],"flags":{},"order":18,"mode":0,"inputs":[{"name":"image","type":"IMAGE","link":167},{"name":"pingpong","type":"BOOLEAN","link":null}],"properties":{"Node name for S&R":"Save Gif 
(mtb)"},"widgets_values":[12,1,true,"/view?filename=03031ebda0.gif&subfolder=&type=output","nearest"]},{"id":83,"type":"Note","pos":[-975,906],"size":[506.81982421875,186.09432983398438],"flags":{},"order":5,"mode":0,"title":"Note👈 RUN THE QUEUE FROM THE ANIMATION BUILDER NODE","properties":{"text":""},"widgets_values":["- This will queue 24 frames\n- The 0-1 output is an interpolated float from 0 to 1 over the frame count\n- Once the loop is done, loop_ended is set to true and activates 'Get Batch from history'\n\nCurrently experimental but the idea of the loop count is to use the \"count\" output to know at which loop we are at, it can be plugged to the seed for instance so that each loop uses a new seed.\nThe reason it's experimental is that I did not find a good way to enforce Get Batch from History to be run last in the workflow, hence it's run before the final frame of the loop..."],"color":"#232","bgcolor":"#353","shape":1},{"id":3,"type":"KSampler","pos":[-483,-21],"size":[315,474],"flags":{},"order":19,"mode":0,"inputs":[{"name":"model","type":"MODEL","link":120},{"name":"positive","type":"CONDITIONING","link":153},{"name":"negative","type":"CONDITIONING","link":6},{"name":"latent_image","type":"LATENT","link":2}],"outputs":[{"name":"LATENT","type":"LATENT","links":[138],"slot_index":0}],"properties":{"Node name for S&R":"KSampler"},"widgets_values":[5428234391,"fixed",45,8,"euler_ancestral","simple",1]},{"id":81,"type":"Get Batch From History (mtb)","pos":[-823,1188],"size":[235.1999969482422,122],"flags":{},"order":16,"mode":0,"inputs":[{"name":"passthrough_image","type":"IMAGE","link":null},{"name":"count","type":"INT","link":171,"widget":{"name":"count","config":["INT",{"default":1,"min":0}]},"slot_index":2},{"name":"enable","type":"BOOLEAN","link":172,"widget":{"name":"enable","config":["BOOLEAN",{"default":true}]},"slot_index":2}],"outputs":[{"name":"i","type":"IMAGE","links":[167],"shape":3,"slot_index":0}],"properties":{"Node name for S&R":"Get Batch From History (mtb)"},"widgets_values":[true,24,0,54,true]},{"id":73,"type":"Animation Builder (mtb)","pos":[-1285,831],"size":[211.60000610351562,294],"flags":{},"order":10,"mode":0,"inputs":[{"name":"total_frames","type":"INT","link":170,"widget":{"name":"total_frames","config":["INT",{"default":100,"min":0}]},"slot_index":0}],"outputs":[{"name":"frame","type":"INT","links":null,"shape":3,"slot_index":0},{"name":"0-1 (scaled)","type":"FLOAT","links":[154],"shape":3,"slot_index":1},{"name":"count","type":"INT","links":null,"shape":3},{"name":"loop_ended","type":"BOOLEAN","links":[172],"shape":3,"slot_index":3}],"properties":{"Node name for S&R":"Animation Builder (mtb)"},"widgets_values":[24,1,1,24,1,"frame: 0 / 23","Done 😎!","reset","queue"],"color":"#232","bgcolor":"#353","shape":1}],"links":[[2,5,0,3,3,"LATENT"],[6,7,0,3,2,"CONDITIONING"],[28,19,0,6,0,"CLIP"],[29,19,0,7,0,"CLIP"],[116,16,1,19,0,"CLIP"],[117,16,2,56,0,"*"],[120,57,0,3,0,"MODEL"],[124,56,0,59,0,"*"],[134,16,0,65,0,"*"],[135,65,0,57,0,"*"],[138,3,0,66,0,"LATENT"],[139,59,0,66,1,"VAE"],[141,68,0,69,0,"STRING"],[145,69,0,6,1,"STRING"],[149,19,0,70,0,"CLIP"],[150,68,0,70,1,"STRING"],[151,6,0,72,0,"CONDITIONING"],[152,70,0,72,1,"CONDITIONING"],[153,72,0,3,1,"CONDITIONING"],[154,73,1,72,2,"FLOAT"],[161,66,0,78,0,"IMAGE"],[163,80,0,7,1,"STRING"],[167,81,0,79,0,"IMAGE"],[170,82,0,73,0,"INT"],[171,82,0,81,1,"INT"],[172,73,3,81,2,"BOOLEAN"]],"groups":[{"title":"Txt2Img","bounding":[-2061,-234,1932,973],"color":"#a1309b","locked":false},{"title":"Save Intermediate 
Image","bounding":[150,-116,303,213],"color":"#3f789e","locked":false}],"config":{},"extra":{},"version":0.4}
|
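The note embedded in this `03-animation_builder-condition-lerp` workflow explains the loop: Animation Builder queues 24 frames, its "0-1 (scaled)" output rises from 0 to 1 over the frame count, and that float drives `conditioning_to_strength` on the `ConditioningAverage` node so the two prompts are blended progressively. As a rough illustration only (not the node's code; the linear ramp below is an assumption based on the note), the per-frame schedule looks like this:

```python
# Illustrative sketch of the per-frame schedule described in the workflow note.
# Assumption: "0-1 (scaled)" is a linear ramp over the 24 queued frames; this
# is not the Animation Builder (mtb) source code.
total_frames = 24  # matches the "total_frames" primitive in the workflow

for frame in range(total_frames):
    t = frame / (total_frames - 1)  # 0.0 on the first frame, 1.0 on the last
    # fed into ConditioningAverage as conditioning_to_strength: higher t means
    # the blend leans more on conditioning_to and less on conditioning_from
    print(f"frame {frame:02d} -> conditioning_to_strength = {t:.3f}")
```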
comfy_mtb/examples/04-animation_builder-deforum.json
ADDED
@@ -0,0 +1 @@
1 |
+
{"last_node_id":43,"last_link_id":69,"nodes":[{"id":24,"type":"Note","pos":[-827,406],"size":[233.25148010253906,82.53218841552734],"flags":{},"order":0,"mode":0,"properties":{"text":""},"widgets_values":["On first frame we get the init image, on all subsequent ones the feedback from the previous queue item"],"color":"#223","bgcolor":"#335","shape":1},{"id":10,"type":"LoadImage","pos":[-1409,524],"size":[315,314],"flags":{},"order":1,"mode":0,"outputs":[{"name":"IMAGE","type":"IMAGE","links":[10],"shape":3,"slot_index":0},{"name":"MASK","type":"MASK","links":null,"shape":3}],"properties":{"Node name for S&R":"LoadImage"},"widgets_values":["example.png","image"],"color":"#432","bgcolor":"#653","shape":1},{"id":35,"type":"CLIPTextEncode","pos":[-118,331],"size":[210,54],"flags":{},"order":7,"mode":0,"inputs":[{"name":"clip","type":"CLIP","link":60},{"name":"text","type":"STRING","link":66,"widget":{"name":"text","config":["STRING",{"multiline":true}]}}],"outputs":[{"name":"CONDITIONING","type":"CONDITIONING","links":[54],"shape":3,"slot_index":0}],"properties":{"Node name for S&R":"CLIPTextEncode"},"widgets_values":[""]},{"id":9,"type":"CheckpointLoaderSimple","pos":[-558,114],"size":[301.2330322265625,98],"flags":{},"order":2,"mode":0,"outputs":[{"name":"MODEL","type":"MODEL","links":[62],"shape":3,"slot_index":0},{"name":"CLIP","type":"CLIP","links":[59,60],"shape":3,"slot_index":1},{"name":"VAE","type":"VAE","links":[58,69],"shape":3,"slot_index":2}],"properties":{"Node name for S&R":"CheckpointLoaderSimple"},"widgets_values":["revAnimated_v122.safetensors"]},{"id":37,"type":"VAEEncode","pos":[-125,236],"size":[210,46],"flags":{},"order":16,"mode":0,"inputs":[{"name":"pixels","type":"IMAGE","link":57},{"name":"vae","type":"VAE","link":58,"slot_index":1}],"outputs":[{"name":"LATENT","type":"LATENT","links":[55],"shape":3,"slot_index":0}],"properties":{"Node name for S&R":"VAEEncode"}},{"id":34,"type":"CLIPTextEncode","pos":[-124,84],"size":[210,96],"flags":{},"order":6,"mode":0,"inputs":[{"name":"clip","type":"CLIP","link":59}],"outputs":[{"name":"CONDITIONING","type":"CONDITIONING","links":[53],"shape":3,"slot_index":0}],"properties":{"Node name for S&R":"CLIPTextEncode"},"widgets_values":["A plastic (skeleton) in a pink dress sleeping, draping, wrinkles, shiny"]},{"id":33,"type":"Text box","pos":[-497,349],"size":[294,95.1284408569336],"flags":{},"order":3,"mode":0,"outputs":[{"name":"STRING","type":"STRING","links":[66],"shape":3,"slot_index":0}],"title":"❌Mel Negatives (general) (Negative)","properties":{"Node name for S&R":"Text box"},"widgets_values":["embedding:EasyNegative, embedding:EasyNegativeV2, watermark, text, deformed, NSFW, Cleavage, Pubic Hair, Nudity, Naked, 
censored"]},{"id":39,"type":"Reroute","pos":[-156,908],"size":[75,26],"flags":{},"order":11,"mode":0,"inputs":[{"name":"","type":"*","link":67}],"outputs":[{"name":"","type":"INT","links":[68]}],"properties":{"showOutputText":false,"horizontal":false}},{"id":36,"type":"KSampler","pos":[223,262],"size":[315,442],"flags":{},"order":17,"mode":0,"inputs":[{"name":"model","type":"MODEL","link":62,"slot_index":0},{"name":"positive","type":"CONDITIONING","link":53},{"name":"negative","type":"CONDITIONING","link":54},{"name":"latent_image","type":"LATENT","link":55},{"name":"denoise","type":"FLOAT","link":63,"widget":{"name":"denoise","config":["FLOAT",{"default":1,"min":0,"max":1,"step":0.01}]},"slot_index":4},{"name":"seed","type":"INT","link":68,"widget":{"name":"seed","config":["INT",{"default":0,"min":0,"max":18446744073709552000}]}}],"outputs":[{"name":"LATENT","type":"LATENT","links":[56],"shape":3,"slot_index":0,"color":"#FF9CF9"}],"properties":{"Node name for S&R":"KSampler"},"widgets_values":[938170558049910,"randomize",15,8,"euler_ancestral","normal",0.6]},{"id":15,"type":"SaveImage","pos":[782,259],"size":[330.1112365722656,378.1239929199219],"flags":{},"order":19,"mode":0,"inputs":[{"name":"images","type":"IMAGE","link":65}],"properties":{},"widgets_values":["ComfyUI"]},{"id":38,"type":"VAEDecode","pos":[556,260],"size":[210,46],"flags":{},"order":18,"mode":0,"inputs":[{"name":"samples","type":"LATENT","link":56},{"name":"vae","type":"VAE","link":69,"slot_index":1}],"outputs":[{"name":"IMAGE","type":"IMAGE","links":[65],"shape":3,"slot_index":0}],"properties":{"Node name for S&R":"VAEDecode"}},{"id":11,"type":"Get Batch From History (mtb)","pos":[-800,524],"size":[235.1999969482422,126],"flags":{},"order":13,"mode":0,"inputs":[{"name":"passthrough_image","type":"IMAGE","link":10},{"name":"enable","type":"BOOLEAN","link":9,"widget":{"name":"enable","config":["BOOLEAN",{"default":true}]},"slot_index":1}],"outputs":[{"name":"i","type":"IMAGE","links":[26],"shape":3,"slot_index":0}],"properties":{"Node name for S&R":"Get Batch From History (mtb)"},"widgets_values":[false,1,0,1035,true],"color":"#223","bgcolor":"#335"},{"id":12,"type":"Int To Bool (mtb)","pos":[-1065,765],"size":[210,36.366058349609375],"flags":{},"order":9,"mode":0,"inputs":[{"name":"int","type":"INT","link":34,"widget":{"name":"int","config":["INT",{"default":0}]},"slot_index":0}],"outputs":[{"name":"BOOLEAN","type":"BOOLEAN","links":[9],"shape":3,"slot_index":0}],"properties":{"Node name for S&R":"Int To Bool (mtb)"},"widgets_values":[29],"color":"#222","bgcolor":"#000"},{"id":22,"type":"Fit Number (mtb)","pos":[-647,882],"size":[232.28509521484375,178],"flags":{},"order":10,"mode":0,"inputs":[{"name":"value","type":"FLOAT","link":27}],"outputs":[{"name":"FLOAT","type":"FLOAT","links":[63],"shape":3,"slot_index":0}],"title":"Fit Number (mtb) - Denoise","properties":{"Node name for S&R":"Fit Number (mtb)"},"widgets_values":[true,0,1,0.4,0.65,"Quart In/Out"]},{"id":14,"type":"Transform Image (mtb)","pos":[-527,520],"size":[315,214],"flags":{},"order":15,"mode":0,"inputs":[{"name":"image","type":"IMAGE","link":26}],"outputs":[{"name":"IMAGE","type":"IMAGE","links":[57],"shape":3,"slot_index":0}],"properties":{"Node name for S&R":"Transform Image 
(mtb)"},"widgets_values":[15,0,0.98,-1,0,"reflect","#930606"],"color":"#223","bgcolor":"#335"},{"id":31,"type":"PrimitiveNode","pos":[-2204,1309],"size":[210,82],"flags":{},"order":4,"mode":0,"outputs":[{"name":"INT","type":"INT","links":[43],"widget":{"name":"total_frames","config":["INT",{"default":100,"min":0}]},"slot_index":0}],"title":"total_frames","properties":{},"widgets_values":[30,"fixed"],"color":"#432","bgcolor":"#653"},{"id":43,"type":"Note","pos":[-2209,1157],"size":[214.22988891601562,107.93704986572266],"flags":{},"order":5,"mode":0,"properties":{"text":""},"widgets_values":["BECAUSE THE ORDER OF OPERATION FROM HOW ANIMATION BUILDER IS DONE, YOU MUST USE ONE MORE FRAME THAN NEEDED HERE"],"color":"#432","bgcolor":"#653"},{"id":18,"type":"Get Batch From History (mtb)","pos":[-960,1257],"size":[235.1999969482422,126],"flags":{},"order":12,"mode":0,"inputs":[{"name":"passthrough_image","type":"IMAGE","link":null},{"name":"enable","type":"BOOLEAN","link":31,"widget":{"name":"enable","config":["BOOLEAN",{"default":true}]},"slot_index":1}],"outputs":[{"name":"i","type":"IMAGE","links":[18],"shape":3,"slot_index":0}],"properties":{"Node name for S&R":"Get Batch From History (mtb)"},"widgets_values":[true,29,0,1035,false]},{"id":19,"type":"Save Gif (mtb)","pos":[-613,1256],"size":[210,372],"flags":{},"order":14,"mode":0,"inputs":[{"name":"image","type":"IMAGE","link":18}],"properties":{"Node name for S&R":"Save Gif (mtb)"},"widgets_values":[12,1,false,false,"nearest","/view?filename=0f83896060.gif&subfolder=&type=output"]},{"id":17,"type":"Animation Builder (mtb)","pos":[-1312,883],"size":[211.60000610351562,294],"flags":{},"order":8,"mode":0,"inputs":[{"name":"total_frames","type":"INT","link":43,"widget":{"name":"total_frames","config":["INT",{"default":100,"min":0}]},"slot_index":0}],"outputs":[{"name":"frame","type":"INT","links":[34],"shape":3,"slot_index":0},{"name":"0-1 (scaled)","type":"FLOAT","links":[27],"shape":3,"slot_index":1},{"name":"count","type":"INT","links":[67],"shape":3,"slot_index":2},{"name":"loop_ended","type":"BOOLEAN","links":[31],"shape":3,"slot_index":3}],"properties":{"Node name for S&R":"Animation Builder (mtb)"},"widgets_values":[30,1,2,0,0,"Idle","Iteration: Idle","reset","queue"],"color":"#232","bgcolor":"#353","shape":1}],"links":[[9,12,0,11,1,"BOOLEAN"],[10,10,0,11,0,"IMAGE"],[18,18,0,19,0,"IMAGE"],[26,11,0,14,0,"IMAGE"],[27,17,1,22,0,"FLOAT"],[31,17,3,18,1,"BOOLEAN"],[34,17,0,12,0,"INT"],[43,31,0,17,0,"INT"],[53,34,0,36,1,"CONDITIONING"],[54,35,0,36,2,"CONDITIONING"],[55,37,0,36,3,"LATENT"],[56,36,0,38,0,"LATENT"],[57,14,0,37,0,"IMAGE"],[58,9,2,37,1,"VAE"],[59,9,1,34,0,"CLIP"],[60,9,1,35,0,"CLIP"],[62,9,0,36,0,"MODEL"],[63,22,0,36,4,"FLOAT"],[65,38,0,15,0,"IMAGE"],[66,33,0,35,1,"STRING"],[67,17,2,39,0,"*"],[68,39,0,36,5,"INT"],[69,9,2,38,1,"VAE"]],"groups":[{"title":"Video Output","bounding":[-702,1161,516,773],"color":"#3f789e","locked":false},{"title":"START THE QUEUE BY CLICKLING HERE 👆","bounding":[-1612,1219,521,80],"color":"#8A8","locked":false}],"config":{},"extra":{},"version":0.4}
|
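The notes in this `04-animation_builder-deforum` workflow describe a feedback loop: the first frame starts from the loaded init image, every later frame re-encodes the previous queue item's output (via `Get Batch From History` and `Transform Image`), and `Fit Number (mtb)` remaps Animation Builder's 0-1 output into the KSampler denoise, here 0.4 to 0.65 with a "Quart In/Out" easing. Below is a minimal sketch of that denoise schedule, assuming the standard quartic in/out easing formula (the node's exact curve may differ):

```python
# Hypothetical sketch of the Fit Number (mtb) remap used in this example:
# 0-1 frame progress -> denoise in [0.4, 0.65] with a quartic in/out easing.
def quart_in_out(t: float) -> float:
    # standard easeInOutQuart; assumed to correspond to the node's "Quart In/Out"
    return 8 * t**4 if t < 0.5 else 1 - ((-2 * t + 2) ** 4) / 2

total_frames = 30  # value of the "total_frames" primitive in the workflow
for frame in range(total_frames):
    t = frame / (total_frames - 1)
    denoise = 0.4 + (0.65 - 0.4) * quart_in_out(t)
    print(f"frame {frame:02d}: denoise = {denoise:.3f}")
```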
comfy_mtb/examples/README.md
ADDED
@@ -0,0 +1,12 @@
# Examples

All the examples use the [RevAnimated model 1.22](https://civitai.com/models/7371?modelVersionId=46846)

## 01 Faceswap

This example showcases the `Face Swap` & `Restore Face` nodes to replace the character with George Lucas's face.
The face reference image uses the `Load Image From Url` node to avoid bundling input images.

<img src="https://github.com/melMass/comfy_mtb/assets/7041726/272af7d6-f01c-478e-a82f-926e772d7209" width=500/>

## 02 FILM interpolation

This example showcases the FILM interpolation implementation. Here we do text replacement on the conditioning of two distinct images sharing the same model, input latent & seed, to get relatively close images (see the sketch below).

<img src="https://github.com/melMass/comfy_mtb/assets/7041726/4c28dd87-89fc-4d27-910a-0a1fcf28cdc0" width=500/>
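For example 02, the "text replacement" mentioned above is a plain substring swap: the same base prompt, model, latent and seed are reused, and only one fragment of the prompt changes between the two renders. A hedged sketch of that prompt preparation (it mirrors the `String Replace (mtb)` node in the example graph and uses values taken from it; the actual rendering calls are left out):

```python
# Sketch of the prompt pair used by example 02; this mirrors what the
# String Replace (mtb) node does in the graph, it is not the node's source.
base_prompt = (
    "Close up photo of the face of a Caucasian young man (looking down, and "
    "frowning), rim lighting, Tokyo 1987, Bernard, over a blue sky, "
    "blue eyes shaved close"
)
variant_prompt = base_prompt.replace("a Caucasian young", "an African old")

seed = 1682  # kept "fixed" in the workflow so both renders stay close
for prompt in (base_prompt, variant_prompt):
    print(f"seed={seed} -> {prompt}")
```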
comfy_mtb/font.ttf
ADDED
Binary file (24.5 kB).
comfy_mtb/html/style.css
ADDED
@@ -0,0 +1,133 @@
html {
  height: 100%;
  margin: 0;
  padding: 0;
  background-color: rgb(33, 33, 33);
  color: whitesmoke;
}

a {
  color: whitesmoke;
}

.table-container {
  width: 70%;
  height: 100%;
  overflow: auto;
}

table {
  border-collapse: collapse;
}

th,
td {
  padding: 10px;
  text-align: left;
}

th {
  background-color: rgb(45, 45, 45);
  /* Light gray background for header row */
  font-weight: bold;
}

tr:nth-child(even) {
  background-color: rgb(45, 45, 45);
  /* Alternate row background color */
}

tr:hover {
  background-color: #797979;
  /* Highlight color on hover */
}

td:nth-child(2) {
  /* Applies to the second column (Description) */
  width: 80%;
  /* Adjust the width as needed */
  word-wrap: break-word;
  /* Allow long words to be broken and wrapped to the next line */
}

.mtb_logo {
  display: flex;
  flex-direction: column;
  align-items: center;
}

/* Styling for WebKit-based browsers (Chrome, Edge) */
.table-container::-webkit-scrollbar {
  width: 10px;
  /* Set the width of the scrollbar */
}

.table-container::-webkit-scrollbar-thumb {
  background-color: #797979;
  /* Color of the scrollbar thumb */
}

/* Styling for Firefox */
.table-container {
  scrollbar-width: thin;
  /* Set the width of the scrollbar */
}

.table-container::-webkit-scrollbar-thumb {
  background-color: #797979;
  /* Color of the scrollbar thumb */
}

/* Optionally, you can also style the scrollbar track (background) */
.table-container::-webkit-scrollbar-track {
  background-color: #f2f2f2;
}

body {
  margin: 0;
  padding: 0;
  font-family: monospace;
  height: 100%;
  background-color: rgb(33, 33, 33);
}

.title {
  font-size: 2.5em;
  font-weight: 700;
}

header {
  display: flex;
  align-items: center;
  vertical-align: middle;
  justify-content: space-between;
  background-color: rgb(12, 12, 12);
  padding: 1em;
  margin: 0;
}

main {
  display: flex;
  align-items: center;
  vertical-align: middle;
  justify-content: center;
  padding: 1em;
  margin: 0;
  height: 80%;
}

.flex-container {
  display: flex;
  flex-direction: column;
}

.menu {
  font-size: 3em;
  text-align: center;
}
comfy_mtb/install.py
ADDED
@@ -0,0 +1,632 @@
1 |
+
import requests
|
2 |
+
import os
|
3 |
+
import ast
|
4 |
+
import argparse
|
5 |
+
import sys
|
6 |
+
import subprocess
|
7 |
+
from importlib import import_module
|
8 |
+
import platform
|
9 |
+
from pathlib import Path
|
10 |
+
import sys
|
11 |
+
import stat
|
12 |
+
import threading
|
13 |
+
import signal
|
14 |
+
from contextlib import suppress
|
15 |
+
from queue import Queue, Empty
|
16 |
+
from contextlib import contextmanager
|
17 |
+
|
18 |
+
here = Path(__file__).parent
|
19 |
+
executable = sys.executable
|
20 |
+
|
21 |
+
# - detect mode
|
22 |
+
mode = None
|
23 |
+
if os.environ.get("COLAB_GPU"):
|
24 |
+
mode = "colab"
|
25 |
+
elif "python_embeded" in executable:
|
26 |
+
mode = "embeded"
|
27 |
+
elif ".venv" in executable:
|
28 |
+
mode = "venv"
|
29 |
+
|
30 |
+
|
31 |
+
if mode is None:
|
32 |
+
mode = "unknown"
|
33 |
+
|
34 |
+
# - Constants
|
35 |
+
repo_url = "https://github.com/melmass/comfy_mtb.git"
|
36 |
+
repo_owner = "melmass"
|
37 |
+
repo_name = "comfy_mtb"
|
38 |
+
short_platform = {
|
39 |
+
"windows": "win_amd64",
|
40 |
+
"linux": "linux_x86_64",
|
41 |
+
}
|
42 |
+
current_platform = platform.system().lower()
|
43 |
+
|
44 |
+
# region ansi
|
45 |
+
# ANSI escape sequences for text styling
|
46 |
+
ANSI_FORMATS = {
|
47 |
+
"reset": "\033[0m",
|
48 |
+
"bold": "\033[1m",
|
49 |
+
"dim": "\033[2m",
|
50 |
+
"italic": "\033[3m",
|
51 |
+
"underline": "\033[4m",
|
52 |
+
"blink": "\033[5m",
|
53 |
+
"reverse": "\033[7m",
|
54 |
+
"strike": "\033[9m",
|
55 |
+
}
|
56 |
+
|
57 |
+
ANSI_COLORS = {
|
58 |
+
"black": "\033[30m",
|
59 |
+
"red": "\033[31m",
|
60 |
+
"green": "\033[32m",
|
61 |
+
"yellow": "\033[33m",
|
62 |
+
"blue": "\033[34m",
|
63 |
+
"magenta": "\033[35m",
|
64 |
+
"cyan": "\033[36m",
|
65 |
+
"white": "\033[37m",
|
66 |
+
"bright_black": "\033[30;1m",
|
67 |
+
"bright_red": "\033[31;1m",
|
68 |
+
"bright_green": "\033[32;1m",
|
69 |
+
"bright_yellow": "\033[33;1m",
|
70 |
+
"bright_blue": "\033[34;1m",
|
71 |
+
"bright_magenta": "\033[35;1m",
|
72 |
+
"bright_cyan": "\033[36;1m",
|
73 |
+
"bright_white": "\033[37;1m",
|
74 |
+
"bg_black": "\033[40m",
|
75 |
+
"bg_red": "\033[41m",
|
76 |
+
"bg_green": "\033[42m",
|
77 |
+
"bg_yellow": "\033[43m",
|
78 |
+
"bg_blue": "\033[44m",
|
79 |
+
"bg_magenta": "\033[45m",
|
80 |
+
"bg_cyan": "\033[46m",
|
81 |
+
"bg_white": "\033[47m",
|
82 |
+
"bg_bright_black": "\033[40;1m",
|
83 |
+
"bg_bright_red": "\033[41;1m",
|
84 |
+
"bg_bright_green": "\033[42;1m",
|
85 |
+
"bg_bright_yellow": "\033[43;1m",
|
86 |
+
"bg_bright_blue": "\033[44;1m",
|
87 |
+
"bg_bright_magenta": "\033[45;1m",
|
88 |
+
"bg_bright_cyan": "\033[46;1m",
|
89 |
+
"bg_bright_white": "\033[47;1m",
|
90 |
+
}
|
91 |
+
|
92 |
+
|
93 |
+
def apply_format(text, *formats):
|
94 |
+
"""Apply ANSI escape sequences for the specified formats to the given text."""
|
95 |
+
formatted_text = text
|
96 |
+
for format in formats:
|
97 |
+
formatted_text = f"{ANSI_FORMATS.get(format, '')}{formatted_text}{ANSI_FORMATS.get('reset', '')}"
|
98 |
+
return formatted_text
|
99 |
+
|
100 |
+
|
101 |
+
def apply_color(text, color=None, background=None):
|
102 |
+
"""Apply ANSI escape sequences for the specified color and background to the given text."""
|
103 |
+
formatted_text = text
|
104 |
+
if color:
|
105 |
+
formatted_text = f"{ANSI_COLORS.get(color, '')}{formatted_text}{ANSI_FORMATS.get('reset', '')}"
|
106 |
+
if background:
|
107 |
+
formatted_text = f"{ANSI_COLORS.get(background, '')}{formatted_text}{ANSI_FORMATS.get('reset', '')}"
|
108 |
+
return formatted_text
|
109 |
+
|
110 |
+
|
111 |
+
def print_formatted(text, *formats, color=None, background=None, **kwargs):
|
112 |
+
"""Print the given text with the specified formats, color, and background."""
|
113 |
+
formatted_text = apply_format(text, *formats)
|
114 |
+
formatted_text = apply_color(formatted_text, color, background)
|
115 |
+
file = kwargs.get("file", sys.stdout)
|
116 |
+
header = "[mtb install] "
|
117 |
+
|
118 |
+
# Handle console encoding for Unicode characters (utf-8)
|
119 |
+
encoded_header = header.encode(sys.stdout.encoding, errors="replace").decode(
|
120 |
+
sys.stdout.encoding
|
121 |
+
)
|
122 |
+
encoded_text = formatted_text.encode(sys.stdout.encoding, errors="replace").decode(
|
123 |
+
sys.stdout.encoding
|
124 |
+
)
|
125 |
+
|
126 |
+
print(
|
127 |
+
" " * len(encoded_header)
|
128 |
+
if kwargs.get("no_header")
|
129 |
+
else apply_color(apply_format(encoded_header, "bold"), color="yellow"),
|
130 |
+
encoded_text,
|
131 |
+
file=file,
|
132 |
+
)
|
133 |
+
|
134 |
+
|
135 |
+
# endregion
|
136 |
+
|
137 |
+
|
138 |
+
# region utils
|
139 |
+
def enqueue_output(out, queue):
|
140 |
+
for char in iter(lambda: out.read(1), b""):
|
141 |
+
queue.put(char)
|
142 |
+
out.close()
|
143 |
+
|
144 |
+
|
145 |
+
def run_command(cmd, ignored_lines_start=None):
|
146 |
+
if ignored_lines_start is None:
|
147 |
+
ignored_lines_start = []
|
148 |
+
|
149 |
+
if isinstance(cmd, str):
|
150 |
+
shell_cmd = cmd
|
151 |
+
elif isinstance(cmd, list):
|
152 |
+
shell_cmd = ""
|
153 |
+
for arg in cmd:
|
154 |
+
if isinstance(arg, Path):
|
155 |
+
arg = arg.as_posix()
|
156 |
+
shell_cmd += f"{arg} "
|
157 |
+
else:
|
158 |
+
raise ValueError(
|
159 |
+
"Invalid 'cmd' argument. It must be a string or a list of arguments."
|
160 |
+
)
|
161 |
+
|
162 |
+
process = subprocess.Popen(
|
163 |
+
shell_cmd,
|
164 |
+
stdout=subprocess.PIPE,
|
165 |
+
stderr=subprocess.PIPE,
|
166 |
+
universal_newlines=True,
|
167 |
+
shell=True,
|
168 |
+
)
|
169 |
+
|
170 |
+
# Create separate threads to read standard output and standard error streams
|
171 |
+
stdout_queue = Queue()
|
172 |
+
stderr_queue = Queue()
|
173 |
+
stdout_thread = threading.Thread(
|
174 |
+
target=enqueue_output, args=(process.stdout, stdout_queue)
|
175 |
+
)
|
176 |
+
stderr_thread = threading.Thread(
|
177 |
+
target=enqueue_output, args=(process.stderr, stderr_queue)
|
178 |
+
)
|
179 |
+
stdout_thread.daemon = True
|
180 |
+
stderr_thread.daemon = True
|
181 |
+
stdout_thread.start()
|
182 |
+
stderr_thread.start()
|
183 |
+
|
184 |
+
interrupted = False
|
185 |
+
|
186 |
+
def signal_handler(signum, frame):
|
187 |
+
nonlocal interrupted
|
188 |
+
interrupted = True
|
189 |
+
print("Command execution interrupted.")
|
190 |
+
|
191 |
+
# Register the signal handler for keyboard interrupts (SIGINT)
|
192 |
+
signal.signal(signal.SIGINT, signal_handler)
|
193 |
+
|
194 |
+
stdout_buffer = ""
|
195 |
+
stderr_buffer = ""
|
196 |
+
|
197 |
+
# Process output from both streams until the process completes or interrupted
|
198 |
+
while not interrupted and (
|
199 |
+
process.poll() is None or not stdout_queue.empty() or not stderr_queue.empty()
|
200 |
+
):
|
201 |
+
with suppress(Empty):
|
202 |
+
stdout_char = stdout_queue.get_nowait()
|
203 |
+
stdout_buffer += stdout_char
|
204 |
+
if stdout_char == "\n":
|
205 |
+
if not any(
|
206 |
+
stdout_buffer.startswith(ign) for ign in ignored_lines_start
|
207 |
+
):
|
208 |
+
print(stdout_buffer.strip())
|
209 |
+
stdout_buffer = ""
|
210 |
+
with suppress(Empty):
|
211 |
+
stderr_char = stderr_queue.get_nowait()
|
212 |
+
stderr_buffer += stderr_char
|
213 |
+
if stderr_char == "\n":
|
214 |
+
print(stderr_buffer.strip())
|
215 |
+
stderr_buffer = ""
|
216 |
+
|
217 |
+
# Print any remaining content in buffers
|
218 |
+
if stdout_buffer and not any(
|
219 |
+
stdout_buffer.startswith(ign) for ign in ignored_lines_start
|
220 |
+
):
|
221 |
+
print(stdout_buffer.strip())
|
222 |
+
if stderr_buffer:
|
223 |
+
print(stderr_buffer.strip())
|
224 |
+
|
225 |
+
return_code = process.returncode
|
226 |
+
|
227 |
+
if return_code == 0 and not interrupted:
|
228 |
+
print("Command executed successfully!")
|
229 |
+
else:
|
230 |
+
if not interrupted:
|
231 |
+
print(f"Command failed with return code: {return_code}")
|
232 |
+
|
233 |
+
|
234 |
+
# endregion
|
235 |
+
|
236 |
+
try:
|
237 |
+
import requirements
|
238 |
+
except ImportError:
|
239 |
+
print_formatted("Installing requirements-parser...", "italic", color="yellow")
|
240 |
+
run_command([sys.executable, "-m", "pip", "install", "requirements-parser"])
|
241 |
+
import requirements
|
242 |
+
|
243 |
+
print_formatted("Done.", "italic", color="green")
|
244 |
+
|
245 |
+
try:
|
246 |
+
from tqdm import tqdm
|
247 |
+
except ImportError:
|
248 |
+
print_formatted("Installing tqdm...", "italic", color="yellow")
|
249 |
+
run_command([sys.executable, "-m", "pip", "install", "--upgrade", "tqdm"])
|
250 |
+
from tqdm import tqdm
|
251 |
+
|
252 |
+
pip_map = {
|
253 |
+
"onnxruntime-gpu": "onnxruntime",
|
254 |
+
"opencv-contrib": "cv2",
|
255 |
+
"tb-nightly": "tensorboard",
|
256 |
+
"protobuf": "google.protobuf",
|
257 |
+
# Add more mappings as needed
|
258 |
+
}
|
259 |
+
|
260 |
+
|
261 |
+
def is_pipe():
|
262 |
+
if not sys.stdin.isatty():
|
263 |
+
return False
|
264 |
+
if sys.platform == "win32":
|
265 |
+
try:
|
266 |
+
import msvcrt
|
267 |
+
|
268 |
+
return msvcrt.get_osfhandle(0) != -1
|
269 |
+
except ImportError:
|
270 |
+
return False
|
271 |
+
else:
|
272 |
+
try:
|
273 |
+
mode = os.fstat(0).st_mode
|
274 |
+
return (
|
275 |
+
stat.S_ISFIFO(mode)
|
276 |
+
or stat.S_ISREG(mode)
|
277 |
+
or stat.S_ISBLK(mode)
|
278 |
+
or stat.S_ISSOCK(mode)
|
279 |
+
)
|
280 |
+
except OSError:
|
281 |
+
return False
|
282 |
+
|
283 |
+
|
284 |
+
@contextmanager
|
285 |
+
def suppress_std():
|
286 |
+
with open(os.devnull, "w") as devnull:
|
287 |
+
old_stdout = sys.stdout
|
288 |
+
old_stderr = sys.stderr
|
289 |
+
sys.stdout = devnull
|
290 |
+
sys.stderr = devnull
|
291 |
+
|
292 |
+
try:
|
293 |
+
yield
|
294 |
+
finally:
|
295 |
+
sys.stdout = old_stdout
|
296 |
+
sys.stderr = old_stderr
|
297 |
+
|
298 |
+
|
299 |
+
# Get the version from __init__.py
|
300 |
+
def get_local_version():
|
301 |
+
init_file = os.path.join(os.path.dirname(__file__), "__init__.py")
|
302 |
+
if os.path.isfile(init_file):
|
303 |
+
with open(init_file, "r") as f:
|
304 |
+
tree = ast.parse(f.read())
|
305 |
+
for node in ast.walk(tree):
|
306 |
+
if isinstance(node, ast.Assign):
|
307 |
+
for target in node.targets:
|
308 |
+
if (
|
309 |
+
isinstance(target, ast.Name)
|
310 |
+
and target.id == "__version__"
|
311 |
+
and isinstance(node.value, ast.Str)
|
312 |
+
):
|
313 |
+
return node.value.s
|
314 |
+
return None
|
315 |
+
|
316 |
+
|
317 |
+
def download_file(url, file_name):
|
318 |
+
with requests.get(url, stream=True) as response:
|
319 |
+
response.raise_for_status()
|
320 |
+
total_size = int(response.headers.get("content-length", 0))
|
321 |
+
with open(file_name, "wb") as file, tqdm(
|
322 |
+
desc=file_name.stem,
|
323 |
+
total=total_size,
|
324 |
+
unit="B",
|
325 |
+
unit_scale=True,
|
326 |
+
unit_divisor=1024,
|
327 |
+
) as progress_bar:
|
328 |
+
for chunk in response.iter_content(chunk_size=8192):
|
329 |
+
file.write(chunk)
|
330 |
+
progress_bar.update(len(chunk))
|
331 |
+
|
332 |
+
|
333 |
+
def get_requirements(path: Path):
|
334 |
+
with open(path.resolve(), "r") as requirements_file:
|
335 |
+
requirements_txt = requirements_file.read()
|
336 |
+
|
337 |
+
try:
|
338 |
+
parsed_requirements = requirements.parse(requirements_txt)
|
339 |
+
except AttributeError:
|
340 |
+
print_formatted(
|
341 |
+
f"Failed to parse {path}. Please make sure the file is correctly formatted.",
|
342 |
+
"bold",
|
343 |
+
color="red",
|
344 |
+
)
|
345 |
+
|
346 |
+
return
|
347 |
+
|
348 |
+
return parsed_requirements
|
349 |
+
|
350 |
+
|
351 |
+
def try_import(requirement):
|
352 |
+
dependency = requirement.name.strip()
|
353 |
+
import_name = pip_map.get(dependency, dependency)
|
354 |
+
installed = False
|
355 |
+
|
356 |
+
pip_name = dependency
|
357 |
+
pip_spec = "".join(specs[0]) if (specs := requirement.specs) else ""
|
358 |
+
try:
|
359 |
+
with suppress_std():
|
360 |
+
import_module(import_name)
|
361 |
+
print_formatted(
|
362 |
+
f"\t✅ Package {pip_name} already installed (import name: '{import_name}').",
|
363 |
+
"bold",
|
364 |
+
color="green",
|
365 |
+
no_header=True,
|
366 |
+
)
|
367 |
+
installed = True
|
368 |
+
except ImportError:
|
369 |
+
print_formatted(
|
370 |
+
f"\t⛔ Package {pip_name} is missing (import name: '{import_name}').",
|
371 |
+
"bold",
|
372 |
+
color="red",
|
373 |
+
no_header=True,
|
374 |
+
)
|
375 |
+
|
376 |
+
return (installed, pip_name, pip_spec, import_name)
|
377 |
+
|
378 |
+
|
379 |
+
def import_or_install(requirement, dry=False):
|
380 |
+
installed, pip_name, pip_spec, import_name = try_import(requirement)
|
381 |
+
|
382 |
+
pip_install_name = pip_name + pip_spec
|
383 |
+
|
384 |
+
if not installed:
|
385 |
+
print_formatted(f"Installing package {pip_name}...", "italic", color="yellow")
|
386 |
+
if dry:
|
387 |
+
print_formatted(
|
388 |
+
f"Dry-run: Package {pip_install_name} would be installed (import name: '{import_name}').",
|
389 |
+
color="yellow",
|
390 |
+
)
|
391 |
+
else:
|
392 |
+
try:
|
393 |
+
run_command([sys.executable, "-m", "pip", "install", pip_install_name])
|
394 |
+
print_formatted(
|
395 |
+
f"Package {pip_install_name} installed successfully using pip package name (import name: '{import_name}')",
|
396 |
+
"bold",
|
397 |
+
color="green",
|
398 |
+
)
|
399 |
+
except subprocess.CalledProcessError as e:
|
400 |
+
print_formatted(
|
401 |
+
f"Failed to install package {pip_install_name} using pip package name (import name: '{import_name}'). Error: {str(e)}",
|
402 |
+
"bold",
|
403 |
+
color="red",
|
404 |
+
)
|
405 |
+
|
406 |
+
|
407 |
+
def get_github_assets(tag=None):
|
408 |
+
if tag:
|
409 |
+
tag_url = (
|
410 |
+
f"https://api.github.com/repos/{repo_owner}/{repo_name}/releases/tags/{tag}"
|
411 |
+
)
|
412 |
+
else:
|
413 |
+
tag_url = (
|
414 |
+
f"https://api.github.com/repos/{repo_owner}/{repo_name}/releases/latest"
|
415 |
+
)
|
416 |
+
response = requests.get(tag_url)
|
417 |
+
if response.status_code == 404:
|
418 |
+
# print_formatted(
|
419 |
+
# f"Tag version '{apply_color(version,'cyan')}' not found for {owner}/{repo} repository."
|
420 |
+
# )
|
421 |
+
print_formatted("Error retrieving the release assets.", color="red")
|
422 |
+
sys.exit()
|
423 |
+
|
424 |
+
tag_data = response.json()
|
425 |
+
tag_name = tag_data["name"]
|
426 |
+
|
427 |
+
return tag_data, tag_name
|
428 |
+
|
429 |
+
|
430 |
+
# Install dependencies from requirements.txt
|
431 |
+
def install_dependencies(dry=False):
|
432 |
+
parsed_requirements = get_requirements(here / "reqs.txt")
|
433 |
+
if not parsed_requirements:
|
434 |
+
return
|
435 |
+
print_formatted(
|
436 |
+
"Installing dependencies from reqs.txt...", "italic", color="yellow"
|
437 |
+
)
|
438 |
+
|
439 |
+
for requirement in parsed_requirements:
|
440 |
+
import_or_install(requirement, dry=dry)
|
441 |
+
|
442 |
+
|
443 |
+
if __name__ == "__main__":
|
444 |
+
full = False
|
445 |
+
if len(sys.argv) == 1:
|
446 |
+
print_formatted(
|
447 |
+
"No arguments provided, doing a full install/update...",
|
448 |
+
"italic",
|
449 |
+
color="yellow",
|
450 |
+
)
|
451 |
+
|
452 |
+
full = True
|
453 |
+
|
454 |
+
# Parse command-line arguments
|
455 |
+
parser = argparse.ArgumentParser(description="Comfy_mtb install script")
|
456 |
+
parser.add_argument(
|
457 |
+
"--path",
|
458 |
+
"-p",
|
459 |
+
type=str,
|
460 |
+
help="Path to clone the repository to (i.e the absolute path to ComfyUI/custom_nodes)",
|
461 |
+
)
|
462 |
+
parser.add_argument(
|
463 |
+
"--wheels", "-w", action="store_true", help="Install wheel dependencies"
|
464 |
+
)
|
465 |
+
parser.add_argument(
|
466 |
+
"--requirements", "-r", action="store_true", help="Install requirements.txt"
|
467 |
+
)
|
468 |
+
parser.add_argument(
|
469 |
+
"--dry",
|
470 |
+
action="store_true",
|
471 |
+
help="Print what will happen without doing it (still making requests to the GH Api)",
|
472 |
+
)
|
473 |
+
|
474 |
+
# - keep
|
475 |
+
# parser.add_argument(
|
476 |
+
# "--version",
|
477 |
+
# default=get_local_version(),
|
478 |
+
# help="Version to check against the GitHub API",
|
479 |
+
# )
|
480 |
+
print_formatted("mtb install", "bold", color="yellow")
|
481 |
+
|
482 |
+
args = parser.parse_args()
|
483 |
+
|
484 |
+
# wheels_directory = here / "wheels"
|
485 |
+
print_formatted(f"Detected environment: {apply_color(mode,'cyan')}")
|
486 |
+
|
487 |
+
if args.path:
|
488 |
+
clone_dir = Path(args.path)
|
489 |
+
if not clone_dir.exists():
|
490 |
+
print_formatted(
|
491 |
+
"The path provided does not exist on disk... It must be pointing to ComfyUI's custom_nodes directory"
|
492 |
+
)
|
493 |
+
sys.exit()
|
494 |
+
|
495 |
+
else:
|
496 |
+
repo_dir = clone_dir / repo_name
|
497 |
+
if not repo_dir.exists():
|
498 |
+
print_formatted(f"Cloning to {repo_dir}...", "italic", color="yellow")
|
499 |
+
run_command(["git", "clone", "--recursive", repo_url, repo_dir])
|
500 |
+
else:
|
501 |
+
print_formatted(
|
502 |
+
f"Directory {repo_dir} already exists, we will update it..."
|
503 |
+
)
|
504 |
+
run_command(["git", "pull", "-C", repo_dir])
|
505 |
+
# os.chdir(clone_dir)
|
506 |
+
here = clone_dir
|
507 |
+
full = True
|
508 |
+
|
509 |
+
# Install dependencies from requirements.txt
|
510 |
+
# if args.requirements or mode == "venv":
|
511 |
+
|
512 |
+
# if (not args.wheels and mode not in ["colab", "embeded"]) and not full:
|
513 |
+
# print_formatted(
|
514 |
+
# "Skipping wheel installation. Use --wheels to install wheel dependencies. (only needed for Comfy embed)",
|
515 |
+
# "italic",
|
516 |
+
# color="yellow",
|
517 |
+
# )
|
518 |
+
|
519 |
+
# install_dependencies(dry=args.dry)
|
520 |
+
# sys.exit()
|
521 |
+
|
522 |
+
# if mode in ["colab", "embeded"]:
|
523 |
+
# print_formatted(
|
524 |
+
# f"Downloading and installing release wheels since we are in a Comfy {apply_color(mode,'cyan')} environment",
|
525 |
+
# "italic",
|
526 |
+
# color="yellow",
|
527 |
+
# )
|
528 |
+
# if full:
|
529 |
+
# print_formatted(
|
530 |
+
# f"Downloading and installing release wheels since no arguments where provided",
|
531 |
+
# "italic",
|
532 |
+
# color="yellow",
|
533 |
+
# )
|
534 |
+
|
535 |
+
print_formatted("Checking environment...", "italic", color="yellow")
|
536 |
+
missing_deps = []
|
537 |
+
if parsed_requirements := get_requirements(here / "reqs.txt"):
|
538 |
+
for requirement in parsed_requirements:
|
539 |
+
installed, pip_name, pip_spec, import_name = try_import(requirement)
|
540 |
+
if not installed:
|
541 |
+
missing_deps.append(pip_name.split("-")[0])
|
542 |
+
|
543 |
+
if not missing_deps:
|
544 |
+
print_formatted(
|
545 |
+
"All requirements are already installed. Enjoy 🚀",
|
546 |
+
"italic",
|
547 |
+
color="green",
|
548 |
+
)
|
549 |
+
sys.exit()
|
550 |
+
|
551 |
+
# # - Get the tag version from the GitHub API
|
552 |
+
# tag_data, tag_name = get_github_assets(tag=None)
|
553 |
+
|
554 |
+
# # - keep
|
555 |
+
# version = args.version
|
556 |
+
# # Compare the local and tag versions
|
557 |
+
# if version and tag_name:
|
558 |
+
# if re.match(r"v?(\d+(\.\d+)+)", version) and re.match(
|
559 |
+
# r"v?(\d+(\.\d+)+)", tag_name
|
560 |
+
# ):
|
561 |
+
# version_parts = [int(part) for part in version.lstrip("v").split(".")]
|
562 |
+
# tag_version_parts = [int(part) for part in tag_name.lstrip("v").split(".")]
|
563 |
+
|
564 |
+
# if version_parts > tag_version_parts:
|
565 |
+
# print_formatted(
|
566 |
+
# f"Local version ({version}) is greater than the release version ({tag_name}).",
|
567 |
+
# "bold",
|
568 |
+
# "yellow",
|
569 |
+
# )
|
570 |
+
# sys.exit()
|
571 |
+
|
572 |
+
# matching_assets = [
|
573 |
+
# asset
|
574 |
+
# for asset in tag_data["assets"]
|
575 |
+
# if asset["name"].endswith(".whl")
|
576 |
+
# and (
|
577 |
+
# "any" in asset["name"] or short_platform[current_platform] in asset["name"]
|
578 |
+
# )
|
579 |
+
# ]
|
580 |
+
# if not matching_assets:
|
581 |
+
# print_formatted(
|
582 |
+
# f"Unsupported operating system: {current_platform}", color="yellow"
|
583 |
+
# )
|
584 |
+
# wheel_order_asset = next(
|
585 |
+
# (asset for asset in tag_data["assets"] if asset["name"] == "wheel_order.txt"),
|
586 |
+
# None,
|
587 |
+
# )
|
588 |
+
# if wheel_order_asset is not None:
|
589 |
+
# print_formatted(
|
590 |
+
# "⚙️ Sorting the release wheels using wheels order", "italic", color="yellow"
|
591 |
+
# )
|
592 |
+
# response = requests.get(wheel_order_asset["browser_download_url"])
|
593 |
+
# if response.status_code == 200:
|
594 |
+
# wheel_order = [line.strip() for line in response.text.splitlines()]
|
595 |
+
|
596 |
+
# def get_order_index(val):
|
597 |
+
# try:
|
598 |
+
# return wheel_order.index(val)
|
599 |
+
# except ValueError:
|
600 |
+
# return len(wheel_order)
|
601 |
+
|
602 |
+
# matching_assets = sorted(
|
603 |
+
# matching_assets,
|
604 |
+
# key=lambda x: get_order_index(x["name"].split("-")[0]),
|
605 |
+
# )
|
606 |
+
# else:
|
607 |
+
# print("Failed to fetch wheel_order.txt. Status code:", response.status_code)
|
608 |
+
|
609 |
+
# missing_deps_urls = []
|
610 |
+
# for whl_file in matching_assets:
|
611 |
+
# # check if installed
|
612 |
+
# missing_deps_urls.append(whl_file["browser_download_url"])
|
613 |
+
|
614 |
+
install_cmd = [sys.executable, "-m", "pip", "install"]
|
615 |
+
|
616 |
+
# - Install all deps
|
617 |
+
if not args.dry:
|
618 |
+
if platform.system() == "Windows":
|
619 |
+
wheel_cmd = install_cmd + ["-r", (here / "reqs_windows.txt")]
|
620 |
+
else:
|
621 |
+
wheel_cmd = install_cmd + ["-r", (here / "reqs.txt")]
|
622 |
+
|
623 |
+
run_command(wheel_cmd)
|
624 |
+
print_formatted(
|
625 |
+
"✅ Successfully installed all dependencies.", "italic", color="green"
|
626 |
+
)
|
627 |
+
else:
|
628 |
+
print_formatted(
|
629 |
+
f"Would have run the following command:\n\t{apply_color(' '.join(install_cmd),'cyan')}",
|
630 |
+
"italic",
|
631 |
+
color="yellow",
|
632 |
+
)
|
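For orientation, a minimal sketch (not part of the uploaded files) of how the install helpers above compose; it assumes `here`, `get_requirements`, `try_import` and `import_or_install` are in scope as defined in install.py:

# Illustrative driver only: check every entry of reqs.txt and dry-run the install of missing ones.
for req in get_requirements(here / "reqs.txt") or []:
    installed, pip_name, pip_spec, import_name = try_import(req)
    if not installed:
        import_or_install(req, dry=True)  # dry=True only prints the pip command it would run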
comfy_mtb/log.py
ADDED
@@ -0,0 +1,79 @@
1 |
+
import logging
|
2 |
+
import re
|
3 |
+
import os
|
4 |
+
|
5 |
+
base_log_level = logging.DEBUG if os.environ.get("MTB_DEBUG") else logging.INFO
|
6 |
+
|
7 |
+
|
8 |
+
# Custom object that discards the output
|
9 |
+
class NullWriter:
|
10 |
+
def write(self, text):
|
11 |
+
pass
|
12 |
+
|
13 |
+
|
14 |
+
class Formatter(logging.Formatter):
|
15 |
+
grey = "\x1b[38;20m"
|
16 |
+
cyan = "\x1b[36;20m"
|
17 |
+
purple = "\x1b[35;20m"
|
18 |
+
yellow = "\x1b[33;20m"
|
19 |
+
red = "\x1b[31;20m"
|
20 |
+
bold_red = "\x1b[31;1m"
|
21 |
+
reset = "\x1b[0m"
|
22 |
+
# format = "%(asctime)s - [%(name)s] - %(levelname)s - %(message)s (%(filename)s:%(lineno)d)"
|
23 |
+
format = "[%(name)s] | %(levelname)s -> %(message)s"
|
24 |
+
|
25 |
+
FORMATS = {
|
26 |
+
logging.DEBUG: purple + format + reset,
|
27 |
+
logging.INFO: cyan + format + reset,
|
28 |
+
logging.WARNING: yellow + format + reset,
|
29 |
+
logging.ERROR: red + format + reset,
|
30 |
+
logging.CRITICAL: bold_red + format + reset,
|
31 |
+
}
|
32 |
+
|
33 |
+
def format(self, record):
|
34 |
+
log_fmt = self.FORMATS.get(record.levelno)
|
35 |
+
formatter = logging.Formatter(log_fmt)
|
36 |
+
return formatter.format(record)
|
37 |
+
|
38 |
+
|
39 |
+
def mklog(name, level=base_log_level):
|
40 |
+
logger = logging.getLogger(name)
|
41 |
+
logger.setLevel(level)
|
42 |
+
|
43 |
+
for handler in logger.handlers:
|
44 |
+
logger.removeHandler(handler)
|
45 |
+
|
46 |
+
ch = logging.StreamHandler()
|
47 |
+
ch.setLevel(level)
|
48 |
+
ch.setFormatter(Formatter())
|
49 |
+
logger.addHandler(ch)
|
50 |
+
|
51 |
+
# Disable log propagation
|
52 |
+
logger.propagate = False
|
53 |
+
|
54 |
+
return logger
|
55 |
+
|
56 |
+
|
57 |
+
# - The main app logger
|
58 |
+
log = mklog(__package__, base_log_level)
|
59 |
+
|
60 |
+
|
61 |
+
def log_user(arg):
|
62 |
+
print("\033[34mComfy MTB Utils:\033[0m {arg}")
|
63 |
+
|
64 |
+
|
65 |
+
def get_summary(docstring):
|
66 |
+
return docstring.strip().split("\n\n", 1)[0]
|
67 |
+
|
68 |
+
|
69 |
+
def blue_text(text):
|
70 |
+
return f"\033[94m{text}\033[0m"
|
71 |
+
|
72 |
+
|
73 |
+
def cyan_text(text):
|
74 |
+
return f"\033[96m{text}\033[0m"
|
75 |
+
|
76 |
+
|
77 |
+
def get_label(label):
|
78 |
+
words = re.findall(r"(?:^|[A-Z])[a-z]*", label)
|
79 |
+
return " ".join(words).strip()
|
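Purely illustrative (not part of the commit): how a node module typically consumes the logger factory above.

# Hypothetical usage of mklog; debug lines only appear when the MTB_DEBUG env var is set.
node_log = mklog("comfy_mtb.my_node")
node_log.info("colored, non-propagating logger")
node_log.debug("only visible with MTB_DEBUG set")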
comfy_mtb/node_list.json
ADDED
@@ -0,0 +1,46 @@
1 |
+
{
|
2 |
+
"Animation Builder (mtb)": "Convenient way to manage basic animation maths at the core of many of my workflows",
|
3 |
+
"Any To String (mtb)": "Tries to take any input and convert it to a string",
|
4 |
+
"Bbox (mtb)": "The bounding box (BBOX) custom type used by other nodes",
|
5 |
+
"Bbox From Mask (mtb)": "From a mask extract the bounding box",
|
6 |
+
"Blur (mtb)": "Blur an image using a Gaussian filter.",
|
7 |
+
"Color Correct (mtb)": "Various color correction methods",
|
8 |
+
"Colored Image (mtb)": "Constant color image of given size",
|
9 |
+
"Concat Images (mtb)": "Add images to batch",
|
10 |
+
"Crop (mtb)": "Crops an image and an optional mask to a given bounding box\n\n The bounding box can be given as a tuple of (x, y, width, height) or as a BBOX type\n The BBOX input takes precedence over the tuple input\n ",
|
11 |
+
"Debug (mtb)": "Experimental node to debug any Comfy values, support for more types and widgets is planned",
|
12 |
+
"Deep Bump (mtb)": "Normal & height maps generation from single pictures",
|
13 |
+
"Export With Ffmpeg (mtb)": "Export with FFmpeg (Experimental)",
|
14 |
+
"Face Swap (mtb)": "Face swap using deepinsight/insightface models",
|
15 |
+
"Film Interpolation (mtb)": "Google Research FILM frame interpolation for large motion",
|
16 |
+
"Fit Number (mtb)": "Fit the input float using a source and target range",
|
17 |
+
"Float To Number (mtb)": "Node addon for the WAS Suite. Converts a \"comfy\" FLOAT to a NUMBER.",
|
18 |
+
"Get Batch From History (mtb)": "Very experimental node to load images from the history of the server.\n\n Queue items without output are ignored in the count.",
|
19 |
+
"Image Compare (mtb)": "Compare two images and return a difference image",
|
20 |
+
"Image Premultiply (mtb)": "Premultiply image with mask",
|
21 |
+
"Image Remove Background Rembg (mtb)": "Removes the background from the input using Rembg.",
|
22 |
+
"Image Resize Factor (mtb)": "Extracted mostly from WAS Node Suite, with a few edits (most notably multiple image support) and less features.",
|
23 |
+
"Int To Bool (mtb)": "Basic int to bool conversion",
|
24 |
+
"Int To Number (mtb)": "Node addon for the WAS Suite. Converts a \"comfy\" INT to a NUMBER.",
|
25 |
+
"Latent Lerp (mtb)": "Linear interpolation (blend) between two latent vectors",
|
26 |
+
"Load Face Analysis Model (mtb)": "Loads a face analysis model",
|
27 |
+
"Load Face Enhance Model (mtb)": "Loads a GFPGan or RestoreFormer model for face enhancement.",
|
28 |
+
"Load Face Swap Model (mtb)": "Loads a faceswap model",
|
29 |
+
"Load Film Model (mtb)": "Loads a FILM model",
|
30 |
+
"Load Image From Url (mtb)": "Load an image from the given URL",
|
31 |
+
"Load Image Sequence (mtb)": "Load an image sequence from a folder. The current frame is used to determine which image to load.\n\n Usually used in conjunction with the `Primitive` node set to increment to load a sequence of images from a folder.\n Use -1 to load all matching frames as a batch.\n ",
|
32 |
+
"Mask To Image (mtb)": "Converts a mask (alpha) to an RGB image with a color and background",
|
33 |
+
"Qr Code (mtb)": "Basic QR Code generator",
|
34 |
+
"Restore Face (mtb)": "Uses GFPGan to restore faces",
|
35 |
+
"Save Gif (mtb)": "Save the images from the batch as a GIF",
|
36 |
+
"Save Image Grid (mtb)": "Save all the images in the input batch as a grid of images.",
|
37 |
+
"Save Image Sequence (mtb)": "Save an image sequence to a folder. The current frame is used to determine which image to save.\n\n This is merely a wrapper around the `save_images` function with formatting for the output folder and filename.\n ",
|
38 |
+
"Save Tensors (mtb)": "Save torch tensors (image, mask or latent) to disk, useful to debug things outside comfy",
|
39 |
+
"Smart Step (mtb)": "Utils to control the steps start/stop of the KAdvancedSampler in percentage",
|
40 |
+
"String Replace (mtb)": "Basic string replacement",
|
41 |
+
"Styles Loader (mtb)": "Load csv files and populate a dropdown from the rows (\u00e0 la A111)",
|
42 |
+
"Text To Image (mtb)": "Utils to convert text to image using a font\n\n\n The tool looks for any .ttf file in the Comfy folder hierarchy.\n ",
|
43 |
+
"Transform Image (mtb)": "Save torch tensors (image, mask or latent) to disk, useful to debug things outside comfy\n\n\n it return a tensor representing the transformed images with the same shape as the input tensor\n ",
|
44 |
+
"Uncrop (mtb)": "Uncrops an image to a given bounding box\n\n The bounding box can be given as a tuple of (x, y, width, height) or as a BBOX type\n The BBOX input takes precedence over the tuple input",
|
45 |
+
"Unsplash Image (mtb)": "Unsplash Image given a keyword and a size"
|
46 |
+
}
|
comfy_mtb/nodes/__init__.py
ADDED
File without changes
|
comfy_mtb/nodes/animation.py
ADDED
@@ -0,0 +1,44 @@
1 |
+
from ..log import log
|
2 |
+
|
3 |
+
|
4 |
+
class AnimationBuilder:
|
5 |
+
"""Convenient way to manage basic animation maths at the core of many of my workflows"""
|
6 |
+
|
7 |
+
@classmethod
|
8 |
+
def INPUT_TYPES(cls):
|
9 |
+
return {
|
10 |
+
"required": {
|
11 |
+
"total_frames": ("INT", {"default": 100, "min": 0}),
|
12 |
+
# "fps": ("INT", {"default": 12, "min": 0}),
|
13 |
+
"scale_float": ("FLOAT", {"default": 1.0, "min": 0.0}),
|
14 |
+
"loop_count": ("INT", {"default": 1, "min": 0}),
|
15 |
+
"raw_iteration": ("INT", {"default": 0, "min": 0}),
|
16 |
+
"raw_loop": ("INT", {"default": 0, "min": 0}),
|
17 |
+
},
|
18 |
+
}
|
19 |
+
|
20 |
+
RETURN_TYPES = ("INT", "FLOAT", "INT", "BOOLEAN")
|
21 |
+
RETURN_NAMES = ("frame", "0-1 (scaled)", "count", "loop_ended")
|
22 |
+
CATEGORY = "mtb/animation"
|
23 |
+
FUNCTION = "build_animation"
|
24 |
+
|
25 |
+
def build_animation(
|
26 |
+
self,
|
27 |
+
total_frames=100,
|
28 |
+
# fps=12,
|
29 |
+
scale_float=1.0,
|
30 |
+
loop_count=1, # set in js
|
31 |
+
raw_iteration=0, # set in js
|
32 |
+
raw_loop=0, # set in js
|
33 |
+
):
|
34 |
+
frame = raw_iteration % (total_frames)
|
35 |
+
scaled = (frame / (total_frames - 1)) * scale_float
|
36 |
+
# if frame == 0:
|
37 |
+
# log.debug("Reseting history")
|
38 |
+
# PromptServer.instance.prompt_queue.wipe_history()
|
39 |
+
log.debug(f"frame: {frame}/{total_frames} scaled: {scaled}")
|
40 |
+
|
41 |
+
return (frame, scaled, raw_loop, (frame == (total_frames - 1)))
|
42 |
+
|
43 |
+
|
44 |
+
__nodes__ = [AnimationBuilder]
|
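A small worked example (illustrative only) of the frame maths in `build_animation` above, assuming total_frames=100 and scale_float=1.0:

# raw_iteration wraps with modulo, and `scaled` maps the frame to 0..1.
frame = 150 % 100                    # -> 50 on the second loop
scaled = (frame / (100 - 1)) * 1.0   # -> ~0.505
loop_ended = frame == (100 - 1)      # True only on the last frame of a loop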
comfy_mtb/nodes/conditions.py
ADDED
@@ -0,0 +1,181 @@
1 |
+
from ..utils import here
|
2 |
+
from ..log import log
|
3 |
+
import folder_paths
|
4 |
+
from pathlib import Path
|
5 |
+
import shutil
|
6 |
+
import csv
|
7 |
+
|
8 |
+
|
9 |
+
class InterpolateClipSequential:
|
10 |
+
@classmethod
|
11 |
+
def INPUT_TYPES(cls):
|
12 |
+
return {
|
13 |
+
"required": {
|
14 |
+
"base_text": ("STRING", {"multiline": True}),
|
15 |
+
"text_to_replace": ("STRING", {"default": ""}),
|
16 |
+
"clip": ("CLIP",),
|
17 |
+
"interpolation_strength": (
|
18 |
+
"FLOAT",
|
19 |
+
{"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01},
|
20 |
+
),
|
21 |
+
}
|
22 |
+
}
|
23 |
+
|
24 |
+
RETURN_TYPES = ("CONDITIONING",)
|
25 |
+
FUNCTION = "interpolate_encodings_sequential"
|
26 |
+
|
27 |
+
CATEGORY = "mtb/conditioning"
|
28 |
+
|
29 |
+
def interpolate_encodings_sequential(
|
30 |
+
self, base_text, text_to_replace, clip, interpolation_strength, **replacements
|
31 |
+
):
|
32 |
+
log.debug(f"Received interpolation_strength: {interpolation_strength}")
|
33 |
+
|
34 |
+
# - Ensure interpolation strength is within [0, 1]
|
35 |
+
interpolation_strength = max(0.0, min(1.0, interpolation_strength))
|
36 |
+
|
37 |
+
# - Check if replacements were provided
|
38 |
+
if not replacements:
|
39 |
+
raise ValueError("At least one replacement should be provided.")
|
40 |
+
|
41 |
+
num_replacements = len(replacements)
|
42 |
+
log.debug(f"Number of replacements: {num_replacements}")
|
43 |
+
|
44 |
+
segment_length = 1.0 / num_replacements
|
45 |
+
log.debug(f"Calculated segment_length: {segment_length}")
|
46 |
+
|
47 |
+
# - Find the segment that the interpolation_strength falls into
|
48 |
+
segment_index = min(
|
49 |
+
int(interpolation_strength // segment_length), num_replacements - 1
|
50 |
+
)
|
51 |
+
log.debug(f"Segment index: {segment_index}")
|
52 |
+
|
53 |
+
# - Calculate the local strength within the segment
|
54 |
+
local_strength = (
|
55 |
+
interpolation_strength - (segment_index * segment_length)
|
56 |
+
) / segment_length
|
57 |
+
log.debug(f"Local strength: {local_strength}")
|
58 |
+
|
59 |
+
# - If it's the first segment, interpolate between base_text and the first replacement
|
60 |
+
if segment_index == 0:
|
61 |
+
replacement_text = list(replacements.values())[0]
|
62 |
+
log.debug("Using the base text a the base blend")
|
63 |
+
# - Start with the base_text condition
|
64 |
+
tokens = clip.tokenize(base_text)
|
65 |
+
cond_from, pooled_from = clip.encode_from_tokens(tokens, return_pooled=True)
|
66 |
+
else:
|
67 |
+
base_replace = list(replacements.values())[segment_index - 1]
|
68 |
+
log.debug(f"Using {base_replace} a the base blend")
|
69 |
+
|
70 |
+
# - Start with the base_text condition replaced by the closest replacement
|
71 |
+
tokens = clip.tokenize(base_text.replace(text_to_replace, base_replace))
|
72 |
+
cond_from, pooled_from = clip.encode_from_tokens(tokens, return_pooled=True)
|
73 |
+
|
74 |
+
replacement_text = list(replacements.values())[segment_index]
|
75 |
+
|
76 |
+
interpolated_text = base_text.replace(text_to_replace, replacement_text)
|
77 |
+
tokens = clip.tokenize(interpolated_text)
|
78 |
+
cond_to, pooled_to = clip.encode_from_tokens(tokens, return_pooled=True)
|
79 |
+
|
80 |
+
# - Linearly interpolate between the two conditions
|
81 |
+
interpolated_condition = (
|
82 |
+
1.0 - local_strength
|
83 |
+
) * cond_from + local_strength * cond_to
|
84 |
+
interpolated_pooled = (
|
85 |
+
1.0 - local_strength
|
86 |
+
) * pooled_from + local_strength * pooled_to
|
87 |
+
|
88 |
+
return ([[interpolated_condition, {"pooled_output": interpolated_pooled}]],)
|
89 |
+
|
90 |
+
|
91 |
+
class SmartStep:
|
92 |
+
"""Utils to control the steps start/stop of the KAdvancedSampler in percentage"""
|
93 |
+
|
94 |
+
@classmethod
|
95 |
+
def INPUT_TYPES(cls):
|
96 |
+
return {
|
97 |
+
"required": {
|
98 |
+
"step": (
|
99 |
+
"INT",
|
100 |
+
{"default": 20, "min": 1, "max": 10000, "step": 1},
|
101 |
+
),
|
102 |
+
"start_percent": (
|
103 |
+
"INT",
|
104 |
+
{"default": 0, "min": 0, "max": 100, "step": 1},
|
105 |
+
),
|
106 |
+
"end_percent": (
|
107 |
+
"INT",
|
108 |
+
{"default": 0, "min": 0, "max": 100, "step": 1},
|
109 |
+
),
|
110 |
+
}
|
111 |
+
}
|
112 |
+
|
113 |
+
RETURN_TYPES = ("INT", "INT", "INT")
|
114 |
+
RETURN_NAMES = ("step", "start", "end")
|
115 |
+
FUNCTION = "do_step"
|
116 |
+
CATEGORY = "mtb/conditioning"
|
117 |
+
|
118 |
+
def do_step(self, step, start_percent, end_percent):
|
119 |
+
start = int(step * start_percent / 100)
|
120 |
+
end = int(step * end_percent / 100)
|
121 |
+
|
122 |
+
return (step, start, end)
|
123 |
+
|
124 |
+
|
125 |
+
def install_default_styles(force=False):
|
126 |
+
styles_dir = Path(folder_paths.base_path) / "styles"
|
127 |
+
styles_dir.mkdir(parents=True, exist_ok=True)
|
128 |
+
default_style = here / "styles.csv"
|
129 |
+
dest_style = styles_dir / "default.csv"
|
130 |
+
|
131 |
+
if force or not dest_style.exists():
|
132 |
+
log.debug(f"Copying default style to {dest_style}")
|
133 |
+
shutil.copy2(default_style.as_posix(), dest_style.as_posix())
|
134 |
+
|
135 |
+
return dest_style
|
136 |
+
|
137 |
+
|
138 |
+
class StylesLoader:
|
139 |
+
"""Load csv files and populate a dropdown from the rows (à la A111)"""
|
140 |
+
|
141 |
+
options = {}
|
142 |
+
|
143 |
+
@classmethod
|
144 |
+
def INPUT_TYPES(cls):
|
145 |
+
if not cls.options:
|
146 |
+
input_dir = Path(folder_paths.base_path) / "styles"
|
147 |
+
if not input_dir.exists():
|
148 |
+
install_default_styles()
|
149 |
+
|
150 |
+
if not (files := [f for f in input_dir.iterdir() if f.suffix == ".csv"]):
|
151 |
+
log.warn(
|
152 |
+
"No styles found in the styles folder, place at least one csv file in the styles folder at the root of ComfyUI (for instance ComfyUI/styles/mystyle.csv)"
|
153 |
+
)
|
154 |
+
|
155 |
+
for file in files:
|
156 |
+
with open(file, "r", encoding="utf8") as f:
|
157 |
+
parsed = csv.reader(f)
|
158 |
+
for row in parsed:
|
159 |
+
log.debug(f"Adding style {row[0]}")
|
160 |
+
cls.options[row[0]] = (row[1], row[2])
|
161 |
+
|
162 |
+
else:
|
163 |
+
log.debug(f"Using cached styles (count: {len(cls.options)})")
|
164 |
+
|
165 |
+
return {
|
166 |
+
"required": {
|
167 |
+
"style_name": (list(cls.options.keys()),),
|
168 |
+
}
|
169 |
+
}
|
170 |
+
|
171 |
+
CATEGORY = "mtb/conditioning"
|
172 |
+
|
173 |
+
RETURN_TYPES = ("STRING", "STRING")
|
174 |
+
RETURN_NAMES = ("positive", "negative")
|
175 |
+
FUNCTION = "load_style"
|
176 |
+
|
177 |
+
def load_style(self, style_name):
|
178 |
+
return (self.options[style_name][0], self.options[style_name][1])
|
179 |
+
|
180 |
+
|
181 |
+
__nodes__ = [SmartStep, StylesLoader, InterpolateClipSequential]
|
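For reference, an illustrative trace (not part of the upload) of the percentage-to-step mapping done by `SmartStep.do_step`:

# step=20, start_percent=25, end_percent=75
start = int(20 * 25 / 100)  # 5
end = int(20 * 75 / 100)    # 15
# returned as (20, 5, 15) and wired to the KSampler Advanced start/end step inputs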
comfy_mtb/nodes/crop.py
ADDED
@@ -0,0 +1,288 @@
1 |
+
import torch
|
2 |
+
from ..utils import tensor2pil, pil2tensor, tensor2np, np2tensor
|
3 |
+
from PIL import Image, ImageFilter, ImageDraw, ImageChops
|
4 |
+
import numpy as np
|
5 |
+
|
6 |
+
from ..log import log
|
7 |
+
|
8 |
+
|
9 |
+
class Bbox:
|
10 |
+
"""The bounding box (BBOX) custom type used by other nodes"""
|
11 |
+
|
12 |
+
@classmethod
|
13 |
+
def INPUT_TYPES(cls):
|
14 |
+
return {
|
15 |
+
"required": {
|
16 |
+
# "bbox": ("BBOX",),
|
17 |
+
"x": ("INT", {"default": 0, "max": 10000000, "min": 0, "step": 1}),
|
18 |
+
"y": ("INT", {"default": 0, "max": 10000000, "min": 0, "step": 1}),
|
19 |
+
"width": (
|
20 |
+
"INT",
|
21 |
+
{"default": 256, "max": 10000000, "min": 0, "step": 1},
|
22 |
+
),
|
23 |
+
"height": (
|
24 |
+
"INT",
|
25 |
+
{"default": 256, "max": 10000000, "min": 0, "step": 1},
|
26 |
+
),
|
27 |
+
}
|
28 |
+
}
|
29 |
+
|
30 |
+
RETURN_TYPES = ("BBOX",)
|
31 |
+
FUNCTION = "do_crop"
|
32 |
+
CATEGORY = "mtb/crop"
|
33 |
+
|
34 |
+
def do_crop(self, x, y, width, height): # bbox
|
35 |
+
return (x, y, width, height)
|
36 |
+
# return bbox
|
37 |
+
|
38 |
+
|
39 |
+
class BboxFromMask:
|
40 |
+
"""From a mask extract the bounding box"""
|
41 |
+
|
42 |
+
@classmethod
|
43 |
+
def INPUT_TYPES(cls):
|
44 |
+
return {
|
45 |
+
"required": {
|
46 |
+
"mask": ("MASK",),
|
47 |
+
},
|
48 |
+
"optional": {
|
49 |
+
"image": ("IMAGE",),
|
50 |
+
},
|
51 |
+
}
|
52 |
+
|
53 |
+
RETURN_TYPES = (
|
54 |
+
"BBOX",
|
55 |
+
"IMAGE",
|
56 |
+
)
|
57 |
+
RETURN_NAMES = (
|
58 |
+
"bbox",
|
59 |
+
"image (optional)",
|
60 |
+
)
|
61 |
+
FUNCTION = "extract_bounding_box"
|
62 |
+
CATEGORY = "mtb/crop"
|
63 |
+
|
64 |
+
def extract_bounding_box(self, mask: torch.Tensor, image=None):
|
65 |
+
# if image != None:
|
66 |
+
# if mask.size(0) != image.size(0):
|
67 |
+
# if mask.size(0) != 1:
|
68 |
+
# log.error(
|
69 |
+
# f"Batch count mismatch for mask and image, it can either be 1 mask for X images, or X masks for X images (mask: {mask.shape} | image: {image.shape})"
|
70 |
+
# )
|
71 |
+
|
72 |
+
# raise Exception(
|
73 |
+
# f"Batch count mismatch for mask and image, it can either be 1 mask for X images, or X masks for X images (mask: {mask.shape} | image: {image.shape})"
|
74 |
+
# )
|
75 |
+
|
76 |
+
_mask = tensor2pil(1.0 - mask)[0]
|
77 |
+
|
78 |
+
# we invert it
|
79 |
+
alpha_channel = np.array(_mask)
|
80 |
+
|
81 |
+
non_zero_indices = np.nonzero(alpha_channel)
|
82 |
+
|
83 |
+
min_x, max_x = np.min(non_zero_indices[1]), np.max(non_zero_indices[1])
|
84 |
+
min_y, max_y = np.min(non_zero_indices[0]), np.max(non_zero_indices[0])
|
85 |
+
|
86 |
+
# Create a bounding box tuple
|
87 |
+
if image is not None:
|
88 |
+
# Convert the image to a NumPy array
|
89 |
+
imgs = tensor2np(image)
|
90 |
+
out = []
|
91 |
+
for img in imgs:
|
92 |
+
# Crop the image from the bounding box
|
93 |
+
img = img[min_y:max_y, min_x:max_x, :]
|
94 |
+
log.debug(f"Cropped image to shape {img.shape}")
|
95 |
+
out.append(img)
|
96 |
+
|
97 |
+
image = np2tensor(out)
|
98 |
+
log.debug(f"Cropped images shape: {image.shape}")
|
99 |
+
bounding_box = (min_x, min_y, max_x - min_x, max_y - min_y)
|
100 |
+
return (
|
101 |
+
bounding_box,
|
102 |
+
image,
|
103 |
+
)
|
104 |
+
|
105 |
+
|
106 |
+
class Crop:
|
107 |
+
"""Crops an image and an optional mask to a given bounding box
|
108 |
+
|
109 |
+
The bounding box can be given as a tuple of (x, y, width, height) or as a BBOX type
|
110 |
+
The BBOX input takes precedence over the tuple input
|
111 |
+
"""
|
112 |
+
|
113 |
+
@classmethod
|
114 |
+
def INPUT_TYPES(cls):
|
115 |
+
return {
|
116 |
+
"required": {
|
117 |
+
"image": ("IMAGE",),
|
118 |
+
},
|
119 |
+
"optional": {
|
120 |
+
"mask": ("MASK",),
|
121 |
+
"x": ("INT", {"default": 0, "max": 10000000, "min": 0, "step": 1}),
|
122 |
+
"y": ("INT", {"default": 0, "max": 10000000, "min": 0, "step": 1}),
|
123 |
+
"width": (
|
124 |
+
"INT",
|
125 |
+
{"default": 256, "max": 10000000, "min": 0, "step": 1},
|
126 |
+
),
|
127 |
+
"height": (
|
128 |
+
"INT",
|
129 |
+
{"default": 256, "max": 10000000, "min": 0, "step": 1},
|
130 |
+
),
|
131 |
+
"bbox": ("BBOX",),
|
132 |
+
},
|
133 |
+
}
|
134 |
+
|
135 |
+
RETURN_TYPES = ("IMAGE", "MASK", "BBOX")
|
136 |
+
FUNCTION = "do_crop"
|
137 |
+
|
138 |
+
CATEGORY = "mtb/crop"
|
139 |
+
|
140 |
+
def do_crop(
|
141 |
+
self, image: torch.Tensor, mask=None, x=0, y=0, width=256, height=256, bbox=None
|
142 |
+
):
|
143 |
+
image = image.numpy()
|
144 |
+
if mask is not None:
|
145 |
+
mask = mask.numpy()
|
146 |
+
|
147 |
+
if bbox is not None:
|
148 |
+
x, y, width, height = bbox
|
149 |
+
|
150 |
+
cropped_image = image[:, y : y + height, x : x + width, :]
|
151 |
+
cropped_mask = mask[y : y + height, x : x + width] if mask is not None else None
|
152 |
+
crop_data = (x, y, width, height)
|
153 |
+
|
154 |
+
return (
|
155 |
+
torch.from_numpy(cropped_image),
|
156 |
+
torch.from_numpy(cropped_mask) if mask is not None else None,
|
157 |
+
crop_data,
|
158 |
+
)
|
159 |
+
|
160 |
+
|
161 |
+
# def calculate_intersection(rect1, rect2):
|
162 |
+
# x_left = max(rect1[0], rect2[0])
|
163 |
+
# y_top = max(rect1[1], rect2[1])
|
164 |
+
# x_right = min(rect1[2], rect2[2])
|
165 |
+
# y_bottom = min(rect1[3], rect2[3])
|
166 |
+
|
167 |
+
# return (x_left, y_top, x_right, y_bottom)
|
168 |
+
|
169 |
+
|
170 |
+
def bbox_check(bbox, target_size=None):
|
171 |
+
if not target_size:
|
172 |
+
return bbox
|
173 |
+
|
174 |
+
new_bbox = (
|
175 |
+
bbox[0],
|
176 |
+
bbox[1],
|
177 |
+
min(target_size[0] - bbox[0], bbox[2]),
|
178 |
+
min(target_size[1] - bbox[1], bbox[3]),
|
179 |
+
)
|
180 |
+
if new_bbox != bbox:
|
181 |
+
log.warn(f"BBox too big, constrained to {new_bbox}")
|
182 |
+
|
183 |
+
return new_bbox
|
184 |
+
|
185 |
+
|
186 |
+
def bbox_to_region(bbox, target_size=None):
|
187 |
+
bbox = bbox_check(bbox, target_size)
|
188 |
+
|
189 |
+
# to region
|
190 |
+
return (bbox[0], bbox[1], bbox[0] + bbox[2], bbox[1] + bbox[3])
|
191 |
+
|
192 |
+
|
193 |
+
class Uncrop:
|
194 |
+
"""Uncrops an image to a given bounding box
|
195 |
+
|
196 |
+
The bounding box can be given as a tuple of (x, y, width, height) or as a BBOX type
|
197 |
+
The BBOX input takes precedence over the tuple input"""
|
198 |
+
|
199 |
+
@classmethod
|
200 |
+
def INPUT_TYPES(cls):
|
201 |
+
return {
|
202 |
+
"required": {
|
203 |
+
"image": ("IMAGE",),
|
204 |
+
"crop_image": ("IMAGE",),
|
205 |
+
"bbox": ("BBOX",),
|
206 |
+
"border_blending": (
|
207 |
+
"FLOAT",
|
208 |
+
{"default": 0.25, "min": 0.0, "max": 1.0, "step": 0.01},
|
209 |
+
),
|
210 |
+
}
|
211 |
+
}
|
212 |
+
|
213 |
+
RETURN_TYPES = ("IMAGE",)
|
214 |
+
FUNCTION = "do_crop"
|
215 |
+
|
216 |
+
CATEGORY = "mtb/crop"
|
217 |
+
|
218 |
+
def do_crop(self, image, crop_image, bbox, border_blending):
|
219 |
+
def inset_border(image, border_width=20, border_color=(0)):
|
220 |
+
width, height = image.size
|
221 |
+
bordered_image = Image.new(image.mode, (width, height), border_color)
|
222 |
+
bordered_image.paste(image, (0, 0))
|
223 |
+
draw = ImageDraw.Draw(bordered_image)
|
224 |
+
draw.rectangle(
|
225 |
+
(0, 0, width - 1, height - 1), outline=border_color, width=border_width
|
226 |
+
)
|
227 |
+
return bordered_image
|
228 |
+
|
229 |
+
single = image.size(0) == 1
|
230 |
+
if image.size(0) != crop_image.size(0):
|
231 |
+
if not single:
|
232 |
+
raise ValueError(
|
233 |
+
"The Image batch count is greater than 1, but doesn't match the crop_image batch count. If using batches they should either match or only crop_image must be greater than 1"
|
234 |
+
)
|
235 |
+
|
236 |
+
images = tensor2pil(image)
|
237 |
+
crop_imgs = tensor2pil(crop_image)
|
238 |
+
out_images = []
|
239 |
+
for i, crop in enumerate(crop_imgs):
|
240 |
+
if single:
|
241 |
+
img = images[0]
|
242 |
+
else:
|
243 |
+
img = images[i]
|
244 |
+
|
245 |
+
# uncrop the image based on the bounding box
|
246 |
+
bb_x, bb_y, bb_width, bb_height = bbox
|
247 |
+
|
248 |
+
paste_region = bbox_to_region((bb_x, bb_y, bb_width, bb_height), img.size)
|
249 |
+
# log.debug(f"Paste region: {paste_region}")
|
250 |
+
# new_region = adjust_paste_region(img.size, paste_region)
|
251 |
+
# log.debug(f"Adjusted paste region: {new_region}")
|
252 |
+
# # Check if the adjusted paste region is different from the original
|
253 |
+
|
254 |
+
crop_img = crop.convert("RGB")
|
255 |
+
|
256 |
+
log.debug(f"Crop image size: {crop_img.size}")
|
257 |
+
log.debug(f"Image size: {img.size}")
|
258 |
+
|
259 |
+
if border_blending > 1.0:
|
260 |
+
border_blending = 1.0
|
261 |
+
elif border_blending < 0.0:
|
262 |
+
border_blending = 0.0
|
263 |
+
|
264 |
+
blend_ratio = (max(crop_img.size) / 2) * float(border_blending)
|
265 |
+
|
266 |
+
blend = img.convert("RGBA")
|
267 |
+
mask = Image.new("L", img.size, 0)
|
268 |
+
|
269 |
+
mask_block = Image.new("L", (bb_width, bb_height), 255)
|
270 |
+
mask_block = inset_border(mask_block, int(blend_ratio / 2), (0))
|
271 |
+
|
272 |
+
mask.paste(mask_block, paste_region)
|
273 |
+
log.debug(f"Blend size: {blend.size} | kind {blend.mode}")
|
274 |
+
log.debug(f"Crop image size: {crop_img.size} | kind {crop_img.mode}")
|
275 |
+
log.debug(f"BBox: {paste_region}")
|
276 |
+
blend.paste(crop_img, paste_region)
|
277 |
+
|
278 |
+
mask = mask.filter(ImageFilter.BoxBlur(radius=blend_ratio / 4))
|
279 |
+
mask = mask.filter(ImageFilter.GaussianBlur(radius=blend_ratio / 4))
|
280 |
+
|
281 |
+
blend.putalpha(mask)
|
282 |
+
img = Image.alpha_composite(img.convert("RGBA"), blend)
|
283 |
+
out_images.append(img.convert("RGB"))
|
284 |
+
|
285 |
+
return (pil2tensor(out_images),)
|
286 |
+
|
287 |
+
|
288 |
+
__nodes__ = [BboxFromMask, Bbox, Crop, Uncrop]
|
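An illustrative check (not in the commit) of `bbox_to_region`, which converts an (x, y, width, height) bbox into a PIL paste region and clamps it to the target image size via `bbox_check`:

# A 512x512 bbox at (200, 100) on a 256x256 image gets constrained first:
region = bbox_to_region((200, 100, 512, 512), target_size=(256, 256))
# bbox_check shrinks it to (200, 100, 56, 156), so region == (200, 100, 256, 256)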
comfy_mtb/nodes/debug.py
ADDED
@@ -0,0 +1,121 @@
1 |
+
from ..utils import tensor2pil
|
2 |
+
from ..log import log
|
3 |
+
import io, base64
|
4 |
+
import torch
|
5 |
+
import folder_paths
|
6 |
+
from typing import Optional
|
7 |
+
from pathlib import Path
|
8 |
+
|
9 |
+
|
10 |
+
class Debug:
|
11 |
+
"""Experimental node to debug any Comfy values, support for more types and widgets is planned"""
|
12 |
+
|
13 |
+
@classmethod
|
14 |
+
def INPUT_TYPES(cls):
|
15 |
+
return {
|
16 |
+
"required": {"anything_1": ("*")},
|
17 |
+
}
|
18 |
+
|
19 |
+
RETURN_TYPES = ("STRING",)
|
20 |
+
FUNCTION = "do_debug"
|
21 |
+
CATEGORY = "mtb/debug"
|
22 |
+
OUTPUT_NODE = True
|
23 |
+
|
24 |
+
def do_debug(self, **kwargs):
|
25 |
+
output = {
|
26 |
+
"ui": {"b64_images": [], "text": []},
|
27 |
+
"result": ("A"),
|
28 |
+
}
|
29 |
+
for k, v in kwargs.items():
|
30 |
+
anything = v
|
31 |
+
text = ""
|
32 |
+
if isinstance(anything, torch.Tensor):
|
33 |
+
log.debug(f"Tensor: {anything.shape}")
|
34 |
+
|
35 |
+
# write the images to temp
|
36 |
+
|
37 |
+
image = tensor2pil(anything)
|
38 |
+
b64_imgs = []
|
39 |
+
for im in image:
|
40 |
+
buffered = io.BytesIO()
|
41 |
+
im.save(buffered, format="PNG")
|
42 |
+
b64_imgs.append(
|
43 |
+
"data:image/png;base64,"
|
44 |
+
+ base64.b64encode(buffered.getvalue()).decode("utf-8")
|
45 |
+
)
|
46 |
+
|
47 |
+
output["ui"]["b64_images"] += b64_imgs
|
48 |
+
log.debug(f"Input {k} contains {len(b64_imgs)} images")
|
49 |
+
elif isinstance(anything, bool):
|
50 |
+
log.debug(f"Input {k} contains boolean: {anything}")
|
51 |
+
output["ui"]["text"] += ["True" if anything else "False"]
|
52 |
+
else:
|
53 |
+
text = str(anything)
|
54 |
+
log.debug(f"Input {k} contains text: {text}")
|
55 |
+
output["ui"]["text"] += [text]
|
56 |
+
|
57 |
+
return output
|
58 |
+
|
59 |
+
|
60 |
+
class SaveTensors:
|
61 |
+
"""Save torch tensors (image, mask or latent) to disk, useful to debug things outside comfy"""
|
62 |
+
|
63 |
+
def __init__(self):
|
64 |
+
self.output_dir = folder_paths.get_output_directory()
|
65 |
+
self.type = "mtb/debug"
|
66 |
+
|
67 |
+
@classmethod
|
68 |
+
def INPUT_TYPES(cls):
|
69 |
+
return {
|
70 |
+
"required": {
|
71 |
+
"filename_prefix": ("STRING", {"default": "ComfyPickle"}),
|
72 |
+
},
|
73 |
+
"optional": {
|
74 |
+
"image": ("IMAGE",),
|
75 |
+
"mask": ("MASK",),
|
76 |
+
"latent": ("LATENT",),
|
77 |
+
},
|
78 |
+
}
|
79 |
+
|
80 |
+
FUNCTION = "save"
|
81 |
+
OUTPUT_NODE = True
|
82 |
+
RETURN_TYPES = ()
|
83 |
+
CATEGORY = "mtb/debug"
|
84 |
+
|
85 |
+
def save(
|
86 |
+
self,
|
87 |
+
filename_prefix,
|
88 |
+
image: Optional[torch.Tensor] = None,
|
89 |
+
mask: Optional[torch.Tensor] = None,
|
90 |
+
latent: Optional[torch.Tensor] = None,
|
91 |
+
):
|
92 |
+
(
|
93 |
+
full_output_folder,
|
94 |
+
filename,
|
95 |
+
counter,
|
96 |
+
subfolder,
|
97 |
+
filename_prefix,
|
98 |
+
) = folder_paths.get_save_image_path(filename_prefix, self.output_dir)
|
99 |
+
full_output_folder = Path(full_output_folder)
|
100 |
+
if image is not None:
|
101 |
+
image_file = f"{filename}_image_{counter:05}.pt"
|
102 |
+
torch.save(image, full_output_folder / image_file)
|
103 |
+
# np.save(full_output_folder/ image_file, image.cpu().numpy())
|
104 |
+
|
105 |
+
if mask is not None:
|
106 |
+
mask_file = f"{filename}_mask_{counter:05}.pt"
|
107 |
+
torch.save(mask, full_output_folder / mask_file)
|
108 |
+
# np.save(full_output_folder/ mask_file, mask.cpu().numpy())
|
109 |
+
|
110 |
+
if latent is not None:
|
111 |
+
# for latent we must use pickle
|
112 |
+
latent_file = f"{filename}_latent_{counter:05}.pt"
|
113 |
+
torch.save(latent, full_output_folder / latent_file)
|
114 |
+
# pickle.dump(latent, open(full_output_folder/ latent_file, "wb"))
|
115 |
+
|
116 |
+
# np.save(full_output_folder/ latent_file, latent[""].cpu().numpy())
|
117 |
+
|
118 |
+
return f"{filename_prefix}_{counter:05}"
|
119 |
+
|
120 |
+
|
121 |
+
__nodes__ = [Debug, SaveTensors]
|
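Illustrative only: the `.pt` files written by `SaveTensors` above can be inspected outside Comfy with plain torch (the filename below is hypothetical, following the node's `{prefix}_image_{counter:05}.pt` pattern):

import torch
img = torch.load("output/ComfyPickle_image_00001.pt")
print(img.shape)  # the image tensor exactly as Comfy passed it to the node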
comfy_mtb/nodes/deep_bump.py
ADDED
@@ -0,0 +1,302 @@
1 |
+
import onnxruntime as ort
|
2 |
+
import numpy as np
|
3 |
+
import pathlib
|
4 |
+
import onnxruntime as ort
|
5 |
+
import numpy as np
|
6 |
+
from .. import utils as utils_inference
|
7 |
+
from ..log import log
|
8 |
+
|
9 |
+
# Disable MS telemetry
|
10 |
+
ort.disable_telemetry_events()
|
11 |
+
|
12 |
+
|
13 |
+
# - COLOR to NORMALS
|
14 |
+
def color_to_normals(color_img, overlap, progress_callback):
|
15 |
+
"""Computes a normal map from the given color map. 'color_img' must be a numpy array
|
16 |
+
in C,H,W format (with C as RGB). 'overlap' must be one of 'SMALL', 'MEDIUM', 'LARGE'.
|
17 |
+
"""
|
18 |
+
|
19 |
+
# Remove alpha & convert to grayscale
|
20 |
+
img = np.mean(color_img[:3], axis=0, keepdims=True)
|
21 |
+
|
22 |
+
# Split image in tiles
|
23 |
+
log.debug("DeepBump Color → Normals : tilling")
|
24 |
+
tile_size = 256
|
25 |
+
overlaps = {
|
26 |
+
"SMALL": tile_size // 6,
|
27 |
+
"MEDIUM": tile_size // 4,
|
28 |
+
"LARGE": tile_size // 2,
|
29 |
+
}
|
30 |
+
stride_size = tile_size - overlaps[overlap]
|
31 |
+
tiles, paddings = utils_inference.tiles_split(
|
32 |
+
img, (tile_size, tile_size), (stride_size, stride_size)
|
33 |
+
)
|
34 |
+
|
35 |
+
# Load model
|
36 |
+
log.debug("DeepBump Color → Normals : loading model")
|
37 |
+
addon_path = str(pathlib.Path(__file__).parent.absolute())
|
38 |
+
ort_session = ort.InferenceSession(f"{addon_path}/models/deepbump256.onnx")
|
39 |
+
|
40 |
+
# Predict normal map for each tile
|
41 |
+
log.debug("DeepBump Color → Normals : generating")
|
42 |
+
pred_tiles = utils_inference.tiles_infer(
|
43 |
+
tiles, ort_session, progress_callback=progress_callback
|
44 |
+
)
|
45 |
+
|
46 |
+
# Merge tiles
|
47 |
+
log.debug("DeepBump Color → Normals : merging")
|
48 |
+
pred_img = utils_inference.tiles_merge(
|
49 |
+
pred_tiles,
|
50 |
+
(stride_size, stride_size),
|
51 |
+
(3, img.shape[1], img.shape[2]),
|
52 |
+
paddings,
|
53 |
+
)
|
54 |
+
|
55 |
+
# Normalize each pixel to unit vector
|
56 |
+
pred_img = utils_inference.normalize(pred_img)
|
57 |
+
|
58 |
+
return pred_img
|
59 |
+
|
60 |
+
|
61 |
+
# - NORMALS to CURVATURE
|
62 |
+
def conv_1d(array, kernel_1d):
|
63 |
+
"""Performs row by row 1D convolutions of the given 2D image with the given 1D kernel."""
|
64 |
+
|
65 |
+
# Input kernel length must be odd
|
66 |
+
k_l = len(kernel_1d)
|
67 |
+
assert k_l % 2 != 0
|
68 |
+
# Convolution is repeat-padded
|
69 |
+
extended = np.pad(array, k_l // 2, mode="wrap")
|
70 |
+
# Output has same size as input (padded, valid-mode convolution)
|
71 |
+
output = np.empty(array.shape)
|
72 |
+
for i in range(array.shape[0]):
|
73 |
+
output[i] = np.convolve(extended[i + (k_l // 2)], kernel_1d, mode="valid")
|
74 |
+
|
75 |
+
return output * -1
|
76 |
+
|
77 |
+
|
78 |
+
def gaussian_kernel(length, sigma):
|
79 |
+
"""Returns a 1D gaussian kernel of size 'length'."""
|
80 |
+
|
81 |
+
space = np.linspace(-(length - 1) / 2, (length - 1) / 2, length)
|
82 |
+
kernel = np.exp(-0.5 * np.square(space) / np.square(sigma))
|
83 |
+
return kernel / np.sum(kernel)
|
84 |
+
|
85 |
+
|
86 |
+
def normalize(np_array):
|
87 |
+
"""Normalize all elements of the given numpy array to [0,1]"""
|
88 |
+
|
89 |
+
return (np_array - np.min(np_array)) / (np.max(np_array) - np.min(np_array))
|
90 |
+
|
91 |
+
|
92 |
+
def normals_to_curvature(normals_img, blur_radius, progress_callback):
|
93 |
+
"""Computes a curvature map from the given normal map. 'normals_img' must be a numpy array
|
94 |
+
in C,H,W format (with C as RGB). 'blur_radius' must be one of 'SMALLEST', 'SMALLER', 'SMALL',
|
95 |
+
'MEDIUM', 'LARGE', 'LARGER', 'LARGEST'."""
|
96 |
+
|
97 |
+
# Convolutions on normal map red & green channels
|
98 |
+
if progress_callback is not None:
|
99 |
+
progress_callback(0, 4)
|
100 |
+
diff_kernel = np.array([-1, 0, 1])
|
101 |
+
h_conv = conv_1d(normals_img[0, :, :], diff_kernel)
|
102 |
+
if progress_callback is not None:
|
103 |
+
progress_callback(1, 4)
|
104 |
+
v_conv = conv_1d(-1 * normals_img[1, :, :].T, diff_kernel).T
|
105 |
+
if progress_callback is not None:
|
106 |
+
progress_callback(2, 4)
|
107 |
+
|
108 |
+
# Sum detected edges
|
109 |
+
edges_conv = h_conv + v_conv
|
110 |
+
|
111 |
+
# Blur radius size is proportional to img sizes
|
112 |
+
blur_factors = {
|
113 |
+
"SMALLEST": 1 / 256,
|
114 |
+
"SMALLER": 1 / 128,
|
115 |
+
"SMALL": 1 / 64,
|
116 |
+
"MEDIUM": 1 / 32,
|
117 |
+
"LARGE": 1 / 16,
|
118 |
+
"LARGER": 1 / 8,
|
119 |
+
"LARGEST": 1 / 4,
|
120 |
+
}
|
121 |
+
assert blur_radius in blur_factors
|
122 |
+
blur_radius_px = int(np.mean(normals_img.shape[1:3]) * blur_factors[blur_radius])
|
123 |
+
|
124 |
+
# If blur radius too small, do not blur
|
125 |
+
if blur_radius_px < 2:
|
126 |
+
edges_conv = normalize(edges_conv)
|
127 |
+
return np.stack([edges_conv, edges_conv, edges_conv])
|
128 |
+
|
129 |
+
# Make sure blur kernel length is odd
|
130 |
+
if blur_radius_px % 2 == 0:
|
131 |
+
blur_radius_px += 1
|
132 |
+
|
133 |
+
# Blur curvature with separated convolutions
|
134 |
+
sigma = blur_radius_px // 8
|
135 |
+
if sigma == 0:
|
136 |
+
sigma = 1
|
137 |
+
g_kernel = gaussian_kernel(blur_radius_px, sigma)
|
138 |
+
h_blur = conv_1d(edges_conv, g_kernel)
|
139 |
+
if progress_callback is not None:
|
140 |
+
progress_callback(3, 4)
|
141 |
+
v_blur = conv_1d(h_blur.T, g_kernel).T
|
142 |
+
if progress_callback is not None:
|
143 |
+
progress_callback(4, 4)
|
144 |
+
|
145 |
+
# Normalize to [0,1]
|
146 |
+
curvature = normalize(v_blur)
|
147 |
+
|
148 |
+
# Expand the single channel to three channels (RGB)
|
149 |
+
return np.stack([curvature, curvature, curvature])
|
150 |
+
|
151 |
+
|
152 |
+
# - NORMALS to HEIGHT
|
153 |
+
def normals_to_grad(normals_img):
|
154 |
+
return (normals_img[0] - 0.5) * 2, (normals_img[1] - 0.5) * 2
|
155 |
+
|
156 |
+
|
157 |
+
def copy_flip(grad_x, grad_y):
|
158 |
+
"""Concat 4 flipped copies of input gradients (makes them wrap).
|
159 |
+
Output is twice bigger in both dimensions."""
|
160 |
+
|
161 |
+
grad_x_top = np.hstack([grad_x, -np.flip(grad_x, axis=1)])
|
162 |
+
grad_x_bottom = np.hstack([np.flip(grad_x, axis=0), -np.flip(grad_x)])
|
163 |
+
new_grad_x = np.vstack([grad_x_top, grad_x_bottom])
|
164 |
+
|
165 |
+
grad_y_top = np.hstack([grad_y, np.flip(grad_y, axis=1)])
|
166 |
+
grad_y_bottom = np.hstack([-np.flip(grad_y, axis=0), -np.flip(grad_y)])
|
167 |
+
new_grad_y = np.vstack([grad_y_top, grad_y_bottom])
|
168 |
+
|
169 |
+
return new_grad_x, new_grad_y
|
170 |
+
|
171 |
+
|
172 |
+
def frankot_chellappa(grad_x, grad_y, progress_callback=None):
|
173 |
+
"""Frankot-Chellappa depth-from-gradient algorithm."""
|
174 |
+
|
175 |
+
if progress_callback is not None:
|
176 |
+
progress_callback(0, 3)
|
177 |
+
|
178 |
+
rows, cols = grad_x.shape
|
179 |
+
|
180 |
+
rows_scale = (np.arange(rows) - (rows // 2 + 1)) / (rows - rows % 2)
|
181 |
+
cols_scale = (np.arange(cols) - (cols // 2 + 1)) / (cols - cols % 2)
|
182 |
+
|
183 |
+
u_grid, v_grid = np.meshgrid(cols_scale, rows_scale)
|
184 |
+
|
185 |
+
u_grid = np.fft.ifftshift(u_grid)
|
186 |
+
v_grid = np.fft.ifftshift(v_grid)
|
187 |
+
|
188 |
+
if progress_callback is not None:
|
189 |
+
progress_callback(1, 3)
|
190 |
+
|
191 |
+
grad_x_F = np.fft.fft2(grad_x)
|
192 |
+
grad_y_F = np.fft.fft2(grad_y)
|
193 |
+
|
194 |
+
if progress_callback is not None:
|
195 |
+
progress_callback(2, 3)
|
196 |
+
|
197 |
+
nominator = (-1j * u_grid * grad_x_F) + (-1j * v_grid * grad_y_F)
|
198 |
+
denominator = (u_grid**2) + (v_grid**2) + 1e-16
|
199 |
+
|
200 |
+
Z_F = nominator / denominator
|
201 |
+
Z_F[0, 0] = 0.0
|
202 |
+
|
203 |
+
Z = np.real(np.fft.ifft2(Z_F))
|
204 |
+
|
205 |
+
if progress_callback is not None:
|
206 |
+
progress_callback(3, 3)
|
207 |
+
|
208 |
+
return (Z - np.min(Z)) / (np.max(Z) - np.min(Z))
|
209 |
+
|
210 |
+
|
211 |
+
def normals_to_height(normals_img, seamless, progress_callback):
|
212 |
+
"""Computes a height map from the given normal map. 'normals_img' must be a numpy array
|
213 |
+
in C,H,W format (with C as RGB). 'seamless' is a bool that indicates whether 'normals_img'
|
214 |
+
is seamless."""
|
215 |
+
|
216 |
+
# Flip height axis
|
217 |
+
flip_img = np.flip(normals_img, axis=1)
|
218 |
+
|
219 |
+
# Get gradients from normal map
|
220 |
+
grad_x, grad_y = normals_to_grad(flip_img)
|
221 |
+
grad_x = np.flip(grad_x, axis=0)
|
222 |
+
grad_y = np.flip(grad_y, axis=0)
|
223 |
+
|
224 |
+
# If non-seamless chosen, expand gradients
|
225 |
+
if not seamless:
|
226 |
+
grad_x, grad_y = copy_flip(grad_x, grad_y)
|
227 |
+
|
228 |
+
# Compute height
|
229 |
+
pred_img = frankot_chellappa(-grad_x, grad_y, progress_callback=progress_callback)
|
230 |
+
|
231 |
+
# Cut to valid part if gradients were expanded
|
232 |
+
if not seamless:
|
233 |
+
height, width = normals_img.shape[1], normals_img.shape[2]
|
234 |
+
pred_img = pred_img[:height, :width]
|
235 |
+
|
236 |
+
# Expand the single channel to three channels (RGB)
|
237 |
+
return np.stack([pred_img, pred_img, pred_img])
|
238 |
+
|
239 |
+
|
240 |
+
# - ADDON
|
241 |
+
class DeepBump:
|
242 |
+
"""Normal & height maps generation from single pictures"""
|
243 |
+
|
244 |
+
@classmethod
|
245 |
+
def INPUT_TYPES(cls):
|
246 |
+
return {
|
247 |
+
"required": {
|
248 |
+
"image": ("IMAGE",),
|
249 |
+
"mode": (
|
250 |
+
["Color to Normals", "Normals to Curvature", "Normals to Height"],
|
251 |
+
),
|
252 |
+
"color_to_normals_overlap": (["SMALL", "MEDIUM", "LARGE"],),
|
253 |
+
"normals_to_curvature_blur_radius": (
|
254 |
+
[
|
255 |
+
"SMALLEST",
|
256 |
+
"SMALLER",
|
257 |
+
"SMALL",
|
258 |
+
"MEDIUM",
|
259 |
+
"LARGE",
|
260 |
+
"LARGER",
|
261 |
+
"LARGEST",
|
262 |
+
],
|
263 |
+
),
|
264 |
+
"normals_to_height_seamless": ("BOOLEAN", {"default": False}),
|
265 |
+
},
|
266 |
+
}
|
267 |
+
|
268 |
+
RETURN_TYPES = ("IMAGE",)
|
269 |
+
FUNCTION = "apply"
|
270 |
+
|
271 |
+
CATEGORY = "mtb/textures"
|
272 |
+
|
273 |
+
def apply(
|
274 |
+
self,
|
275 |
+
image,
|
276 |
+
mode="Color to Normals",
|
277 |
+
color_to_normals_overlap="SMALL",
|
278 |
+
normals_to_curvature_blur_radius="SMALL",
|
279 |
+
normals_to_height_seamless=True,
|
280 |
+
):
|
281 |
+
image = utils_inference.tensor2pil(image)
|
282 |
+
|
283 |
+
in_img = np.transpose(image, (2, 0, 1)) / 255
|
284 |
+
|
285 |
+
log.debug(f"Input image shape: {in_img.shape}")
|
286 |
+
|
287 |
+
# Apply processing
|
288 |
+
if mode == "Color to Normals":
|
289 |
+
out_img = color_to_normals(in_img, color_to_normals_overlap, None)
|
290 |
+
if mode == "Normals to Curvature":
|
291 |
+
out_img = normals_to_curvature(
|
292 |
+
in_img, normals_to_curvature_blur_radius, None
|
293 |
+
)
|
294 |
+
if mode == "Normals to Height":
|
295 |
+
out_img = normals_to_height(in_img, normals_to_height_seamless, None)
|
296 |
+
|
297 |
+
out_img = (np.transpose(out_img, (1, 2, 0)) * 255).astype(np.uint8)
|
298 |
+
|
299 |
+
return (utils_inference.pil2tensor(out_img),)
|
300 |
+
|
301 |
+
|
302 |
+
__nodes__ = [DeepBump]
|
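A hedged sketch (not part of the upload) of the array convention the DeepBump helpers above expect: C,H,W float arrays in [0, 1], with the bundled deepbump256.onnx model present for the color-to-normals step:

import numpy as np
rgb = np.random.rand(3, 256, 256)                                  # C,H,W in [0, 1]
normals = color_to_normals(rgb, "MEDIUM", progress_callback=None)  # needs models/deepbump256.onnx
height = normals_to_height(normals, seamless=False, progress_callback=None)
curvature = normals_to_curvature(normals, "MEDIUM", progress_callback=None)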
comfy_mtb/nodes/faceenhance.py
ADDED
@@ -0,0 +1,262 @@
1 |
+
from gfpgan import GFPGANer
|
2 |
+
import cv2
|
3 |
+
import numpy as np
|
4 |
+
import os
|
5 |
+
from pathlib import Path
|
6 |
+
import folder_paths
|
7 |
+
from ..utils import pil2tensor, np2tensor, tensor2np
|
8 |
+
|
9 |
+
from basicsr.utils import imwrite
|
10 |
+
|
11 |
+
|
12 |
+
from PIL import Image
|
13 |
+
import torch
|
14 |
+
from ..log import NullWriter, log
|
15 |
+
from comfy import model_management
|
16 |
+
import comfy
|
17 |
+
import comfy.utils
|
18 |
+
from typing import Tuple
|
19 |
+
|
20 |
+
|
21 |
+
class LoadFaceEnhanceModel:
|
22 |
+
"""Loads a GFPGan or RestoreFormer model for face enhancement."""
|
23 |
+
|
24 |
+
def __init__(self) -> None:
|
25 |
+
pass
|
26 |
+
|
27 |
+
@classmethod
|
28 |
+
def get_models_root(cls):
|
29 |
+
fr = Path(folder_paths.models_dir) / "face_restore"
|
30 |
+
if fr.exists():
|
31 |
+
return (fr, None)
|
32 |
+
|
33 |
+
um = Path(folder_paths.models_dir) / "upscale_models"
|
34 |
+
return (fr, um) if um.exists() else (None, None)
|
35 |
+
|
36 |
+
@classmethod
|
37 |
+
def get_models(cls):
|
38 |
+
fr_models_path, um_models_path = cls.get_models_root()
|
39 |
+
|
40 |
+
if fr_models_path is None and um_models_path is None:
|
41 |
+
log.warning("Face restoration models not found.")
|
42 |
+
return []
|
43 |
+
if not fr_models_path.exists():
|
44 |
+
log.warning(
|
45 |
+
f"No Face Restore checkpoints found at {fr_models_path} (if you've used mtb before these checkpoints were saved in upscale_models before)"
|
46 |
+
)
|
47 |
+
log.warning(
|
48 |
+
"For now we fallback to upscale_models but this will be removed in a future version"
|
49 |
+
)
|
50 |
+
if um_models_path.exists():
|
51 |
+
return [
|
52 |
+
x
|
53 |
+
for x in um_models_path.iterdir()
|
54 |
+
if x.name.endswith(".pth")
|
55 |
+
and ("GFPGAN" in x.name or "RestoreFormer" in x.name)
|
56 |
+
]
|
57 |
+
return []
|
58 |
+
|
59 |
+
return [
|
60 |
+
x
|
61 |
+
for x in fr_models_path.iterdir()
|
62 |
+
if x.name.endswith(".pth")
|
63 |
+
and ("GFPGAN" in x.name or "RestoreFormer" in x.name)
|
64 |
+
]
|
65 |
+
|
66 |
+
@classmethod
|
67 |
+
def INPUT_TYPES(cls):
|
68 |
+
return {
|
69 |
+
"required": {
|
70 |
+
"model_name": (
|
71 |
+
[x.name for x in cls.get_models()],
|
72 |
+
{"default": "None"},
|
73 |
+
),
|
74 |
+
"upscale": ("INT", {"default": 1}),
|
75 |
+
},
|
76 |
+
"optional": {"bg_upsampler": ("UPSCALE_MODEL", {"default": None})},
|
77 |
+
}
|
78 |
+
|
79 |
+
RETURN_TYPES = ("FACEENHANCE_MODEL",)
|
80 |
+
RETURN_NAMES = ("model",)
|
81 |
+
FUNCTION = "load_model"
|
82 |
+
CATEGORY = "mtb/facetools"
|
83 |
+
|
84 |
+
def load_model(self, model_name, upscale=2, bg_upsampler=None):
|
85 |
+
basic = "RestoreFormer" not in model_name
|
86 |
+
|
87 |
+
fr_root, um_root = self.get_models_root()
|
88 |
+
|
89 |
+
if bg_upsampler is not None:
|
90 |
+
log.warning(
|
91 |
+
f"Upscale value overridden to {bg_upsampler.scale} from bg_upsampler"
|
92 |
+
)
|
93 |
+
upscale = bg_upsampler.scale
|
94 |
+
bg_upsampler = BGUpscaleWrapper(bg_upsampler)
|
95 |
+
|
96 |
+
sys.stdout = NullWriter()
|
97 |
+
model = GFPGANer(
|
98 |
+
model_path=(
|
99 |
+
(fr_root if fr_root.exists() else um_root) / model_name
|
100 |
+
).as_posix(),
|
101 |
+
upscale=upscale,
|
102 |
+
arch="clean" if basic else "RestoreFormer", # or original for v1.0 only
|
103 |
+
channel_multiplier=2, # 1 for v1.0 only
|
104 |
+
bg_upsampler=bg_upsampler,
|
105 |
+
)
|
106 |
+
|
107 |
+
sys.stdout = sys.__stdout__
|
108 |
+
return (model,)
|
109 |
+
|
110 |
+
|
111 |
+
class BGUpscaleWrapper:
|
112 |
+
def __init__(self, upscale_model) -> None:
|
113 |
+
self.upscale_model = upscale_model
|
114 |
+
|
115 |
+
def enhance(self, img: Image.Image, outscale=2):
|
116 |
+
device = model_management.get_torch_device()
|
117 |
+
self.upscale_model.to(device)
|
118 |
+
|
119 |
+
tile = 128 + 64
|
120 |
+
overlap = 8
|
121 |
+
|
122 |
+
imgt = np2tensor(img)
|
123 |
+
imgt = imgt.movedim(-1, -3).to(device)
|
124 |
+
|
125 |
+
steps = imgt.shape[0] * comfy.utils.get_tiled_scale_steps(
|
126 |
+
imgt.shape[3], imgt.shape[2], tile_x=tile, tile_y=tile, overlap=overlap
|
127 |
+
)
|
128 |
+
|
129 |
+
log.debug(f"Steps: {steps}")
|
130 |
+
|
131 |
+
pbar = comfy.utils.ProgressBar(steps)
|
132 |
+
|
133 |
+
s = comfy.utils.tiled_scale(
|
134 |
+
imgt,
|
135 |
+
lambda a: self.upscale_model(a),
|
136 |
+
tile_x=tile,
|
137 |
+
tile_y=tile,
|
138 |
+
overlap=overlap,
|
139 |
+
upscale_amount=self.upscale_model.scale,
|
140 |
+
pbar=pbar,
|
141 |
+
)
|
142 |
+
|
143 |
+
self.upscale_model.cpu()
|
144 |
+
s = torch.clamp(s.movedim(-3, -1), min=0, max=1.0)
|
145 |
+
return (tensor2np(s)[0],)
|
146 |
+
|
147 |
+
|
148 |
+
import sys
|
149 |
+
|
150 |
+
|
151 |
+
class RestoreFace:
|
152 |
+
"""Uses GFPGan to restore faces"""
|
153 |
+
|
154 |
+
def __init__(self) -> None:
|
155 |
+
pass
|
156 |
+
|
157 |
+
RETURN_TYPES = ("IMAGE",)
|
158 |
+
FUNCTION = "restore"
|
159 |
+
CATEGORY = "mtb/facetools"
|
160 |
+
|
161 |
+
@classmethod
|
162 |
+
def INPUT_TYPES(cls):
|
163 |
+
return {
|
164 |
+
"required": {
|
165 |
+
"image": ("IMAGE",),
|
166 |
+
"model": ("FACEENHANCE_MODEL",),
|
167 |
+
# Input are aligned faces
|
168 |
+
"aligned": ("BOOLEAN", {"default": False}),
|
169 |
+
# Only restore the center face
|
170 |
+
"only_center_face": ("BOOLEAN", {"default": False}),
|
171 |
+
# Adjustable weights
|
172 |
+
"weight": ("FLOAT", {"default": 0.5}),
|
173 |
+
"save_tmp_steps": ("BOOLEAN", {"default": True}),
|
174 |
+
}
|
175 |
+
}
|
176 |
+
|
177 |
+
def do_restore(
|
178 |
+
self,
|
179 |
+
image: torch.Tensor,
|
180 |
+
model: GFPGANer,
|
181 |
+
aligned,
|
182 |
+
only_center_face,
|
183 |
+
weight,
|
184 |
+
save_tmp_steps,
|
185 |
+
) -> torch.Tensor:
|
186 |
+
pimage = tensor2np(image)[0]
|
187 |
+
width, height = pimage.shape[1], pimage.shape[0]
|
188 |
+
source_img = cv2.cvtColor(np.array(pimage), cv2.COLOR_RGB2BGR)
|
189 |
+
|
190 |
+
sys.stdout = NullWriter()
|
191 |
+
cropped_faces, restored_faces, restored_img = model.enhance(
|
192 |
+
source_img,
|
193 |
+
has_aligned=aligned,
|
194 |
+
only_center_face=only_center_face,
|
195 |
+
paste_back=True,
|
196 |
+
# TODO: weight has no effect in 1.3 and 1.4 (only tested these for now...)
|
197 |
+
weight=weight,
|
198 |
+
)
|
199 |
+
sys.stdout = sys.__stdout__
|
200 |
+
log.warning(f"Weight value has no effect for now. (value: {weight})")
|
201 |
+
|
202 |
+
if save_tmp_steps:
|
203 |
+
self.save_intermediate_images(cropped_faces, restored_faces, height, width)
|
204 |
+
output = None
|
205 |
+
if restored_img is not None:
|
206 |
+
output = Image.fromarray(cv2.cvtColor(restored_img, cv2.COLOR_BGR2RGB))
|
207 |
+
# imwrite(restored_img, save_restore_path)
|
208 |
+
|
209 |
+
return pil2tensor(output)
|
210 |
+
|
211 |
+
def restore(
|
212 |
+
self,
|
213 |
+
image: torch.Tensor,
|
214 |
+
model: GFPGANer,
|
215 |
+
aligned=False,
|
216 |
+
only_center_face=False,
|
217 |
+
weight=0.5,
|
218 |
+
save_tmp_steps=True,
|
219 |
+
) -> Tuple[torch.Tensor]:
|
220 |
+
out = [
|
221 |
+
self.do_restore(
|
222 |
+
image[i], model, aligned, only_center_face, weight, save_tmp_steps
|
223 |
+
)
|
224 |
+
for i in range(image.size(0))
|
225 |
+
]
|
226 |
+
|
227 |
+
return (torch.cat(out, dim=0),)
|
228 |
+
|
229 |
+
def get_step_image_path(self, step, idx):
|
230 |
+
(
|
231 |
+
full_output_folder,
|
232 |
+
filename,
|
233 |
+
counter,
|
234 |
+
_subfolder,
|
235 |
+
_filename_prefix,
|
236 |
+
) = folder_paths.get_save_image_path(
|
237 |
+
f"{step}_{idx:03}",
|
238 |
+
folder_paths.temp_directory,
|
239 |
+
)
|
240 |
+
file = f"{filename}_{counter:05}_.png"
|
241 |
+
|
242 |
+
return os.path.join(full_output_folder, file)
|
243 |
+
|
244 |
+
def save_intermediate_images(self, cropped_faces, restored_faces, height, width):
|
245 |
+
for idx, (cropped_face, restored_face) in enumerate(
|
246 |
+
zip(cropped_faces, restored_faces)
|
247 |
+
):
|
248 |
+
face_id = idx + 1
|
249 |
+
file = self.get_step_image_path("cropped_faces", face_id)
|
250 |
+
imwrite(cropped_face, file)
|
251 |
+
|
252 |
+
file = self.get_step_image_path("cropped_faces_restored", face_id)
|
253 |
+
imwrite(restored_face, file)
|
254 |
+
|
255 |
+
file = self.get_step_image_path("cropped_faces_compare", face_id)
|
256 |
+
|
257 |
+
# save comparison image
|
258 |
+
cmp_img = np.concatenate((cropped_face, restored_face), axis=1)
|
259 |
+
imwrite(cmp_img, file)
|
260 |
+
|
261 |
+
|
262 |
+
__nodes__ = [RestoreFace, LoadFaceEnhanceModel]
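A minimal usage sketch for the two nodes above when called directly (outside the graph). It assumes a running ComfyUI environment so `folder_paths` resolves, and a GFPGAN checkpoint in `models/face_restore`; the checkpoint filename below is a hypothetical example, not something the code ships with.
import torch

loader = LoadFaceEnhanceModel()
(gfpgan,) = loader.load_model("GFPGANv1.4.pth", upscale=2)  # hypothetical checkpoint name

# ComfyUI IMAGE tensors are float, BHWC, in [0, 1]
batch = torch.rand(1, 512, 512, 3)  # stand-in for a real image batch
(restored,) = RestoreFace().restore(
    batch, gfpgan, aligned=False, only_center_face=False, weight=0.5, save_tmp_steps=False
)
print(restored.shape)  # same batch layout, with restored faces pasted back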
|
comfy_mtb/nodes/faceswap.py
ADDED
@@ -0,0 +1,233 @@
1 |
+
# region imports
|
2 |
+
import onnxruntime
|
3 |
+
from pathlib import Path
|
4 |
+
from PIL import Image
|
5 |
+
from typing import List, Set, Union, Optional
|
6 |
+
import cv2
|
7 |
+
import folder_paths
|
8 |
+
import glob
|
9 |
+
import insightface
|
10 |
+
import numpy as np
|
11 |
+
import os
|
12 |
+
import torch
|
13 |
+
from insightface.model_zoo.inswapper import INSwapper
|
14 |
+
from ..utils import pil2tensor, tensor2pil, download_antelopev2
|
15 |
+
from ..log import mklog, NullWriter
|
16 |
+
import sys
|
17 |
+
import comfy.model_management as model_management
|
18 |
+
|
19 |
+
|
20 |
+
# endregion
|
21 |
+
|
22 |
+
log = mklog(__name__)
|
23 |
+
|
24 |
+
|
25 |
+
class LoadFaceAnalysisModel:
|
26 |
+
"""Loads a face analysis model"""
|
27 |
+
|
28 |
+
models = []
|
29 |
+
|
30 |
+
@staticmethod
|
31 |
+
def get_models() -> List[str]:
|
32 |
+
models_path = os.path.join(folder_paths.models_dir, "insightface/*")
|
33 |
+
models = glob.glob(models_path)
|
34 |
+
models = [
|
35 |
+
Path(x).name for x in models if x.endswith(".onnx") or x.endswith(".pth")
|
36 |
+
]
|
37 |
+
return models
|
38 |
+
|
39 |
+
@classmethod
|
40 |
+
def INPUT_TYPES(cls):
|
41 |
+
return {
|
42 |
+
"required": {
|
43 |
+
"faceswap_model": (
|
44 |
+
["antelopev2", "buffalo_l", "buffalo_m", "buffalo_sc"],
|
45 |
+
{"default": "buffalo_l"},
|
46 |
+
),
|
47 |
+
},
|
48 |
+
}
|
49 |
+
|
50 |
+
RETURN_TYPES = ("FACE_ANALYSIS_MODEL",)
|
51 |
+
FUNCTION = "load_model"
|
52 |
+
CATEGORY = "mtb/facetools"
|
53 |
+
|
54 |
+
def load_model(self, faceswap_model: str):
|
55 |
+
if faceswap_model == "antelopev2":
|
56 |
+
download_antelopev2()
|
57 |
+
|
58 |
+
face_analyser = insightface.app.FaceAnalysis(
|
59 |
+
name=faceswap_model,
|
60 |
+
root=os.path.join(folder_paths.models_dir, "insightface"),
|
61 |
+
)
|
62 |
+
return (face_analyser,)
|
63 |
+
|
64 |
+
|
65 |
+
class LoadFaceSwapModel:
|
66 |
+
"""Loads a faceswap model"""
|
67 |
+
|
68 |
+
@staticmethod
|
69 |
+
def get_models() -> List[Path]:
|
70 |
+
models_path = os.path.join(folder_paths.models_dir, "insightface/*")
|
71 |
+
models = glob.glob(models_path)
|
72 |
+
models = [Path(x) for x in models if x.endswith(".onnx") or x.endswith(".pth")]
|
73 |
+
return models
|
74 |
+
|
75 |
+
@classmethod
|
76 |
+
def INPUT_TYPES(cls):
|
77 |
+
return {
|
78 |
+
"required": {
|
79 |
+
"faceswap_model": (
|
80 |
+
[x.name for x in cls.get_models()],
|
81 |
+
{"default": "None"},
|
82 |
+
),
|
83 |
+
},
|
84 |
+
}
|
85 |
+
|
86 |
+
RETURN_TYPES = ("FACESWAP_MODEL",)
|
87 |
+
FUNCTION = "load_model"
|
88 |
+
CATEGORY = "mtb/facetools"
|
89 |
+
|
90 |
+
def load_model(self, faceswap_model: str):
|
91 |
+
model_path = os.path.join(
|
92 |
+
folder_paths.models_dir, "insightface", faceswap_model
|
93 |
+
)
|
94 |
+
log.info(f"Loading model {model_path}")
|
95 |
+
return (
|
96 |
+
INSwapper(
|
97 |
+
model_path,
|
98 |
+
onnxruntime.InferenceSession(
|
99 |
+
path_or_bytes=model_path,
|
100 |
+
providers=onnxruntime.get_available_providers(),
|
101 |
+
),
|
102 |
+
),
|
103 |
+
)
|
104 |
+
|
105 |
+
|
106 |
+
# region roop node
|
107 |
+
class FaceSwap:
|
108 |
+
"""Face swap using deepinsight/insightface models"""
|
109 |
+
|
110 |
+
model = None
|
111 |
+
model_path = None
|
112 |
+
|
113 |
+
def __init__(self) -> None:
|
114 |
+
pass
|
115 |
+
|
116 |
+
@classmethod
|
117 |
+
def INPUT_TYPES(cls):
|
118 |
+
return {
|
119 |
+
"required": {
|
120 |
+
"image": ("IMAGE",),
|
121 |
+
"reference": ("IMAGE",),
|
122 |
+
"faces_index": ("STRING", {"default": "0"}),
|
123 |
+
"faceanalysis_model": ("FACE_ANALYSIS_MODEL", {"default": "None"}),
|
124 |
+
"faceswap_model": ("FACESWAP_MODEL", {"default": "None"}),
|
125 |
+
},
|
126 |
+
"optional": {},
|
127 |
+
}
|
128 |
+
|
129 |
+
RETURN_TYPES = ("IMAGE",)
|
130 |
+
FUNCTION = "swap"
|
131 |
+
CATEGORY = "mtb/facetools"
|
132 |
+
|
133 |
+
def swap(
|
134 |
+
self,
|
135 |
+
image: torch.Tensor,
|
136 |
+
reference: torch.Tensor,
|
137 |
+
faces_index: str,
|
138 |
+
faceanalysis_model,
|
139 |
+
faceswap_model,
|
140 |
+
):
|
141 |
+
def do_swap(img):
|
142 |
+
model_management.throw_exception_if_processing_interrupted()
|
143 |
+
img = tensor2pil(img)[0]
|
144 |
+
ref = tensor2pil(reference)[0]
|
145 |
+
face_ids = {
|
146 |
+
int(x) for x in faces_index.strip(",").split(",") if x.isnumeric()
|
147 |
+
}
|
148 |
+
sys.stdout = NullWriter()
|
149 |
+
swapped = swap_face(faceanalysis_model, ref, img, faceswap_model, face_ids)
|
150 |
+
sys.stdout = sys.__stdout__
|
151 |
+
return pil2tensor(swapped)
|
152 |
+
|
153 |
+
batch_count = image.size(0)
|
154 |
+
|
155 |
+
log.info(f"Running insightface swap (batch size: {batch_count})")
|
156 |
+
|
157 |
+
if reference.size(0) != 1:
|
158 |
+
raise ValueError("Reference image must have batch size 1")
|
159 |
+
if batch_count == 1:
|
160 |
+
image = do_swap(image)
|
161 |
+
|
162 |
+
else:
|
163 |
+
image_batch = [do_swap(image[i]) for i in range(batch_count)]
|
164 |
+
image = torch.cat(image_batch, dim=0)
|
165 |
+
|
166 |
+
return (image,)
|
167 |
+
|
168 |
+
|
169 |
+
# endregion
|
170 |
+
|
171 |
+
|
172 |
+
# region face swap utils
|
173 |
+
def get_face_single(
|
174 |
+
face_analyser, img_data: np.ndarray, face_index=0, det_size=(640, 640)
|
175 |
+
):
|
176 |
+
face_analyser.prepare(ctx_id=0, det_size=det_size)
|
177 |
+
face = face_analyser.get(img_data)
|
178 |
+
|
179 |
+
if len(face) == 0 and det_size[0] > 320 and det_size[1] > 320:
|
180 |
+
log.debug("No face ed, trying again with smaller image")
|
181 |
+
det_size_half = (det_size[0] // 2, det_size[1] // 2)
|
182 |
+
return get_face_single(
|
183 |
+
face_analyser, img_data, face_index=face_index, det_size=det_size_half
|
184 |
+
)
|
185 |
+
|
186 |
+
try:
|
187 |
+
return sorted(face, key=lambda x: x.bbox[0])[face_index]
|
188 |
+
except IndexError:
|
189 |
+
return None
|
190 |
+
|
191 |
+
|
192 |
+
def swap_face(
|
193 |
+
face_analyser,
|
194 |
+
source_img: Union[Image.Image, List[Image.Image]],
|
195 |
+
target_img: Union[Image.Image, List[Image.Image]],
|
196 |
+
face_swapper_model,
|
197 |
+
faces_index: Optional[Set[int]] = None,
|
198 |
+
) -> Image.Image:
|
199 |
+
if faces_index is None:
|
200 |
+
faces_index = {0}
|
201 |
+
log.debug(f"Swapping faces: {faces_index}")
|
202 |
+
result_image = target_img
|
203 |
+
|
204 |
+
if face_swapper_model is not None:
|
205 |
+
cv_source_img = cv2.cvtColor(np.array(source_img), cv2.COLOR_RGB2BGR)
|
206 |
+
cv_target_img = cv2.cvtColor(np.array(target_img), cv2.COLOR_RGB2BGR)
|
207 |
+
source_face = get_face_single(face_analyser, cv_source_img, face_index=0)
|
208 |
+
if source_face is not None:
|
209 |
+
result = cv_target_img
|
210 |
+
|
211 |
+
for face_num in faces_index:
|
212 |
+
target_face = get_face_single(
|
213 |
+
face_analyser, cv_target_img, face_index=face_num
|
214 |
+
)
|
215 |
+
if target_face is not None:
|
216 |
+
sys.stdout = NullWriter()
|
217 |
+
result = face_swapper_model.get(result, target_face, source_face)
|
218 |
+
sys.stdout = sys.__stdout__
|
219 |
+
else:
|
220 |
+
log.warning(f"No target face found for {face_num}")
|
221 |
+
|
222 |
+
result_image = Image.fromarray(cv2.cvtColor(result, cv2.COLOR_BGR2RGB))
|
223 |
+
else:
|
224 |
+
log.warning("No source face found")
|
225 |
+
else:
|
226 |
+
log.error("No face swap model provided")
|
227 |
+
return result_image
|
228 |
+
|
229 |
+
|
230 |
+
# endregion face swap utils
|
231 |
+
|
232 |
+
|
233 |
+
__nodes__ = [FaceSwap, LoadFaceSwapModel, LoadFaceAnalysisModel]
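A minimal usage sketch for the face swap nodes above, assuming the insightface `buffalo_l` bundle and an inswapper checkpoint are present under `models/insightface`; the swapper filename is a hypothetical example.
import torch

(analysis,) = LoadFaceAnalysisModel().load_model("buffalo_l")
(swapper,) = LoadFaceSwapModel().load_model("inswapper_128.onnx")  # hypothetical filename

image = torch.rand(2, 512, 512, 3)      # batch of target images to edit
reference = torch.rand(1, 512, 512, 3)  # reference face, batch size must be 1
(swapped,) = FaceSwap().swap(
    image, reference, faces_index="0", faceanalysis_model=analysis, faceswap_model=swapper
)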
|
comfy_mtb/nodes/generate.py
ADDED
@@ -0,0 +1,286 @@
1 |
+
import qrcode
|
2 |
+
from ..utils import pil2tensor
|
3 |
+
from ..utils import comfy_dir
|
4 |
+
from typing import cast
|
5 |
+
from PIL import Image
|
6 |
+
from ..log import log
|
7 |
+
|
8 |
+
# class MtbExamples:
|
9 |
+
# """MTB Example Images"""
|
10 |
+
|
11 |
+
# def __init__(self):
|
12 |
+
# pass
|
13 |
+
|
14 |
+
# @classmethod
|
15 |
+
# @lru_cache(maxsize=1)
|
16 |
+
# def get_root(cls):
|
17 |
+
# return here / "examples" / "samples"
|
18 |
+
|
19 |
+
# @classmethod
|
20 |
+
# def INPUT_TYPES(cls):
|
21 |
+
# input_dir = cls.get_root()
|
22 |
+
# files = [f.name for f in input_dir.iterdir() if f.is_file()]
|
23 |
+
# return {
|
24 |
+
# "required": {"image": (sorted(files),)},
|
25 |
+
# }
|
26 |
+
|
27 |
+
# RETURN_TYPES = ("IMAGE", "MASK")
|
28 |
+
# FUNCTION = "do_mtb_examples"
|
29 |
+
# CATEGORY = "fun"
|
30 |
+
|
31 |
+
# def do_mtb_examples(self, image, index):
|
32 |
+
# image_path = (self.get_root() / image).as_posix()
|
33 |
+
|
34 |
+
# i = Image.open(image_path)
|
35 |
+
# i = ImageOps.exif_transpose(i)
|
36 |
+
# image = i.convert("RGB")
|
37 |
+
# image = np.array(image).astype(np.float32) / 255.0
|
38 |
+
# image = torch.from_numpy(image)[None,]
|
39 |
+
# if "A" in i.getbands():
|
40 |
+
# mask = np.array(i.getchannel("A")).astype(np.float32) / 255.0
|
41 |
+
# mask = 1.0 - torch.from_numpy(mask)
|
42 |
+
# else:
|
43 |
+
# mask = torch.zeros((64, 64), dtype=torch.float32, device="cpu")
|
44 |
+
# return (image, mask)
|
45 |
+
|
46 |
+
# @classmethod
|
47 |
+
# def IS_CHANGED(cls, image):
|
48 |
+
# image_path = (cls.get_root() / image).as_posix()
|
49 |
+
|
50 |
+
# m = hashlib.sha256()
|
51 |
+
# with open(image_path, "rb") as f:
|
52 |
+
# m.update(f.read())
|
53 |
+
# return m.digest().hex()
|
54 |
+
|
55 |
+
|
56 |
+
class UnsplashImage:
|
57 |
+
"""Unsplash Image given a keyword and a size"""
|
58 |
+
|
59 |
+
@classmethod
|
60 |
+
def INPUT_TYPES(cls):
|
61 |
+
return {
|
62 |
+
"required": {
|
63 |
+
"width": ("INT", {"default": 512, "max": 8096, "min": 0, "step": 1}),
|
64 |
+
"height": ("INT", {"default": 512, "max": 8096, "min": 0, "step": 1}),
|
65 |
+
"random_seed": ("INT", {"default": 0, "max": 1e5, "min": 0, "step": 1}),
|
66 |
+
},
|
67 |
+
"optional": {
|
68 |
+
"keyword": ("STRING", {"default": "nature"}),
|
69 |
+
},
|
70 |
+
}
|
71 |
+
|
72 |
+
RETURN_TYPES = ("IMAGE",)
|
73 |
+
FUNCTION = "do_unsplash_image"
|
74 |
+
CATEGORY = "mtb/generate"
|
75 |
+
|
76 |
+
def do_unsplash_image(self, width, height, random_seed, keyword=None):
|
77 |
+
import requests
|
78 |
+
import io
|
79 |
+
|
80 |
+
base_url = "https://source.unsplash.com/random/"
|
81 |
+
|
82 |
+
if width and height:
|
83 |
+
base_url += f"/{width}x{height}"
|
84 |
+
|
85 |
+
if keyword:
|
86 |
+
keyword = keyword.replace(" ", "%20")
|
87 |
+
base_url += f"?{keyword}&{random_seed}"
|
88 |
+
else:
|
89 |
+
base_url += f"?&{random_seed}"
|
90 |
+
try:
|
91 |
+
log.debug(f"Getting unsplash image from {base_url}")
|
92 |
+
response = requests.get(base_url)
|
93 |
+
response.raise_for_status()
|
94 |
+
|
95 |
+
image = Image.open(io.BytesIO(response.content))
|
96 |
+
return (
|
97 |
+
pil2tensor(
|
98 |
+
image,
|
99 |
+
),
|
100 |
+
)
|
101 |
+
|
102 |
+
except requests.exceptions.RequestException as e:
|
103 |
+
print("Error retrieving image:", e)
|
104 |
+
return (None,)
|
105 |
+
|
106 |
+
|
107 |
+
class QrCode:
|
108 |
+
"""Basic QR Code generator"""
|
109 |
+
|
110 |
+
@classmethod
|
111 |
+
def INPUT_TYPES(cls):
|
112 |
+
return {
|
113 |
+
"required": {
|
114 |
+
"url": ("STRING", {"default": "https://www.github.com"}),
|
115 |
+
"width": (
|
116 |
+
"INT",
|
117 |
+
{"default": 256, "max": 8096, "min": 0, "step": 1},
|
118 |
+
),
|
119 |
+
"height": (
|
120 |
+
"INT",
|
121 |
+
{"default": 256, "max": 8096, "min": 0, "step": 1},
|
122 |
+
),
|
123 |
+
"error_correct": (("L", "M", "Q", "H"), {"default": "L"}),
|
124 |
+
"box_size": ("INT", {"default": 10, "max": 8096, "min": 0, "step": 1}),
|
125 |
+
"border": ("INT", {"default": 4, "max": 8096, "min": 0, "step": 1}),
|
126 |
+
"invert": (("BOOLEAN",), {"default": False}),
|
127 |
+
}
|
128 |
+
}
|
129 |
+
|
130 |
+
RETURN_TYPES = ("IMAGE",)
|
131 |
+
FUNCTION = "do_qr"
|
132 |
+
CATEGORY = "mtb/generate"
|
133 |
+
|
134 |
+
def do_qr(self, url, width, height, error_correct, box_size, border, invert):
|
135 |
+
log.warning(
|
136 |
+
"This node will soon be deprecated, there are much better alternatives like https://github.com/coreyryanhanson/comfy-qr"
|
137 |
+
)
|
138 |
+
if error_correct == "L" or error_correct not in ["M", "Q", "H"]:
|
139 |
+
error_correct = qrcode.constants.ERROR_CORRECT_L
|
140 |
+
elif error_correct == "M":
|
141 |
+
error_correct = qrcode.constants.ERROR_CORRECT_M
|
142 |
+
elif error_correct == "Q":
|
143 |
+
error_correct = qrcode.constants.ERROR_CORRECT_Q
|
144 |
+
else:
|
145 |
+
error_correct = qrcode.constants.ERROR_CORRECT_H
|
146 |
+
|
147 |
+
qr = qrcode.QRCode(
|
148 |
+
version=1,
|
149 |
+
error_correction=error_correct,
|
150 |
+
box_size=box_size,
|
151 |
+
border=border,
|
152 |
+
)
|
153 |
+
qr.add_data(url)
|
154 |
+
qr.make(fit=True)
|
155 |
+
|
156 |
+
back_color = (255, 255, 255) if invert else (0, 0, 0)
|
157 |
+
fill_color = (0, 0, 0) if invert else (255, 255, 255)
|
158 |
+
|
159 |
+
code = img = qr.make_image(back_color=back_color, fill_color=fill_color)
|
160 |
+
|
161 |
+
# resize the code image without filtering so the QR modules stay sharp
|
162 |
+
code = code.resize((width, height), Image.NEAREST)
|
163 |
+
|
164 |
+
return (pil2tensor(code),)
|
165 |
+
|
166 |
+
|
167 |
+
def bbox_dim(bbox):
|
168 |
+
left, upper, right, lower = bbox
|
169 |
+
width = right - left
|
170 |
+
height = lower - upper
|
171 |
+
return width, height
|
172 |
+
|
173 |
+
|
174 |
+
class TextToImage:
|
175 |
+
"""Utils to convert text to image using a font
|
176 |
+
|
177 |
+
|
178 |
+
The tool looks for any .ttf file in the Comfy folder hierarchy.
|
179 |
+
"""
|
180 |
+
|
181 |
+
fonts = {}
|
182 |
+
|
183 |
+
def __init__(self):
|
184 |
+
# - This is executed when the graph runs; we could conditionally reload fonts there
|
185 |
+
pass
|
186 |
+
|
187 |
+
@classmethod
|
188 |
+
def CACHE_FONTS(cls):
|
189 |
+
font_extensions = ["*.ttf", "*.otf", "*.woff", "*.woff2", "*.eot"]
|
190 |
+
fonts = []
|
191 |
+
|
192 |
+
for extension in font_extensions:
|
193 |
+
fonts.extend(comfy_dir.glob(f"**/{extension}"))
|
194 |
+
|
195 |
+
if not fonts:
|
196 |
+
log.warning(
|
197 |
+
"> No fonts found in the comfy folder, place at least one font file somewhere in ComfyUI's hierarchy"
|
198 |
+
)
|
199 |
+
else:
|
200 |
+
log.debug(f"> Found {len(fonts)} fonts")
|
201 |
+
|
202 |
+
for font in fonts:
|
203 |
+
log.debug(f"Adding font {font}")
|
204 |
+
cls.fonts[font.stem] = font.as_posix()
|
205 |
+
|
206 |
+
@classmethod
|
207 |
+
def INPUT_TYPES(cls):
|
208 |
+
if not cls.fonts:
|
209 |
+
cls.CACHE_FONTS()
|
210 |
+
else:
|
211 |
+
log.debug(f"Using cached fonts (count: {len(cls.fonts)})")
|
212 |
+
return {
|
213 |
+
"required": {
|
214 |
+
"text": (
|
215 |
+
"STRING",
|
216 |
+
{"default": "Hello world!"},
|
217 |
+
),
|
218 |
+
"font": ((sorted(cls.fonts.keys())),),
|
219 |
+
"wrap": (
|
220 |
+
"INT",
|
221 |
+
{"default": 120, "min": 0, "max": 8096, "step": 1},
|
222 |
+
),
|
223 |
+
"font_size": (
|
224 |
+
"INT",
|
225 |
+
{"default": 12, "min": 1, "max": 2500, "step": 1},
|
226 |
+
),
|
227 |
+
"width": (
|
228 |
+
"INT",
|
229 |
+
{"default": 512, "min": 1, "max": 8096, "step": 1},
|
230 |
+
),
|
231 |
+
"height": (
|
232 |
+
"INT",
|
233 |
+
{"default": 512, "min": 1, "max": 8096, "step": 1},
|
234 |
+
),
|
235 |
+
# "position": (["INT"], {"default": 0, "min": 0, "max": 100, "step": 1}),
|
236 |
+
"color": (
|
237 |
+
"COLOR",
|
238 |
+
{"default": "black"},
|
239 |
+
),
|
240 |
+
"background": (
|
241 |
+
"COLOR",
|
242 |
+
{"default": "white"},
|
243 |
+
),
|
244 |
+
}
|
245 |
+
}
|
246 |
+
|
247 |
+
RETURN_TYPES = ("IMAGE",)
|
248 |
+
RETURN_NAMES = ("image",)
|
249 |
+
FUNCTION = "text_to_image"
|
250 |
+
CATEGORY = "mtb/generate"
|
251 |
+
|
252 |
+
def text_to_image(
|
253 |
+
self, text, font, wrap, font_size, width, height, color, background
|
254 |
+
):
|
255 |
+
from PIL import Image, ImageDraw, ImageFont
|
256 |
+
import textwrap
|
257 |
+
|
258 |
+
font = self.fonts[font]
|
259 |
+
font = cast(ImageFont.FreeTypeFont, ImageFont.truetype(font, font_size))
|
260 |
+
if wrap == 0:
|
261 |
+
wrap = width / font_size
|
262 |
+
lines = textwrap.wrap(text, width=wrap)
|
263 |
+
log.debug(f"Lines: {lines}")
|
264 |
+
line_height = bbox_dim(font.getbbox("hg"))[1]
|
265 |
+
img_height = height # line_height * len(lines)
|
266 |
+
img_width = width # max(font.getsize(line)[0] for line in lines)
|
267 |
+
|
268 |
+
img = Image.new("RGBA", (img_width, img_height), background)
|
269 |
+
draw = ImageDraw.Draw(img)
|
270 |
+
y_text = 0
|
271 |
+
# - bbox is [left, upper, right, lower]
|
272 |
+
for line in lines:
|
273 |
+
width, height = bbox_dim(font.getbbox(line))
|
274 |
+
draw.text((0, y_text), line, color, font=font)
|
275 |
+
y_text += height
|
276 |
+
|
277 |
+
# img.save(os.path.join(folder_paths.base_path, f'{str(uuid.uuid4())}.png'))
|
278 |
+
return (pil2tensor(img),)
|
279 |
+
|
280 |
+
|
281 |
+
__nodes__ = [
|
282 |
+
QrCode,
|
283 |
+
UnsplashImage,
|
284 |
+
TextToImage
|
285 |
+
# MtbExamples,
|
286 |
+
]
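A quick sketch of calling QrCode directly, outside the graph; do_qr returns a single-element tuple holding a BHWC float tensor (exact channel count depends on pil2tensor).
(qr,) = QrCode().do_qr(
    url="https://www.github.com",
    width=256,
    height=256,
    error_correct="M",
    box_size=10,
    border=4,
    invert=False,
)
print(qr.shape)  # roughly (1, 256, 256, 3), ready to feed any IMAGE input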
|
comfy_mtb/nodes/graph_utils.py
ADDED
@@ -0,0 +1,244 @@
1 |
+
from ..log import log
|
2 |
+
from PIL import Image
|
3 |
+
import urllib.request
|
4 |
+
import urllib.parse
|
5 |
+
import torch
|
6 |
+
import json
|
7 |
+
from ..utils import pil2tensor, apply_easing, get_server_info
|
8 |
+
import io
|
9 |
+
import numpy as np
|
10 |
+
|
11 |
+
|
12 |
+
def get_image(filename, subfolder, folder_type):
|
13 |
+
log.debug(
|
14 |
+
f"Getting image {filename} from foldertype {folder_type} {f'in subfolder: {subfolder}' if subfolder else ''}"
|
15 |
+
)
|
16 |
+
data = {"filename": filename, "subfolder": subfolder, "type": folder_type}
|
17 |
+
base_url, port = get_server_info()
|
18 |
+
|
19 |
+
url_values = urllib.parse.urlencode(data)
|
20 |
+
url = f"http://{base_url}:{port}/view?{url_values}"
|
21 |
+
log.debug(f"Fetching image from {url}")
|
22 |
+
with urllib.request.urlopen(url) as response:
|
23 |
+
return io.BytesIO(response.read())
|
24 |
+
|
25 |
+
|
26 |
+
class GetBatchFromHistory:
|
27 |
+
"""Very experimental node to load images from the history of the server.
|
28 |
+
|
29 |
+
Queue items without output are ignored in the count."""
|
30 |
+
|
31 |
+
@classmethod
|
32 |
+
def INPUT_TYPES(cls):
|
33 |
+
return {
|
34 |
+
"required": {
|
35 |
+
"enable": ("BOOLEAN", {"default": True}),
|
36 |
+
"count": ("INT", {"default": 1, "min": 0}),
|
37 |
+
"offset": ("INT", {"default": 0, "min": -1e9, "max": 1e9}),
|
38 |
+
"internal_count": ("INT", {"default": 0}),
|
39 |
+
},
|
40 |
+
"optional": {
|
41 |
+
"passthrough_image": ("IMAGE",),
|
42 |
+
},
|
43 |
+
}
|
44 |
+
|
45 |
+
RETURN_TYPES = ("IMAGE",)
|
46 |
+
RETURN_NAMES = ("images",)
|
47 |
+
CATEGORY = "mtb/animation"
|
48 |
+
FUNCTION = "load_from_history"
|
49 |
+
|
50 |
+
def load_from_history(
|
51 |
+
self,
|
52 |
+
enable=True,
|
53 |
+
count=0,
|
54 |
+
offset=0,
|
55 |
+
internal_count=0, # hacky way to invalidate the node
|
56 |
+
passthrough_image=None,
|
57 |
+
):
|
58 |
+
if not enable or count == 0:
|
59 |
+
if passthrough_image is not None:
|
60 |
+
log.debug("Using passthrough image")
|
61 |
+
return (passthrough_image,)
|
62 |
+
log.debug("Load from history is disabled for this iteration")
|
63 |
+
return (torch.zeros(0),)
|
64 |
+
frames = []
|
65 |
+
|
66 |
+
base_url, port = get_server_info()
|
67 |
+
|
68 |
+
history_url = f"http://{base_url}:{port}/history"
|
69 |
+
log.debug(f"Fetching history from {history_url}")
|
70 |
+
output = torch.zeros(0)
|
71 |
+
with urllib.request.urlopen(history_url) as response:
|
72 |
+
output = self.load_batch_frames(response, offset, count, frames)
|
73 |
+
|
74 |
+
if output.size(0) == 0:
|
75 |
+
log.warn("No output found in history")
|
76 |
+
|
77 |
+
return (output,)
|
78 |
+
|
79 |
+
def load_batch_frames(self, response, offset, count, frames):
|
80 |
+
history = json.loads(response.read())
|
81 |
+
|
82 |
+
output_images = []
|
83 |
+
|
84 |
+
for run in history.values():
|
85 |
+
for node_output in run["outputs"].values():
|
86 |
+
if "images" in node_output:
|
87 |
+
for image in node_output["images"]:
|
88 |
+
image_data = get_image(
|
89 |
+
image["filename"], image["subfolder"], image["type"]
|
90 |
+
)
|
91 |
+
output_images.append(image_data)
|
92 |
+
|
93 |
+
if not output_images:
|
94 |
+
return torch.zeros(0)
|
95 |
+
|
96 |
+
# Directly get desired range of images
|
97 |
+
start_index = max(len(output_images) - offset - count, 0)
|
98 |
+
end_index = len(output_images) - offset
|
99 |
+
selected_images = output_images[start_index:end_index]
|
100 |
+
|
101 |
+
frames = [Image.open(image) for image in selected_images]
|
102 |
+
|
103 |
+
if not frames:
|
104 |
+
return torch.zeros(0)
|
105 |
+
elif len(frames) != count:
|
106 |
+
log.warning(f"Expected {count} images, got {len(frames)} instead")
|
107 |
+
|
108 |
+
return pil2tensor(frames)
|
109 |
+
|
110 |
+
|
111 |
+
class AnyToString:
|
112 |
+
"""Tries to take any input and convert it to a string"""
|
113 |
+
|
114 |
+
@classmethod
|
115 |
+
def INPUT_TYPES(cls):
|
116 |
+
return {
|
117 |
+
"required": {"input": ("*")},
|
118 |
+
}
|
119 |
+
|
120 |
+
RETURN_TYPES = ("STRING",)
|
121 |
+
FUNCTION = "do_str"
|
122 |
+
CATEGORY = "mtb/converters"
|
123 |
+
|
124 |
+
def do_str(self, input):
|
125 |
+
if isinstance(input, str):
|
126 |
+
return (input,)
|
127 |
+
elif isinstance(input, torch.Tensor):
|
128 |
+
return (f"Tensor of shape {input.shape} and dtype {input.dtype}",)
|
129 |
+
elif isinstance(input, Image.Image):
|
130 |
+
return (f"PIL Image of size {input.size} and mode {input.mode}",)
|
131 |
+
elif isinstance(input, np.ndarray):
|
132 |
+
return (f"Numpy array of shape {input.shape} and dtype {input.dtype}",)
|
133 |
+
|
134 |
+
elif isinstance(input, dict):
|
135 |
+
return (f"Dictionary of {len(input)} items, with keys {input.keys()}",)
|
136 |
+
|
137 |
+
else:
|
138 |
+
log.debug(f"Falling back to string conversion of {input}")
|
139 |
+
return (str(input),)
|
140 |
+
|
141 |
+
|
142 |
+
class StringReplace:
|
143 |
+
"""Basic string replacement"""
|
144 |
+
|
145 |
+
@classmethod
|
146 |
+
def INPUT_TYPES(cls):
|
147 |
+
return {
|
148 |
+
"required": {
|
149 |
+
"string": ("STRING", {"forceInput": True}),
|
150 |
+
"old": ("STRING", {"default": ""}),
|
151 |
+
"new": ("STRING", {"default": ""}),
|
152 |
+
}
|
153 |
+
}
|
154 |
+
|
155 |
+
FUNCTION = "replace_str"
|
156 |
+
RETURN_TYPES = ("STRING",)
|
157 |
+
CATEGORY = "mtb/string"
|
158 |
+
|
159 |
+
def replace_str(self, string: str, old: str, new: str):
|
160 |
+
log.debug(f"Current string: {string}")
|
161 |
+
log.debug(f"Find string: {old}")
|
162 |
+
log.debug(f"Replace string: {new}")
|
163 |
+
|
164 |
+
string = string.replace(old, new)
|
165 |
+
|
166 |
+
log.debug(f"New string: {string}")
|
167 |
+
|
168 |
+
return (string,)
|
169 |
+
|
170 |
+
|
171 |
+
class FitNumber:
|
172 |
+
"""Fit the input float using a source and target range"""
|
173 |
+
|
174 |
+
@classmethod
|
175 |
+
def INPUT_TYPES(cls):
|
176 |
+
return {
|
177 |
+
"required": {
|
178 |
+
"value": ("FLOAT", {"default": 0, "forceInput": True}),
|
179 |
+
"clamp": ("BOOLEAN", {"default": False}),
|
180 |
+
"source_min": ("FLOAT", {"default": 0.0}),
|
181 |
+
"source_max": ("FLOAT", {"default": 1.0}),
|
182 |
+
"target_min": ("FLOAT", {"default": 0.0}),
|
183 |
+
"target_max": ("FLOAT", {"default": 1.0}),
|
184 |
+
"easing": (
|
185 |
+
[
|
186 |
+
"Linear",
|
187 |
+
"Sine In",
|
188 |
+
"Sine Out",
|
189 |
+
"Sine In/Out",
|
190 |
+
"Quart In",
|
191 |
+
"Quart Out",
|
192 |
+
"Quart In/Out",
|
193 |
+
"Cubic In",
|
194 |
+
"Cubic Out",
|
195 |
+
"Cubic In/Out",
|
196 |
+
"Circ In",
|
197 |
+
"Circ Out",
|
198 |
+
"Circ In/Out",
|
199 |
+
"Back In",
|
200 |
+
"Back Out",
|
201 |
+
"Back In/Out",
|
202 |
+
"Elastic In",
|
203 |
+
"Elastic Out",
|
204 |
+
"Elastic In/Out",
|
205 |
+
"Bounce In",
|
206 |
+
"Bounce Out",
|
207 |
+
"Bounce In/Out",
|
208 |
+
],
|
209 |
+
{"default": "Linear"},
|
210 |
+
),
|
211 |
+
}
|
212 |
+
}
|
213 |
+
|
214 |
+
FUNCTION = "set_range"
|
215 |
+
RETURN_TYPES = ("FLOAT",)
|
216 |
+
CATEGORY = "mtb/math"
|
217 |
+
|
218 |
+
def set_range(
|
219 |
+
self,
|
220 |
+
value: float,
|
221 |
+
clamp: bool,
|
222 |
+
source_min: float,
|
223 |
+
source_max: float,
|
224 |
+
target_min: float,
|
225 |
+
target_max: float,
|
226 |
+
easing: str,
|
227 |
+
):
|
228 |
+
normalized_value = (value - source_min) / (source_max - source_min)
|
229 |
+
|
230 |
+
eased_value = apply_easing(normalized_value, easing)
|
231 |
+
|
232 |
+
# - Convert the eased value to the target range
|
233 |
+
res = target_min + (target_max - target_min) * eased_value
|
234 |
+
|
235 |
+
if clamp:
|
236 |
+
if target_min > target_max:
|
237 |
+
res = max(min(res, target_min), target_max)
|
238 |
+
else:
|
239 |
+
res = max(min(res, target_max), target_min)
|
240 |
+
|
241 |
+
return (res,)
|
242 |
+
|
243 |
+
|
244 |
+
__nodes__ = [StringReplace, FitNumber, GetBatchFromHistory, AnyToString]
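FitNumber above is a remap: normalize the value against the source range, run the easing curve, then scale into the target range. A numeric sketch of the linear case, assuming apply_easing returns its input unchanged for "Linear":
(res,) = FitNumber().set_range(
    value=0.25,
    clamp=True,
    source_min=0.0,
    source_max=1.0,
    target_min=0.0,
    target_max=10.0,
    easing="Linear",
)
print(res)  # 2.5 -> (0.25 - 0.0) / (1.0 - 0.0) = 0.25, then 0.0 + 10.0 * 0.25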
|
comfy_mtb/nodes/image_interpolation.py
ADDED
@@ -0,0 +1,154 @@
1 |
+
from typing import List
|
2 |
+
from pathlib import Path
|
3 |
+
import os
|
4 |
+
import glob
|
5 |
+
import folder_paths
|
6 |
+
from ..log import log
|
7 |
+
import torch
|
8 |
+
from frame_interpolation.eval import util, interpolator
|
9 |
+
import numpy as np
|
10 |
+
import comfy
|
11 |
+
import comfy.utils
|
12 |
+
import tensorflow as tf
|
13 |
+
import comfy.model_management as model_management
|
14 |
+
|
15 |
+
|
16 |
+
class LoadFilmModel:
|
17 |
+
"""Loads a FILM model"""
|
18 |
+
|
19 |
+
@staticmethod
|
20 |
+
def get_models() -> List[Path]:
|
21 |
+
models_path = os.path.join(folder_paths.models_dir, "FILM/*")
|
22 |
+
models = glob.glob(models_path)
|
23 |
+
models = [Path(x) for x in models if x.endswith(".onnx") or x.endswith(".pth")]
|
24 |
+
return models
|
25 |
+
|
26 |
+
@classmethod
|
27 |
+
def INPUT_TYPES(cls):
|
28 |
+
return {
|
29 |
+
"required": {
|
30 |
+
"film_model": (
|
31 |
+
["L1", "Style", "VGG"],
|
32 |
+
{"default": "Style"},
|
33 |
+
),
|
34 |
+
},
|
35 |
+
}
|
36 |
+
|
37 |
+
RETURN_TYPES = ("FILM_MODEL",)
|
38 |
+
FUNCTION = "load_model"
|
39 |
+
CATEGORY = "mtb/frame interpolation"
|
40 |
+
|
41 |
+
def load_model(self, film_model: str):
|
42 |
+
model_path = Path(folder_paths.models_dir) / "FILM" / film_model
|
43 |
+
if not (model_path / "saved_model.pb").exists():
|
44 |
+
model_path = model_path / "saved_model"
|
45 |
+
|
46 |
+
if not model_path.exists():
|
47 |
+
log.error(f"Model {model_path} does not exist")
|
48 |
+
raise ValueError(f"Model {model_path} does not exist")
|
49 |
+
|
50 |
+
log.info(f"Loading model {model_path}")
|
51 |
+
|
52 |
+
return (interpolator.Interpolator(model_path.as_posix(), None),)
|
53 |
+
|
54 |
+
|
55 |
+
class FilmInterpolation:
|
56 |
+
"""Google Research FILM frame interpolation for large motion"""
|
57 |
+
|
58 |
+
@classmethod
|
59 |
+
def INPUT_TYPES(cls):
|
60 |
+
return {
|
61 |
+
"required": {
|
62 |
+
"images": ("IMAGE",),
|
63 |
+
"interpolate": ("INT", {"default": 2, "min": 1, "max": 50}),
|
64 |
+
"film_model": ("FILM_MODEL",),
|
65 |
+
},
|
66 |
+
}
|
67 |
+
|
68 |
+
RETURN_TYPES = ("IMAGE",)
|
69 |
+
FUNCTION = "do_interpolation"
|
70 |
+
CATEGORY = "mtb/frame interpolation"
|
71 |
+
|
72 |
+
def do_interpolation(
|
73 |
+
self,
|
74 |
+
images: torch.Tensor,
|
75 |
+
interpolate: int,
|
76 |
+
film_model: interpolator.Interpolator,
|
77 |
+
):
|
78 |
+
n = images.size(0)
|
79 |
+
# check if images is an empty tensor and return it...
|
80 |
+
if n == 0:
|
81 |
+
return (images,)
|
82 |
+
|
83 |
+
# check if tensorflow GPU is available
|
84 |
+
available_gpus = tf.config.list_physical_devices("GPU")
|
85 |
+
if not len(available_gpus):
|
86 |
+
log.warning(
|
87 |
+
"Tensorflow GPU not available, falling back to CPU this will be very slow"
|
88 |
+
)
|
89 |
+
else:
|
90 |
+
log.debug(f"Tensorflow GPU available, using {available_gpus}")
|
91 |
+
|
92 |
+
num_frames = (n - 1) * (2 ** (interpolate) - 1)
|
93 |
+
log.debug(f"Will interpolate into {num_frames} frames")
|
94 |
+
|
95 |
+
in_frames = [images[i] for i in range(n)]
|
96 |
+
out_tensors = []
|
97 |
+
|
98 |
+
pbar = comfy.utils.ProgressBar(num_frames)
|
99 |
+
|
100 |
+
for frame in util.interpolate_recursively_from_memory(
|
101 |
+
in_frames, interpolate, film_model
|
102 |
+
):
|
103 |
+
out_tensors.append(
|
104 |
+
torch.from_numpy(frame) if isinstance(frame, np.ndarray) else frame
|
105 |
+
)
|
106 |
+
model_management.throw_exception_if_processing_interrupted()
|
107 |
+
pbar.update(1)
|
108 |
+
|
109 |
+
out_tensors = torch.cat([tens.unsqueeze(0) for tens in out_tensors], dim=0)
|
110 |
+
|
111 |
+
log.debug(f"Returning {len(out_tensors)} tensors")
|
112 |
+
log.debug(f"Output shape {out_tensors.shape}")
|
113 |
+
log.debug(f"Output type {out_tensors.dtype}")
|
114 |
+
return (out_tensors,)
|
115 |
+
|
116 |
+
|
117 |
+
class ConcatImages:
|
118 |
+
"""Add images to batch"""
|
119 |
+
|
120 |
+
RETURN_TYPES = ("IMAGE",)
|
121 |
+
FUNCTION = "concat_images"
|
122 |
+
CATEGORY = "mtb/image"
|
123 |
+
|
124 |
+
@classmethod
|
125 |
+
def INPUT_TYPES(cls):
|
126 |
+
return {
|
127 |
+
"required": {
|
128 |
+
"imageA": ("IMAGE",),
|
129 |
+
"imageB": ("IMAGE",),
|
130 |
+
},
|
131 |
+
}
|
132 |
+
|
133 |
+
@classmethod
|
134 |
+
def concatenate_tensors(cls, A: torch.Tensor, B: torch.Tensor):
|
135 |
+
# Get the batch sizes of A and B
|
136 |
+
batch_size_A = A.size(0)
|
137 |
+
batch_size_B = B.size(0)
|
138 |
+
|
139 |
+
# Concatenate the tensors along the batch dimension
|
140 |
+
concatenated = torch.cat((A, B), dim=0)
|
141 |
+
|
142 |
+
# Update the batch size in the concatenated tensor
|
143 |
+
concatenated_size = list(concatenated.size())
|
144 |
+
concatenated_size[0] = batch_size_A + batch_size_B
|
145 |
+
concatenated = concatenated.view(*concatenated_size)
|
146 |
+
|
147 |
+
return concatenated
|
148 |
+
|
149 |
+
def concat_images(self, imageA: torch.Tensor, imageB: torch.Tensor):
|
150 |
+
log.debug(f"Concatenating A ({imageA.shape}) and B ({imageB.shape})")
|
151 |
+
return (self.concatenate_tensors(imageA, imageB),)
|
152 |
+
|
153 |
+
|
154 |
+
__nodes__ = [LoadFilmModel, FilmInterpolation, ConcatImages]
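ConcatImages above is a plain torch.cat along the batch dimension (the following view() call does not change the shape); a self-contained check:
import torch

a = torch.rand(2, 64, 64, 3)
b = torch.rand(3, 64, 64, 3)
(merged,) = ConcatImages().concat_images(a, b)
print(merged.shape)  # torch.Size([5, 64, 64, 3])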
|
comfy_mtb/nodes/image_processing.py
ADDED
@@ -0,0 +1,617 @@
1 |
+
import torch
|
2 |
+
from skimage.filters import gaussian
|
3 |
+
from skimage.util import compare_images
|
4 |
+
import numpy as np
|
5 |
+
import torch.nn.functional as F
|
6 |
+
from PIL import Image
|
7 |
+
from ..utils import tensor2pil, pil2tensor, tensor2np
|
8 |
+
import torch
|
9 |
+
import folder_paths
|
10 |
+
from PIL.PngImagePlugin import PngInfo
|
11 |
+
import json
|
12 |
+
import os
|
13 |
+
import math
|
14 |
+
|
15 |
+
|
16 |
+
# try:
|
17 |
+
# from cv2.ximgproc import guidedFilter
|
18 |
+
# except ImportError:
|
19 |
+
# log.warning("cv2.ximgproc.guidedFilter not found, use opencv-contrib-python")
|
20 |
+
|
21 |
+
|
22 |
+
class ColorCorrect:
|
23 |
+
"""Various color correction methods"""
|
24 |
+
|
25 |
+
@classmethod
|
26 |
+
def INPUT_TYPES(cls):
|
27 |
+
return {
|
28 |
+
"required": {
|
29 |
+
"image": ("IMAGE",),
|
30 |
+
"clamp": ([True, False], {"default": True}),
|
31 |
+
"gamma": (
|
32 |
+
"FLOAT",
|
33 |
+
{"default": 1.0, "min": 0.0, "max": 5.0, "step": 0.01},
|
34 |
+
),
|
35 |
+
"contrast": (
|
36 |
+
"FLOAT",
|
37 |
+
{"default": 1.0, "min": 0.0, "max": 5.0, "step": 0.01},
|
38 |
+
),
|
39 |
+
"exposure": (
|
40 |
+
"FLOAT",
|
41 |
+
{"default": 0.0, "min": -5.0, "max": 5.0, "step": 0.01},
|
42 |
+
),
|
43 |
+
"offset": (
|
44 |
+
"FLOAT",
|
45 |
+
{"default": 0.0, "min": -5.0, "max": 5.0, "step": 0.01},
|
46 |
+
),
|
47 |
+
"hue": (
|
48 |
+
"FLOAT",
|
49 |
+
{"default": 0.0, "min": -0.5, "max": 0.5, "step": 0.01},
|
50 |
+
),
|
51 |
+
"saturation": (
|
52 |
+
"FLOAT",
|
53 |
+
{"default": 1.0, "min": 0.0, "max": 5.0, "step": 0.01},
|
54 |
+
),
|
55 |
+
"value": (
|
56 |
+
"FLOAT",
|
57 |
+
{"default": 1.0, "min": 0.0, "max": 5.0, "step": 0.01},
|
58 |
+
),
|
59 |
+
}
|
60 |
+
}
|
61 |
+
|
62 |
+
RETURN_TYPES = ("IMAGE",)
|
63 |
+
FUNCTION = "correct"
|
64 |
+
CATEGORY = "mtb/image processing"
|
65 |
+
|
66 |
+
@staticmethod
|
67 |
+
def gamma_correction_tensor(image, gamma):
|
68 |
+
gamma_inv = 1.0 / gamma
|
69 |
+
return image.pow(gamma_inv)
|
70 |
+
|
71 |
+
@staticmethod
|
72 |
+
def contrast_adjustment_tensor(image, contrast):
|
73 |
+
contrasted = (image - 0.5) * contrast + 0.5
|
74 |
+
return torch.clamp(contrasted, 0.0, 1.0)
|
75 |
+
|
76 |
+
@staticmethod
|
77 |
+
def exposure_adjustment_tensor(image, exposure):
|
78 |
+
return image * (2.0**exposure)
|
79 |
+
|
80 |
+
@staticmethod
|
81 |
+
def offset_adjustment_tensor(image, offset):
|
82 |
+
return image + offset
|
83 |
+
|
84 |
+
@staticmethod
|
85 |
+
def hsv_adjustment(image: torch.Tensor, hue, saturation, value):
|
86 |
+
images = tensor2pil(image)
|
87 |
+
out = []
|
88 |
+
for img in images:
|
89 |
+
hsv_image = img.convert("HSV")
|
90 |
+
|
91 |
+
h, s, v = hsv_image.split()
|
92 |
+
|
93 |
+
h = h.point(lambda x: (x + hue * 255) % 256)
|
94 |
+
s = s.point(lambda x: int(x * saturation))
|
95 |
+
v = v.point(lambda x: int(x * value))
|
96 |
+
|
97 |
+
hsv_image = Image.merge("HSV", (h, s, v))
|
98 |
+
rgb_image = hsv_image.convert("RGB")
|
99 |
+
out.append(rgb_image)
|
100 |
+
return pil2tensor(out)
|
101 |
+
|
102 |
+
@staticmethod
|
103 |
+
def hsv_adjustment_tensor_not_working(image: torch.Tensor, hue, saturation, value):
|
104 |
+
"""Abandonning for now"""
|
105 |
+
image = image.squeeze(0).permute(2, 0, 1)
|
106 |
+
|
107 |
+
max_val, _ = image.max(dim=0, keepdim=True)
|
108 |
+
min_val, _ = image.min(dim=0, keepdim=True)
|
109 |
+
delta = max_val - min_val
|
110 |
+
|
111 |
+
hue_image = torch.zeros_like(max_val)
|
112 |
+
mask = delta != 0.0
|
113 |
+
|
114 |
+
r, g, b = image[0], image[1], image[2]
|
115 |
+
hue_image[mask & (max_val == r)] = ((g - b) / delta)[
|
116 |
+
mask & (max_val == r)
|
117 |
+
] % 6.0
|
118 |
+
hue_image[mask & (max_val == g)] = ((b - r) / delta)[
|
119 |
+
mask & (max_val == g)
|
120 |
+
] + 2.0
|
121 |
+
hue_image[mask & (max_val == b)] = ((r - g) / delta)[
|
122 |
+
mask & (max_val == b)
|
123 |
+
] + 4.0
|
124 |
+
|
125 |
+
saturation_image = delta / (max_val + 1e-7)
|
126 |
+
value_image = max_val
|
127 |
+
|
128 |
+
hue_image = (hue_image + hue) % 1.0
|
129 |
+
saturation_image = torch.where(
|
130 |
+
mask, saturation * saturation_image, saturation_image
|
131 |
+
)
|
132 |
+
value_image = value * value_image
|
133 |
+
|
134 |
+
c = value_image * saturation_image
|
135 |
+
x = c * (1 - torch.abs((hue_image % 2) - 1))
|
136 |
+
m = value_image - c
|
137 |
+
|
138 |
+
prime_image = torch.zeros_like(image)
|
139 |
+
prime_image[0] = torch.where(
|
140 |
+
max_val == r, c, torch.where(max_val == g, x, prime_image[0])
|
141 |
+
)
|
142 |
+
prime_image[1] = torch.where(
|
143 |
+
max_val == r, x, torch.where(max_val == g, c, prime_image[1])
|
144 |
+
)
|
145 |
+
prime_image[2] = torch.where(
|
146 |
+
max_val == g, x, torch.where(max_val == b, c, prime_image[2])
|
147 |
+
)
|
148 |
+
|
149 |
+
rgb_image = prime_image + m
|
150 |
+
|
151 |
+
rgb_image = rgb_image.permute(1, 2, 0).unsqueeze(0)
|
152 |
+
|
153 |
+
return rgb_image
|
154 |
+
|
155 |
+
def correct(
|
156 |
+
self,
|
157 |
+
image: torch.Tensor,
|
158 |
+
clamp: bool,
|
159 |
+
gamma: float = 1.0,
|
160 |
+
contrast: float = 1.0,
|
161 |
+
exposure: float = 0.0,
|
162 |
+
offset: float = 0.0,
|
163 |
+
hue: float = 0.0,
|
164 |
+
saturation: float = 1.0,
|
165 |
+
value: float = 1.0,
|
166 |
+
):
|
167 |
+
# Apply color correction operations
|
168 |
+
image = self.gamma_correction_tensor(image, gamma)
|
169 |
+
image = self.contrast_adjustment_tensor(image, contrast)
|
170 |
+
image = self.exposure_adjustment_tensor(image, exposure)
|
171 |
+
image = self.offset_adjustment_tensor(image, offset)
|
172 |
+
image = self.hsv_adjustment(image, hue, saturation, value)
|
173 |
+
|
174 |
+
if clamp:
|
175 |
+
image = torch.clamp(image, 0.0, 1.0)
|
176 |
+
|
177 |
+
return (image,)
|
178 |
+
|
179 |
+
|
180 |
+
class ImageCompare:
|
181 |
+
"""Compare two images and return a difference image"""
|
182 |
+
|
183 |
+
@classmethod
|
184 |
+
def INPUT_TYPES(cls):
|
185 |
+
return {
|
186 |
+
"required": {
|
187 |
+
"imageA": ("IMAGE",),
|
188 |
+
"imageB": ("IMAGE",),
|
189 |
+
"mode": (
|
190 |
+
["checkerboard", "diff", "blend"],
|
191 |
+
{"default": "checkerboard"},
|
192 |
+
),
|
193 |
+
}
|
194 |
+
}
|
195 |
+
|
196 |
+
RETURN_TYPES = ("IMAGE",)
|
197 |
+
FUNCTION = "compare"
|
198 |
+
CATEGORY = "mtb/image"
|
199 |
+
|
200 |
+
def compare(self, imageA: torch.Tensor, imageB: torch.Tensor, mode):
|
201 |
+
imageA = imageA.numpy()
|
202 |
+
imageB = imageB.numpy()
|
203 |
+
|
204 |
+
imageA = imageA.squeeze()
|
205 |
+
imageB = imageB.squeeze()
|
206 |
+
|
207 |
+
image = compare_images(imageA, imageB, method=mode)
|
208 |
+
|
209 |
+
image = np.expand_dims(image, axis=0)
|
210 |
+
return (torch.from_numpy(image),)
|
211 |
+
|
212 |
+
|
213 |
+
import requests
|
214 |
+
|
215 |
+
|
216 |
+
class LoadImageFromUrl:
|
217 |
+
"""Load an image from the given URL"""
|
218 |
+
|
219 |
+
@classmethod
|
220 |
+
def INPUT_TYPES(cls):
|
221 |
+
return {
|
222 |
+
"required": {
|
223 |
+
"url": (
|
224 |
+
"STRING",
|
225 |
+
{
|
226 |
+
"default": "https://upload.wikimedia.org/wikipedia/commons/thumb/a/a7/Example.jpg/800px-Example.jpg"
|
227 |
+
},
|
228 |
+
),
|
229 |
+
}
|
230 |
+
}
|
231 |
+
|
232 |
+
RETURN_TYPES = ("IMAGE",)
|
233 |
+
FUNCTION = "load"
|
234 |
+
CATEGORY = "mtb/IO"
|
235 |
+
|
236 |
+
def load(self, url):
|
237 |
+
# get the image from the url
|
238 |
+
image = Image.open(requests.get(url, stream=True).raw)
|
239 |
+
return (pil2tensor(image),)
|
240 |
+
|
241 |
+
|
242 |
+
class Blur:
|
243 |
+
"""Blur an image using a Gaussian filter."""
|
244 |
+
|
245 |
+
@classmethod
|
246 |
+
def INPUT_TYPES(cls):
|
247 |
+
return {
|
248 |
+
"required": {
|
249 |
+
"image": ("IMAGE",),
|
250 |
+
"sigmaX": (
|
251 |
+
"FLOAT",
|
252 |
+
{"default": 3.0, "min": 0.0, "max": 10.0, "step": 0.01},
|
253 |
+
),
|
254 |
+
"sigmaY": (
|
255 |
+
"FLOAT",
|
256 |
+
{"default": 3.0, "min": 0.0, "max": 10.0, "step": 0.01},
|
257 |
+
),
|
258 |
+
}
|
259 |
+
}
|
260 |
+
|
261 |
+
RETURN_TYPES = ("IMAGE",)
|
262 |
+
FUNCTION = "blur"
|
263 |
+
CATEGORY = "mtb/image processing"
|
264 |
+
|
265 |
+
def blur(self, image: torch.Tensor, sigmaX, sigmaY):
|
266 |
+
image = image.numpy()
|
267 |
+
image = image.transpose(1, 2, 3, 0)
|
268 |
+
image = gaussian(image, sigma=(sigmaX, sigmaY, 0, 0))
|
269 |
+
image = image.transpose(3, 0, 1, 2)
|
270 |
+
return (torch.from_numpy(image),)
|
271 |
+
|
272 |
+
|
273 |
+
# https://github.com/lllyasviel/AdverseCleaner/blob/main/clean.py
|
274 |
+
# def deglaze_np_img(np_img):
|
275 |
+
# y = np_img.copy()
|
276 |
+
# for _ in range(64):
|
277 |
+
# y = cv2.bilateralFilter(y, 5, 8, 8)
|
278 |
+
# for _ in range(4):
|
279 |
+
# y = guidedFilter(np_img, y, 4, 16)
|
280 |
+
# return y
|
281 |
+
|
282 |
+
|
283 |
+
# class DeglazeImage:
|
284 |
+
# """Remove adversarial noise from images"""
|
285 |
+
|
286 |
+
# @classmethod
|
287 |
+
# def INPUT_TYPES(cls):
|
288 |
+
# return {"required": {"image": ("IMAGE",)}}
|
289 |
+
|
290 |
+
# CATEGORY = "mtb/image processing"
|
291 |
+
|
292 |
+
# RETURN_TYPES = ("IMAGE",)
|
293 |
+
# FUNCTION = "deglaze_image"
|
294 |
+
|
295 |
+
# def deglaze_image(self, image):
|
296 |
+
# return (np2tensor(deglaze_np_img(tensor2np(image))),)
|
297 |
+
|
298 |
+
|
299 |
+
class MaskToImage:
|
300 |
+
"""Converts a mask (alpha) to an RGB image with a color and background"""
|
301 |
+
|
302 |
+
@classmethod
|
303 |
+
def INPUT_TYPES(cls):
|
304 |
+
return {
|
305 |
+
"required": {
|
306 |
+
"mask": ("MASK",),
|
307 |
+
"color": ("COLOR",),
|
308 |
+
"background": ("COLOR", {"default": "#000000"}),
|
309 |
+
}
|
310 |
+
}
|
311 |
+
|
312 |
+
CATEGORY = "mtb/generate"
|
313 |
+
|
314 |
+
RETURN_TYPES = ("IMAGE",)
|
315 |
+
|
316 |
+
FUNCTION = "render_mask"
|
317 |
+
|
318 |
+
def render_mask(self, mask, color, background):
|
319 |
+
mask = tensor2np(mask)
|
320 |
+
mask = Image.fromarray(mask).convert("L")
|
321 |
+
|
322 |
+
image = Image.new("RGBA", mask.size, color=color)
|
323 |
+
# apply the mask
|
324 |
+
image = Image.composite(
|
325 |
+
image, Image.new("RGBA", mask.size, color=background), mask
|
326 |
+
)
|
327 |
+
|
328 |
+
# image = ImageChops.multiply(image, mask)
|
329 |
+
# apply over background
|
330 |
+
# image = Image.alpha_composite(Image.new("RGBA", image.size, color=background), image)
|
331 |
+
|
332 |
+
image = pil2tensor(image.convert("RGB"))
|
333 |
+
|
334 |
+
return (image,)
|
335 |
+
|
336 |
+
|
337 |
+
class ColoredImage:
|
338 |
+
"""Constant color image of given size"""
|
339 |
+
|
340 |
+
def __init__(self) -> None:
|
341 |
+
pass
|
342 |
+
|
343 |
+
@classmethod
|
344 |
+
def INPUT_TYPES(cls):
|
345 |
+
return {
|
346 |
+
"required": {
|
347 |
+
"color": ("COLOR",),
|
348 |
+
"width": ("INT", {"default": 512, "min": 16, "max": 8160}),
|
349 |
+
"height": ("INT", {"default": 512, "min": 16, "max": 8160}),
|
350 |
+
}
|
351 |
+
}
|
352 |
+
|
353 |
+
CATEGORY = "mtb/generate"
|
354 |
+
|
355 |
+
RETURN_TYPES = ("IMAGE",)
|
356 |
+
|
357 |
+
FUNCTION = "render_img"
|
358 |
+
|
359 |
+
def render_img(self, color, width, height):
|
360 |
+
image = Image.new("RGB", (width, height), color=color)
|
361 |
+
|
362 |
+
image = pil2tensor(image)
|
363 |
+
|
364 |
+
return (image,)
|
365 |
+
|
366 |
+
|
367 |
+
class ImagePremultiply:
|
368 |
+
"""Premultiply image with mask"""
|
369 |
+
|
370 |
+
@classmethod
|
371 |
+
def INPUT_TYPES(cls):
|
372 |
+
return {
|
373 |
+
"required": {
|
374 |
+
"image": ("IMAGE",),
|
375 |
+
"mask": ("MASK",),
|
376 |
+
"invert": ("BOOLEAN", {"default": False}),
|
377 |
+
}
|
378 |
+
}
|
379 |
+
|
380 |
+
CATEGORY = "mtb/image"
|
381 |
+
RETURN_TYPES = ("IMAGE",)
|
382 |
+
FUNCTION = "premultiply"
|
383 |
+
|
384 |
+
def premultiply(self, image, mask, invert):
|
385 |
+
images = tensor2pil(image)
|
386 |
+
if invert:
|
387 |
+
masks = tensor2pil(mask) # .convert("L")
|
388 |
+
else:
|
389 |
+
masks = tensor2pil(1.0 - mask)
|
390 |
+
|
391 |
+
single = False
|
392 |
+
if len(mask) == 1:
|
393 |
+
single = True
|
394 |
+
|
395 |
+
masks = [x.convert("L") for x in masks]
|
396 |
+
|
397 |
+
out = []
|
398 |
+
for i, img in enumerate(images):
|
399 |
+
cur_mask = masks[0] if single else masks[i]
|
400 |
+
|
401 |
+
img.putalpha(cur_mask)
|
402 |
+
out.append(img)
|
403 |
+
|
404 |
+
# if invert:
|
405 |
+
# image = Image.composite(image,Image.new("RGBA", image.size, color=(0,0,0,0)), mask)
|
406 |
+
# else:
|
407 |
+
# image = Image.composite(Image.new("RGBA", image.size, color=(0,0,0,0)), image, mask)
|
408 |
+
|
409 |
+
return (pil2tensor(out),)
|
410 |
+
|
411 |
+
|
412 |
+
class ImageResizeFactor:
|
413 |
+
"""Extracted mostly from WAS Node Suite, with a few edits (most notably multiple image support) and less features."""
|
414 |
+
|
415 |
+
@classmethod
|
416 |
+
def INPUT_TYPES(cls):
|
417 |
+
return {
|
418 |
+
"required": {
|
419 |
+
"image": ("IMAGE",),
|
420 |
+
"factor": (
|
421 |
+
"FLOAT",
|
422 |
+
{"default": 2, "min": 0.01, "max": 16.0, "step": 0.01},
|
423 |
+
),
|
424 |
+
"supersample": ("BOOLEAN", {"default": True}),
|
425 |
+
"resampling": (
|
426 |
+
[
|
427 |
+
"nearest",
|
428 |
+
"linear",
|
429 |
+
"bilinear",
|
430 |
+
"bicubic",
|
431 |
+
"trilinear",
|
432 |
+
"area",
|
433 |
+
"nearest-exact",
|
434 |
+
],
|
435 |
+
{"default": "nearest"},
|
436 |
+
),
|
437 |
+
},
|
438 |
+
"optional": {
|
439 |
+
"mask": ("MASK",),
|
440 |
+
},
|
441 |
+
}
|
442 |
+
|
443 |
+
CATEGORY = "mtb/image"
|
444 |
+
RETURN_TYPES = ("IMAGE", "MASK")
|
445 |
+
FUNCTION = "resize"
|
446 |
+
|
447 |
+
def resize(
|
448 |
+
self,
|
449 |
+
image: torch.Tensor,
|
450 |
+
factor: float,
|
451 |
+
supersample: bool,
|
452 |
+
resampling: str,
|
453 |
+
mask=None,
|
454 |
+
):
|
455 |
+
# Check if the tensor has the correct dimension
|
456 |
+
if len(image.shape) not in [3, 4]: # HxWxC or BxHxWxC
|
457 |
+
raise ValueError("Expected image tensor of shape (H, W, C) or (B, H, W, C)")
|
458 |
+
|
459 |
+
# Transpose to CxHxW or BxCxHxW for PyTorch
|
460 |
+
if len(image.shape) == 3:
|
461 |
+
image = image.permute(2, 0, 1).unsqueeze(0) # CxHxW
|
462 |
+
else:
|
463 |
+
image = image.permute(0, 3, 1, 2) # BxCxHxW
|
464 |
+
|
465 |
+
# Compute new dimensions
|
466 |
+
B, C, H, W = image.shape
|
467 |
+
new_H, new_W = int(H * factor), int(W * factor)
|
468 |
+
|
469 |
+
align_corner_filters = ("linear", "bilinear", "bicubic", "trilinear")
|
470 |
+
# Resize the image
|
471 |
+
resized_image = F.interpolate(
|
472 |
+
image,
|
473 |
+
size=(new_H, new_W),
|
474 |
+
mode=resampling,
|
475 |
+
align_corners=resampling in align_corner_filters,
|
476 |
+
)
|
477 |
+
|
478 |
+
# Optionally supersample
|
479 |
+
if supersample:
|
480 |
+
resized_image = F.interpolate(
|
481 |
+
resized_image,
|
482 |
+
scale_factor=2,
|
483 |
+
mode=resampling,
|
484 |
+
align_corners=resampling in align_corner_filters,
|
485 |
+
)
|
486 |
+
|
487 |
+
# Transpose back to the original format: BxHxWxC or HxWxC
|
488 |
+
if len(image.shape) == 4:
|
489 |
+
resized_image = resized_image.permute(0, 2, 3, 1)
|
490 |
+
else:
|
491 |
+
resized_image = resized_image.squeeze(0).permute(1, 2, 0)
|
492 |
+
|
493 |
+
# Apply mask if provided
|
494 |
+
if mask is not None:
|
495 |
+
if len(mask.shape) != len(resized_image.shape):
|
496 |
+
raise ValueError(
|
497 |
+
"Mask tensor should have the same dimensions as the image tensor"
|
498 |
+
)
|
499 |
+
resized_image = resized_image * mask
|
500 |
+
|
501 |
+
return (resized_image,)
|
502 |
+
|
503 |
+
|
504 |
+
class SaveImageGrid:
|
505 |
+
"""Save all the images in the input batch as a grid of images."""
|
506 |
+
|
507 |
+
def __init__(self):
|
508 |
+
self.output_dir = folder_paths.get_output_directory()
|
509 |
+
self.type = "output"
|
510 |
+
|
511 |
+
@classmethod
|
512 |
+
def INPUT_TYPES(cls):
|
513 |
+
return {
|
514 |
+
"required": {
|
515 |
+
"images": ("IMAGE",),
|
516 |
+
"filename_prefix": ("STRING", {"default": "ComfyUI"}),
|
517 |
+
"save_intermediate": ("BOOLEAN", {"default": False}),
|
518 |
+
},
|
519 |
+
"hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"},
|
520 |
+
}
|
521 |
+
|
522 |
+
RETURN_TYPES = ()
|
523 |
+
FUNCTION = "save_images"
|
524 |
+
|
525 |
+
OUTPUT_NODE = True
|
526 |
+
|
527 |
+
CATEGORY = "mtb/IO"
|
528 |
+
|
529 |
+
def create_image_grid(self, image_list):
|
530 |
+
total_images = len(image_list)
|
531 |
+
|
532 |
+
# Calculate the grid size based on the square root of the total number of images
|
533 |
+
grid_size = (
|
534 |
+
int(math.sqrt(total_images)),
|
535 |
+
int(math.ceil(math.sqrt(total_images))),
|
536 |
+
)
|
537 |
+
|
538 |
+
# Get the size of the first image to determine the grid size
|
539 |
+
image_width, image_height = image_list[0].size
|
540 |
+
|
541 |
+
# Create a new blank image to hold the grid
|
542 |
+
grid_width = grid_size[0] * image_width
|
543 |
+
grid_height = grid_size[1] * image_height
|
544 |
+
grid_image = Image.new("RGB", (grid_width, grid_height))
|
545 |
+
|
546 |
+
# Iterate over the images and paste them onto the grid
|
547 |
+
for i, image in enumerate(image_list):
|
548 |
+
x = (i % grid_size[0]) * image_width
|
549 |
+
y = (i // grid_size[0]) * image_height
|
550 |
+
grid_image.paste(image, (x, y, x + image_width, y + image_height))
|
551 |
+
|
552 |
+
return grid_image
|
553 |
+
|
554 |
+
def save_images(
|
555 |
+
self,
|
556 |
+
images,
|
557 |
+
filename_prefix="Grid",
|
558 |
+
save_intermediate=False,
|
559 |
+
prompt=None,
|
560 |
+
extra_pnginfo=None,
|
561 |
+
):
|
562 |
+
(
|
563 |
+
full_output_folder,
|
564 |
+
filename,
|
565 |
+
counter,
|
566 |
+
subfolder,
|
567 |
+
filename_prefix,
|
568 |
+
) = folder_paths.get_save_image_path(
|
569 |
+
filename_prefix, self.output_dir, images[0].shape[1], images[0].shape[0]
|
570 |
+
)
|
571 |
+
image_list = []
|
572 |
+
batch_counter = counter
|
573 |
+
|
574 |
+
metadata = PngInfo()
|
575 |
+
if prompt is not None:
|
576 |
+
metadata.add_text("prompt", json.dumps(prompt))
|
577 |
+
if extra_pnginfo is not None:
|
578 |
+
for x in extra_pnginfo:
|
579 |
+
metadata.add_text(x, json.dumps(extra_pnginfo[x]))
|
580 |
+
|
581 |
+
for idx, image in enumerate(images):
|
582 |
+
i = 255.0 * image.cpu().numpy()
|
583 |
+
img = Image.fromarray(np.clip(i, 0, 255).astype(np.uint8))
|
584 |
+
image_list.append(img)
|
585 |
+
|
586 |
+
if save_intermediate:
|
587 |
+
file = f"{filename}_batch-{idx:03}_{batch_counter:05}_.png"
|
588 |
+
img.save(
|
589 |
+
os.path.join(full_output_folder, file),
|
590 |
+
pnginfo=metadata,
|
591 |
+
compress_level=4,
|
592 |
+
)
|
593 |
+
|
594 |
+
batch_counter += 1
|
595 |
+
|
596 |
+
file = f"{filename}_{counter:05}_.png"
|
597 |
+
grid = self.create_image_grid(image_list)
|
598 |
+
grid.save(
|
599 |
+
os.path.join(full_output_folder, file), pnginfo=metadata, compress_level=4
|
600 |
+
)
|
601 |
+
|
602 |
+
results = [{"filename": file, "subfolder": subfolder, "type": self.type}]
|
603 |
+
return {"ui": {"images": results}}
|
604 |
+
|
605 |
+
|
606 |
+
__nodes__ = [
|
607 |
+
ColorCorrect,
|
608 |
+
ImageCompare,
|
609 |
+
Blur,
|
610 |
+
# DeglazeImage,
|
611 |
+
MaskToImage,
|
612 |
+
ColoredImage,
|
613 |
+
ImagePremultiply,
|
614 |
+
ImageResizeFactor,
|
615 |
+
SaveImageGrid,
|
616 |
+
LoadImageFromUrl,
|
617 |
+
]
|
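For reference, a minimal standalone sketch of the row-major placement used by `SaveImageGrid.create_image_grid`, with a made-up batch of seven 64x64 PIL frames (hypothetical values, not part of the upload); the column and row counts are rounded up so every frame gets a cell:

import math
from PIL import Image

# Hypothetical stand-in for the decoded batch: seven solid 64x64 frames
frames = [Image.new("RGB", (64, 64), (i * 30, 0, 0)) for i in range(7)]

cols = int(math.ceil(math.sqrt(len(frames))))  # 3 columns
rows = int(math.ceil(len(frames) / cols))      # 3 rows, so all 7 frames fit
w, h = frames[0].size
grid = Image.new("RGB", (cols * w, rows * h))

for i, frame in enumerate(frames):
    # Row-major placement, same arithmetic as create_image_grid
    grid.paste(frame, ((i % cols) * w, (i // cols) * h))

grid.save("grid_preview.png")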
comfy_mtb/nodes/io.py
ADDED
@@ -0,0 +1,195 @@
from ..utils import tensor2np, PIL_FILTER_MAP
import uuid
import folder_paths
from ..log import log
import comfy.model_management as model_management
import subprocess
import torch
from pathlib import Path
import numpy as np
from PIL import Image
from typing import Optional, List


class ExportWithFfmpeg:
    """Export with FFmpeg (Experimental)"""

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "images": ("IMAGE",),
                # "frames": ("FRAMES",),
                "fps": ("FLOAT", {"default": 24, "min": 1}),
                "prefix": ("STRING", {"default": "export"}),
                "format": (["mov", "mp4", "mkv", "avi"], {"default": "mov"}),
                "codec": (
                    ["prores_ks", "libx264", "libx265"],
                    {"default": "prores_ks"},
                ),
            }
        }

    RETURN_TYPES = ("VIDEO",)
    OUTPUT_NODE = True
    FUNCTION = "export_prores"
    CATEGORY = "mtb/IO"

    def export_prores(
        self,
        images: torch.Tensor,
        fps: float,
        prefix: str,
        format: str,
        codec: str,
    ):
        if images.size(0) == 0:
            return ("",)
        output_dir = Path(folder_paths.get_output_directory())
        pix_fmt = "rgb48le" if codec == "prores_ks" else "yuv420p"
        file_ext = format
        file_id = f"{prefix}_{uuid.uuid4()}.{file_ext}"

        log.debug(f"Exporting to {output_dir / file_id}")

        frames = tensor2np(images)
        log.debug(f"Frames type {type(frames[0])}")
        log.debug(f"Exporting {len(frames)} frames")

        frames = [frame.astype(np.uint16) * 257 for frame in frames]

        height, width, _ = frames[0].shape

        out_path = (output_dir / file_id).as_posix()

        # Prepare the FFmpeg command
        command = [
            "ffmpeg",
            "-y",
            "-f",
            "rawvideo",
            "-vcodec",
            "rawvideo",
            "-s",
            f"{width}x{height}",
            "-pix_fmt",
            pix_fmt,
            "-r",
            str(fps),
            "-i",
            "-",
            "-c:v",
            codec,
            "-r",
            str(fps),
            "-y",
            out_path,
        ]

        process = subprocess.Popen(command, stdin=subprocess.PIPE)

        for frame in frames:
            model_management.throw_exception_if_processing_interrupted()
            process.stdin.write(frame.tobytes())

        process.stdin.close()
        process.wait()

        return (out_path,)


def prepare_animated_batch(
    batch: torch.Tensor,
    pingpong=False,
    resize_by=1.0,
    resample_filter: Optional[Image.Resampling] = None,
    image_type=np.uint8,
) -> List[Image.Image]:
    images = tensor2np(batch)
    images = [frame.astype(image_type) for frame in images]

    height, width, _ = batch[0].shape

    if pingpong:
        reversed_frames = images[::-1]
        images.extend(reversed_frames)
    pil_images = [Image.fromarray(frame) for frame in images]

    # Resize frames if necessary
    if abs(resize_by - 1.0) > 1e-6:
        new_width = int(width * resize_by)
        new_height = int(height * resize_by)
        pil_images_resized = [
            frame.resize((new_width, new_height), resample=resample_filter)
            for frame in pil_images
        ]
        pil_images = pil_images_resized

    return pil_images


# todo: deprecate for apng
class SaveGif:
    """Save the images from the batch as a GIF"""

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "image": ("IMAGE",),
                "fps": ("INT", {"default": 12, "min": 1, "max": 120}),
                "resize_by": ("FLOAT", {"default": 1.0, "min": 0.1}),
                "optimize": ("BOOLEAN", {"default": False}),
                "pingpong": ("BOOLEAN", {"default": False}),
            },
            "optional": {
                "resample_filter": (list(PIL_FILTER_MAP.keys()),),
            },
        }

    RETURN_TYPES = ()
    OUTPUT_NODE = True
    CATEGORY = "mtb/IO"
    FUNCTION = "save_gif"

    def save_gif(
        self,
        image,
        fps=12,
        resize_by=1.0,
        optimize=False,
        pingpong=False,
        resample_filter=None,
    ):
        if image.size(0) == 0:
            return ("",)

        if resample_filter is not None:
            resample_filter = PIL_FILTER_MAP.get(resample_filter)

        pil_images = prepare_animated_batch(
            image,
            pingpong,
            resize_by,
            resample_filter,
        )

        ruuid = uuid.uuid4()
        ruuid = ruuid.hex[:10]
        out_path = f"{folder_paths.output_directory}/{ruuid}.gif"

        # Create the GIF from PIL images
        pil_images[0].save(
            out_path,
            save_all=True,
            append_images=pil_images[1:],
            optimize=optimize,
            duration=int(1000 / fps),
            loop=0,
        )

        results = [{"filename": f"{ruuid}.gif", "subfolder": "", "type": "output"}]
        return {"ui": {"gif": results}}


__nodes__ = [SaveGif, ExportWithFfmpeg]
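The `ExportWithFfmpeg` node above streams raw frames into FFmpeg's stdin. A minimal sketch of that pipe pattern, runnable on its own if `ffmpeg` is on the PATH; the 8-bit `rgb24` input and explicit `yuv420p` output here are simplifications of the node's 16-bit `rgb48le` ProRes path, and the clip content and filename are made up:

import subprocess
import numpy as np

fps, width, height = 24, 320, 240
# Hypothetical clip: 48 frames of a slowly brightening gray
frames = [np.full((height, width, 3), i * 5, dtype=np.uint8) for i in range(48)]

command = [
    "ffmpeg", "-y",
    "-f", "rawvideo", "-vcodec", "rawvideo",
    "-s", f"{width}x{height}",
    "-pix_fmt", "rgb24",    # layout of the bytes piped in
    "-r", str(fps),
    "-i", "-",              # read the raw frames from stdin
    "-c:v", "libx264",
    "-pix_fmt", "yuv420p",  # widely playable output format
    "out.mp4",
]

proc = subprocess.Popen(command, stdin=subprocess.PIPE)
for frame in frames:
    proc.stdin.write(frame.tobytes())
proc.stdin.close()
proc.wait()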
comfy_mtb/nodes/latent_processing.py
ADDED
@@ -0,0 +1,33 @@
import torch


class LatentLerp:
    """Linear interpolation (blend) between two latent vectors"""

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "A": ("LATENT",),
                "B": ("LATENT",),
                "t": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}),
            }
        }

    RETURN_TYPES = ("LATENT",)
    FUNCTION = "lerp_latent"

    CATEGORY = "mtb/latent"

    def lerp_latent(self, A, B, t):
        a = A.copy()

        # Blend out-of-place: A.copy() is a shallow dict copy, so writing the result
        # into a["samples"] with out= would also mutate the upstream latent.
        a["samples"] = torch.lerp(A["samples"], B["samples"], t)

        return (a,)


__nodes__ = [
    LatentLerp,
]
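To make the blend concrete: with the usual ComfyUI latent layout (a dict holding a `samples` tensor of shape Bx4xH/8xW/8), `torch.lerp(A, B, t)` computes `A + (B - A) * t`. A tiny check with made-up latents:

import torch

A = {"samples": torch.zeros(1, 4, 8, 8)}  # hypothetical latent for a 64x64 image
B = {"samples": torch.ones(1, 4, 8, 8)}

t = 0.25
blended = torch.lerp(A["samples"], B["samples"], t)
print(blended.mean())  # tensor(0.2500)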
comfy_mtb/nodes/mask.py
ADDED
@@ -0,0 +1,105 @@
from rembg import remove
from ..utils import pil2tensor, tensor2pil
from PIL import Image
import comfy.utils


class ImageRemoveBackgroundRembg:
    """Removes the background from the input using Rembg."""

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "image": ("IMAGE",),
                "alpha_matting": (
                    "BOOLEAN",
                    {"default": False},
                ),
                "alpha_matting_foreground_threshold": (
                    "INT",
                    {"default": 240, "min": 0, "max": 255},
                ),
                "alpha_matting_background_threshold": (
                    "INT",
                    {"default": 10, "min": 0, "max": 255},
                ),
                "alpha_matting_erode_size": (
                    "INT",
                    {"default": 10, "min": 0, "max": 255},
                ),
                "post_process_mask": (
                    "BOOLEAN",
                    {"default": False},
                ),
                "bgcolor": (
                    "COLOR",
                    {"default": "#000000"},
                ),
            },
        }

    RETURN_TYPES = (
        "IMAGE",
        "MASK",
        "IMAGE",
    )
    RETURN_NAMES = (
        "Image (rgba)",
        "Mask",
        "Image",
    )
    FUNCTION = "remove_background"
    CATEGORY = "mtb/image"

    # bgcolor: Optional[Tuple[int, int, int, int]]
    def remove_background(
        self,
        image,
        alpha_matting,
        alpha_matting_foreground_threshold,
        alpha_matting_background_threshold,
        alpha_matting_erode_size,
        post_process_mask,
        bgcolor,
    ):
        pbar = comfy.utils.ProgressBar(image.size(0))
        images = tensor2pil(image)

        out_img = []
        out_mask = []
        out_img_on_bg = []

        for img in images:
            img_rm = remove(
                data=img,
                alpha_matting=alpha_matting,
                alpha_matting_foreground_threshold=alpha_matting_foreground_threshold,
                alpha_matting_background_threshold=alpha_matting_background_threshold,
                alpha_matting_erode_size=alpha_matting_erode_size,
                session=None,
                only_mask=False,
                post_process_mask=post_process_mask,
                bgcolor=None,
            )

            # extract the alpha to a new image
            mask = img_rm.getchannel(3)

            # add our bgcolor behind the image
            image_on_bg = Image.new("RGBA", img_rm.size, bgcolor)

            image_on_bg.paste(img_rm, mask=mask)

            out_img.append(img_rm)
            out_mask.append(mask)
            out_img_on_bg.append(image_on_bg)

            pbar.update(1)

        return (pil2tensor(out_img), pil2tensor(out_mask), pil2tensor(out_img_on_bg))


__nodes__ = [
    ImageRemoveBackgroundRembg,
]
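The node's three outputs come from a simple alpha split and composite. A small PIL-only sketch of that step, using a made-up RGBA cutout in place of rembg's actual result:

from PIL import Image

cutout = Image.new("RGBA", (64, 64), (255, 0, 0, 128))  # hypothetical rembg output

alpha = cutout.getchannel(3)                       # -> the "Mask" output
on_bg = Image.new("RGBA", cutout.size, "#00FF00")  # the chosen bgcolor
on_bg.paste(cutout, mask=alpha)                    # -> the flattened "Image" output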
comfy_mtb/nodes/number.py
ADDED
@@ -0,0 +1,87 @@
class IntToBool:
    """Basic int to bool conversion"""

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "int": (
                    "INT",
                    {
                        "default": 0,
                    },
                ),
            }
        }

    RETURN_TYPES = ("BOOLEAN",)
    FUNCTION = "int_to_bool"
    CATEGORY = "mtb/number"

    def int_to_bool(self, int):
        return (bool(int),)


class IntToNumber:
    """Node addon for the WAS Suite. Converts a "comfy" INT to a NUMBER."""

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "int": (
                    "INT",
                    {
                        "default": 0,
                        "min": -1e9,
                        "max": 1e9,
                        "step": 1,
                        "forceInput": True,
                    },
                ),
            }
        }

    RETURN_TYPES = ("NUMBER",)
    FUNCTION = "int_to_number"
    CATEGORY = "mtb/number"

    def int_to_number(self, int):
        return (int,)


class FloatToNumber:
    """Node addon for the WAS Suite. Converts a "comfy" FLOAT to a NUMBER."""

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "float": (
                    "FLOAT",
                    {
                        "default": 0,
                        "min": -1e9,
                        "max": 1e9,
                        "step": 1,
                        "forceInput": True,
                    },
                ),
            }
        }

    RETURN_TYPES = ("NUMBER",)
    FUNCTION = "float_to_number"
    CATEGORY = "mtb/number"

    def float_to_number(self, float):
        return (float,)


__nodes__ = [
    FloatToNumber,
    IntToBool,
    IntToNumber,
]
comfy_mtb/nodes/transform.py
ADDED
@@ -0,0 +1,110 @@
import torch
import torchvision.transforms.functional as TF
from ..utils import log, hex_to_rgb, tensor2pil, pil2tensor
from math import sqrt, ceil
from typing import cast
from PIL import Image


class TransformImage:
    """Apply a 2D transform (translation, zoom, rotation and shear) to an image batch,
    with configurable border handling.

    It returns a tensor representing the transformed images, with the same shape as the input tensor.
    """

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "image": ("IMAGE",),
                "x": ("FLOAT", {"default": 0, "step": 1, "min": -4096, "max": 4096}),
                "y": ("FLOAT", {"default": 0, "step": 1, "min": -4096, "max": 4096}),
                "zoom": ("FLOAT", {"default": 1.0, "min": 0.001, "step": 0.01}),
                "angle": ("FLOAT", {"default": 0, "step": 1, "min": -360, "max": 360}),
                "shear": (
                    "FLOAT",
                    {"default": 0, "step": 1, "min": -4096, "max": 4096},
                ),
                "border_handling": (
                    ["edge", "constant", "reflect", "symmetric"],
                    {"default": "edge"},
                ),
                "constant_color": ("COLOR", {"default": "#000000"}),
            },
        }

    FUNCTION = "transform"
    RETURN_TYPES = ("IMAGE",)
    CATEGORY = "mtb/transform"

    def transform(
        self,
        image: torch.Tensor,
        x: float,
        y: float,
        zoom: float,
        angle: float,
        shear: float,
        border_handling="edge",
        constant_color=None,
    ):
        x = int(x)
        y = int(y)
        angle = int(angle)

        log.debug(f"Zoom: {zoom} | x: {x}, y: {y}, angle: {angle}, shear: {shear}")

        if image.size(0) == 0:
            return (torch.zeros(0),)
        transformed_images = []
        frames_count, frame_height, frame_width, frame_channel_count = image.size()

        new_height, new_width = int(frame_height * zoom), int(frame_width * zoom)

        log.debug(f"New height: {new_height}, New width: {new_width}")

        # - Calculate diagonal of the original image
        diagonal = sqrt(frame_width**2 + frame_height**2)
        max_padding = ceil(diagonal * zoom - min(frame_width, frame_height))
        # Calculate padding for zoom
        pw = int(frame_width - new_width)
        ph = int(frame_height - new_height)

        pw += abs(max_padding)
        ph += abs(max_padding)

        padding = [max(0, pw + x), max(0, ph + y), max(0, pw - x), max(0, ph - y)]

        constant_color = hex_to_rgb(constant_color)
        log.debug(f"Fill Tuple: {constant_color}")

        for img in tensor2pil(image):
            img = TF.pad(
                img,  # transformed_frame,
                padding=padding,
                padding_mode=border_handling,
                fill=constant_color or 0,
            )

            img = cast(
                Image.Image,
                TF.affine(img, angle=angle, scale=zoom, translate=[x, y], shear=shear),
            )

            left = abs(padding[0])
            upper = abs(padding[1])
            right = img.width - abs(padding[2])
            bottom = img.height - abs(padding[3])

            # log.debug("crop is [:,top:bottom, left:right] for tensors")
            log.debug("crop is [left, top, right, bottom] for PIL")
            log.debug(f"crop is {left}, {upper}, {right}, {bottom}")
            img = img.crop((left, upper, right, bottom))

            transformed_images.append(img)

        return (pil2tensor(transformed_images),)


__nodes__ = [TransformImage]
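A worked example of the padding that `TransformImage.transform` hands to `TF.pad`, with made-up values (a 512x512 frame, zoom 1.5, pan x=10, y=0). The padding is presumably over-sized toward the zoomed diagonal so the affine transform stays over real pixels, and the final crop trims back to the padded origin:

from math import sqrt, ceil

frame_w = frame_h = 512
zoom, x, y = 1.5, 10, 0

new_w, new_h = int(frame_w * zoom), int(frame_h * zoom)
diagonal = sqrt(frame_w**2 + frame_h**2)
max_padding = ceil(diagonal * zoom - min(frame_w, frame_h))  # 575

pw = int(frame_w - new_w) + abs(max_padding)  # -256 + 575 = 319
ph = int(frame_h - new_h) + abs(max_padding)  # 319

# [left, top, right, bottom], as passed to TF.pad
padding = [max(0, pw + x), max(0, ph + y), max(0, pw - x), max(0, ph - y)]
print(padding)  # [329, 319, 309, 319]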
comfy_mtb/nodes/video.py
ADDED
@@ -0,0 +1,251 @@
import os
import re
import glob
import torch
import numpy as np
import hashlib
from PIL import Image, ImageOps
from PIL.PngImagePlugin import PngInfo
import folder_paths
from pathlib import Path
import json

from ..log import log


class LoadImageSequence:
    """Load an image sequence from a folder. The current frame is used to determine which image to load.

    Usually used in conjunction with the `Primitive` node set to increment to load a sequence of images from a folder.
    Use -1 to load all matching frames as a batch.
    """

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "path": ("STRING", {"default": "videos/####.png"}),
                "current_frame": (
                    "INT",
                    {"default": 0, "min": -1, "max": 9999999},
                ),
            }
        }

    CATEGORY = "mtb/IO"
    FUNCTION = "load_image"
    RETURN_TYPES = (
        "IMAGE",
        "MASK",
        "INT",
    )
    RETURN_NAMES = (
        "image",
        "mask",
        "current_frame",
    )

    def load_image(self, path=None, current_frame=0):
        load_all = current_frame == -1

        if load_all:
            log.debug(f"Loading all frames from {path}")
            frames = resolve_all_frames(path)
            log.debug(f"Found {len(frames)} frames")

            imgs = []
            masks = []

            for frame in frames:
                img, mask = img_from_path(frame)
                imgs.append(img)
                masks.append(mask)

            out_img = torch.cat(imgs, dim=0)
            out_mask = torch.cat(masks, dim=0)

            return (
                out_img,
                out_mask,
                current_frame,  # keep the output count consistent with RETURN_TYPES
            )

        log.debug(f"Loading image: {path}, {current_frame}")
        print(f"Loading image: {path}, {current_frame}")
        resolved_path = resolve_path(path, current_frame)
        image_path = folder_paths.get_annotated_filepath(resolved_path)
        image, mask = img_from_path(image_path)
        return (
            image,
            mask,
            current_frame,
        )

    @staticmethod
    def IS_CHANGED(path="", current_frame=0):
        print(f"Checking if changed: {path}, {current_frame}")
        resolved_path = resolve_path(path, current_frame)
        image_path = folder_paths.get_annotated_filepath(resolved_path)
        if os.path.exists(image_path):
            m = hashlib.sha256()
            with open(image_path, "rb") as f:
                m.update(f.read())
            return m.digest().hex()
        return "NONE"

    # @staticmethod
    # def VALIDATE_INPUTS(path="", current_frame=0):
    #     print(f"Validating inputs: {path}, {current_frame}")
    #     resolved_path = resolve_path(path, current_frame)
    #     if not folder_paths.exists_annotated_filepath(resolved_path):
    #         return f"Invalid image file: {resolved_path}"
    #     return True


def img_from_path(path):
    img = Image.open(path)
    img = ImageOps.exif_transpose(img)
    image = img.convert("RGB")
    image = np.array(image).astype(np.float32) / 255.0
    image = torch.from_numpy(image)[None,]
    if "A" in img.getbands():
        mask = np.array(img.getchannel("A")).astype(np.float32) / 255.0
        mask = 1.0 - torch.from_numpy(mask)
    else:
        mask = torch.zeros((64, 64), dtype=torch.float32, device="cpu")
    return (
        image,
        mask,
    )


def resolve_all_frames(pattern):
    folder_path, file_pattern = os.path.split(pattern)

    log.debug(f"Resolving all frames in {folder_path}")
    frames = []
    hash_count = file_pattern.count("#")
    frame_pattern = re.sub(r"#+", "*", file_pattern)

    log.debug(f"Found pattern: {frame_pattern}")

    matching_files = glob.glob(os.path.join(folder_path, frame_pattern))

    log.debug(f"Found {len(matching_files)} matching files")

    frame_regex = re.escape(file_pattern).replace(r"\#", r"(\d+)")

    frame_number_regex = re.compile(frame_regex)

    for file in matching_files:
        match = frame_number_regex.search(file)
        if match:
            frame_number = match.group(1)
            log.debug(f"Found frame number: {frame_number}")
            # resolved_file = pattern.replace("*" * frame_number.count("#"), frame_number)
            frames.append(file)

    frames.sort()  # Sort frames alphabetically
    return frames


def resolve_path(path, frame):
    hashes = path.count("#")
    padded_number = str(frame).zfill(hashes)
    return re.sub("#+", padded_number, path)


class SaveImageSequence:
    """Save an image sequence to a folder. The current frame is used to determine which image to save.

    This is merely a wrapper around the `save_images` function with formatting for the output folder and filename.
    """

    def __init__(self):
        self.output_dir = folder_paths.get_output_directory()
        self.type = "output"

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "images": ("IMAGE",),
                "filename_prefix": ("STRING", {"default": "Sequence"}),
                "current_frame": ("INT", {"default": 0, "min": 0, "max": 9999999}),
            },
            "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"},
        }

    RETURN_TYPES = ()
    FUNCTION = "save_images"

    OUTPUT_NODE = True

    CATEGORY = "mtb/IO"

    def save_images(
        self,
        images,
        filename_prefix="Sequence",
        current_frame=0,
        prompt=None,
        extra_pnginfo=None,
    ):
        # full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path(filename_prefix, self.output_dir, images[0].shape[1], images[0].shape[0])
        # results = list()
        # for image in images:
        #     i = 255. * image.cpu().numpy()
        #     img = Image.fromarray(np.clip(i, 0, 255).astype(np.uint8))
        #     metadata = PngInfo()
        #     if prompt is not None:
        #         metadata.add_text("prompt", json.dumps(prompt))
        #     if extra_pnginfo is not None:
        #         for x in extra_pnginfo:
        #             metadata.add_text(x, json.dumps(extra_pnginfo[x]))

        #     file = f"{filename}_{counter:05}_.png"
        #     img.save(os.path.join(full_output_folder, file), pnginfo=metadata, compress_level=4)
        #     results.append({
        #         "filename": file,
        #         "subfolder": subfolder,
        #         "type": self.type
        #     })
        #     counter += 1

        if len(images) > 1:
            raise ValueError("Can only save one image at a time")

        resolved_path = Path(self.output_dir) / filename_prefix
        resolved_path.mkdir(parents=True, exist_ok=True)

        resolved_img = resolved_path / f"{filename_prefix}_{current_frame:05}.png"

        output_image = images[0].cpu().numpy()
        img = Image.fromarray(np.clip(output_image * 255.0, 0, 255).astype(np.uint8))
        metadata = PngInfo()
        if prompt is not None:
            metadata.add_text("prompt", json.dumps(prompt))
        if extra_pnginfo is not None:
            for x in extra_pnginfo:
                metadata.add_text(x, json.dumps(extra_pnginfo[x]))

        img.save(resolved_img, pnginfo=metadata, compress_level=4)
        return {
            "ui": {
                "images": [
                    {
                        "filename": resolved_img.name,
                        "subfolder": resolved_path.name,
                        "type": self.type,
                    }
                ]
            }
        }


__nodes__ = [
    LoadImageSequence,
    SaveImageSequence,
]
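As a quick sanity check of the `#` padding tokens used in LoadImageSequence paths, `resolve_path` (copied verbatim from above) behaves like this with some made-up paths:

import re

def resolve_path(path, frame):
    hashes = path.count("#")
    padded_number = str(frame).zfill(hashes)
    return re.sub("#+", padded_number, path)

print(resolve_path("videos/####.png", 12))     # videos/0012.png
print(resolve_path("shots/beauty_##.png", 7))  # shots/beauty_07.png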