Upload folder using huggingface_hub
This view is limited to 50 files because it contains too many changes. See raw diff.
- README.md +2 -1
- app.py +12 -41
- chain_injectors/diffsynth_controlnet_injector.py +75 -0
- chain_injectors/flux1_ipadapter_injector.py +46 -0
- chain_injectors/newbie_lora_injector.py +63 -0
- chain_injectors/reference_latent_injector.py +157 -0
- chain_injectors/sd3_ipadapter_injector.py +66 -0
- chain_injectors/style_injector.py +71 -0
- comfy_integration/nodes.py +5 -0
- comfy_integration/setup.py +30 -13
- core/generation_logic.py +0 -15
- core/model_manager.py +3 -4
- core/pipelines/controlnet_preprocessor.py +0 -143
- core/pipelines/sd_image_pipeline.py +189 -64
- core/pipelines/workflow_recipes/_partials/{_base_sampler.yaml → _base_sampler_sd.yaml} +15 -2
- core/pipelines/workflow_recipes/_partials/conditioning/anima.yaml +54 -0
- core/pipelines/workflow_recipes/_partials/conditioning/chroma1-radiance.yaml +59 -0
- core/pipelines/workflow_recipes/_partials/conditioning/chroma1.yaml +61 -0
- core/pipelines/workflow_recipes/_partials/conditioning/ernie-image.yaml +54 -0
- core/pipelines/workflow_recipes/_partials/conditioning/flux1.yaml +64 -0
- core/pipelines/workflow_recipes/_partials/conditioning/flux2-kv.yaml +104 -0
- core/pipelines/workflow_recipes/_partials/conditioning/flux2.yaml +96 -0
- core/pipelines/workflow_recipes/_partials/conditioning/hidream.yaml +53 -0
- core/pipelines/workflow_recipes/_partials/conditioning/hunyuanimage.yaml +42 -0
- core/pipelines/workflow_recipes/_partials/conditioning/longcat-image.yaml +83 -0
- core/pipelines/workflow_recipes/_partials/conditioning/lumina.yaml +51 -0
- core/pipelines/workflow_recipes/_partials/conditioning/newbie-image.yaml +65 -0
- core/pipelines/workflow_recipes/_partials/conditioning/omnigen2.yaml +59 -0
- core/pipelines/workflow_recipes/_partials/conditioning/ovis-image.yaml +50 -0
- core/pipelines/workflow_recipes/_partials/conditioning/qwen-image.yaml +80 -0
- core/pipelines/workflow_recipes/_partials/conditioning/sd15.yaml +63 -0
- core/pipelines/workflow_recipes/_partials/conditioning/sd35.yaml +52 -0
- core/pipelines/workflow_recipes/_partials/conditioning/sdxl.yaml +15 -22
- core/pipelines/workflow_recipes/_partials/conditioning/z-image.yaml +65 -0
- core/pipelines/workflow_recipes/_partials/input/hires_fix.yaml +4 -3
- core/pipelines/workflow_recipes/_partials/input/img2img.yaml +3 -2
- core/pipelines/workflow_recipes/_partials/input/inpaint.yaml +6 -8
- core/pipelines/workflow_recipes/_partials/input/outpaint.yaml +14 -11
- core/pipelines/workflow_recipes/_partials/input/txt2img.yaml +2 -8
- core/pipelines/workflow_recipes/_partials/input/txt2img_chroma_radiance_latent.yaml +11 -0
- core/pipelines/workflow_recipes/_partials/input/txt2img_flux2_latent.yaml +11 -0
- core/pipelines/workflow_recipes/_partials/input/txt2img_hunyuan_latent.yaml +11 -0
- core/pipelines/workflow_recipes/_partials/input/txt2img_latent.yaml +11 -0
- core/pipelines/workflow_recipes/_partials/input/txt2img_sd3_latent.yaml +11 -0
- core/pipelines/workflow_recipes/sd_unified_recipe.yaml +3 -5
- core/settings.py +111 -22
- requirements.txt +2 -1
- ui/events.py +849 -269
- ui/layout.py +15 -58
- ui/shared/hires_fix_ui.py +16 -5
README.md
CHANGED
@@ -1,5 +1,5 @@
 ---
-title: ImageGen
+title: ImageGen
 emoji: 🖼
 colorFrom: purple
 colorTo: red
@@ -7,4 +7,5 @@ sdk: gradio
 sdk_version: "5.50.0"
 app_file: app.py
 short_description: Multi-task image generator with dynamic, chainable workflows
+pinned: true
 ---
app.py
CHANGED
@@ -1,7 +1,6 @@
 import spaces
 import os
 import sys
-import requests
 import site
 
 APP_DIR = os.path.dirname(os.path.abspath(__file__))
@@ -45,9 +44,11 @@ def dummy_gpu_for_startup():
     print("--- [GPU Startup] Startup check passed. ---")
     return "Startup check passed."
 
+
 def main():
     from utils.app_utils import print_welcome_message
     from scripts import build_sage_attention
+    from comfy_integration import setup as setup_comfyui
 
     print_welcome_message()
 
@@ -58,7 +59,9 @@ def main():
     except Exception as e:
         print(f"--- [Setup] ❌ SageAttention installation failed: {e}. Continuing with default attention. ---")
 
-
+    print("--- [Setup] Starting ComfyUI initialization ---")
+    setup_comfyui.initialize_comfyui()
+
     print("--- [Setup] Reloading site-packages to detect newly installed packages... ---")
     try:
         site.main()
@@ -66,48 +69,16 @@ def main():
     except Exception as e:
         print(f"--- [Setup] ⚠️ Warning: Could not fully reload site-packages: {e} ---")
 
-
-    )
-    from core import shared_state
-    from core.settings import ALL_MODEL_MAP, ALL_FILE_DOWNLOAD_MAP
-
-    def check_all_model_urls_on_startup():
-        print("--- [Setup] Checking all model URL validity (one-time check) ---")
-        for display_name, model_info in ALL_MODEL_MAP.items():
-            repo_id, filename, _, _ = model_info
-            if not repo_id: continue
-
-            download_info = ALL_FILE_DOWNLOAD_MAP.get(filename, {})
-            repo_file_path = download_info.get('repository_file_path', filename)
-            url = f"https://huggingface.co/{repo_id}/resolve/main/{repo_file_path}"
-
-            try:
-                response = requests.head(url, timeout=5, allow_redirects=True)
-                if response.status_code >= 400:
-                    print(f"❌ Invalid URL for '{display_name}': {url} (Status: {response.status_code})")
-                    shared_state.INVALID_MODEL_URLS[display_name] = True
-            except requests.RequestException as e:
-                print(f"❌ URL check failed for '{display_name}': {e}")
-                shared_state.INVALID_MODEL_URLS[display_name] = True
-        print("--- [Setup] ✅ Finished checking model URLs. ---")
+    print("--- Initiating GPU Startup Check & SageAttention Patch ---")
+    try:
+        dummy_gpu_for_startup()
+    except Exception as e:
+        print(f"--- [GPU Startup] ⚠️ Warning: Startup check failed: {e} ---")
 
-
-    setup_comfyui.initialize_comfyui()
+    from utils.app_utils import load_ipadapter_presets
 
-
+    print("--- Starting Application Setup ---")
 
-    print("--- Building ControlNet preprocessor maps ---")
-    from core.generation_logic import build_reverse_map
-    build_reverse_map()
-    build_preprocessor_model_map()
-    build_preprocessor_parameter_map()
-    print("--- ✅ ControlNet preprocessor setup complete. ---")
-
     print("--- Loading IPAdapter presets ---")
     load_ipadapter_presets()
     print("--- ✅ IPAdapter setup complete. ---")
chain_injectors/diffsynth_controlnet_injector.py
ADDED
@@ -0,0 +1,75 @@
+def inject(assembler, chain_definition, chain_items):
+    if not chain_items:
+        return
+
+    model_sampler_name = chain_definition.get('model_sampler_node')
+    ksampler_name = chain_definition.get('ksampler_node', 'ksampler')
+
+    target_node_id = None
+    target_input_name = 'model'
+
+    if model_sampler_name and model_sampler_name in assembler.node_map:
+        model_sampler_id = assembler.node_map[model_sampler_name]
+        if target_input_name in assembler.workflow[model_sampler_id]['inputs']:
+            target_node_id = model_sampler_id
+            print(f"ControlNet Model Patch injector targeting ModelSamplingAuraFlow node '{model_sampler_name}'.")
+
+    if not target_node_id:
+        if ksampler_name in assembler.node_map:
+            ksampler_id = assembler.node_map[ksampler_name]
+            if target_input_name in assembler.workflow[ksampler_id]['inputs']:
+                target_node_id = ksampler_id
+                print(f"ControlNet Model Patch injector targeting KSampler node '{ksampler_name}'.")
+        else:
+            print(f"Warning: Neither ModelSamplingAuraFlow node '{model_sampler_name}' nor KSampler node '{ksampler_name}' found for ControlNet patch chain. Skipping.")
+            return
+
+    if not target_node_id:
+        print(f"Warning: Could not find a valid 'model' input on target nodes. Skipping ControlNet patch chain.")
+        return
+
+    current_model_connection = assembler.workflow[target_node_id]['inputs'][target_input_name]
+
+    vae_source_str = chain_definition.get('vae_source')
+    vae_connection = None
+    if vae_source_str:
+        try:
+            vae_node_name, vae_idx_str = vae_source_str.split(':')
+            if vae_node_name in assembler.node_map:
+                vae_connection = [assembler.node_map[vae_node_name], int(vae_idx_str)]
+            else:
+                print(f"Warning: VAE source node '{vae_node_name}' not found for ControlNet patch chain. VAE will not be connected.")
+        except ValueError:
+            print(f"Warning: Invalid 'vae_source' format '{vae_source_str}' for ControlNet patch chain. Expected 'node_name:index'. VAE will not be connected.")
+    else:
+        print(f"Warning: 'vae_source' not defined for ControlNet patch chain definition. VAE may not be connected.")
+
+    for item_data in chain_items:
+        patch_loader_id = assembler._get_unique_id()
+        patch_loader_node = assembler._get_node_template("ModelPatchLoader")
+        patch_loader_node['inputs']['name'] = item_data['control_net_name']
+        assembler.workflow[patch_loader_id] = patch_loader_node
+
+        image_loader_id = assembler._get_unique_id()
+        image_loader_node = assembler._get_node_template("LoadImage")
+        image_loader_node['inputs']['image'] = item_data['image']
+        assembler.workflow[image_loader_id] = image_loader_node
+
+        apply_cn_id = assembler._get_unique_id()
+        apply_cn_node = assembler._get_node_template(chain_definition['template'])
+
+        apply_cn_node['inputs']['strength'] = item_data.get('strength', 1.0)
+        apply_cn_node['inputs']['model'] = current_model_connection
+        apply_cn_node['inputs']['model_patch'] = [patch_loader_id, 0]
+        apply_cn_node['inputs']['image'] = [image_loader_id, 0]
+
+        if 'vae' in apply_cn_node['inputs'] and vae_connection:
+            apply_cn_node['inputs']['vae'] = vae_connection
+
+        assembler.workflow[apply_cn_id] = apply_cn_node
+
+        current_model_connection = [apply_cn_id, 0]
+
+    assembler.workflow[target_node_id]['inputs'][target_input_name] = current_model_connection
+
+    print(f"ControlNet Model Patch injector applied. Target 'model' input re-routed through {len(chain_items)} patch(es).")
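Note: every injector added in this commit follows the same daisy-chain rewiring pattern: read the connection currently feeding a target input, splice one node per chain item in front of it, then write the head of the chain back. A minimal, self-contained sketch of that pattern follows; MockAssembler and the "ApplyPatch" template are illustrative stand-ins, not part of the repository (the real WorkflowAssembler supplies node_map, workflow, _get_unique_id(), and _get_node_template()).

# Minimal sketch of the daisy-chain rewiring pattern (illustrative names).
from copy import deepcopy

TEMPLATES = {
    "ApplyPatch": {"class_type": "ApplyPatch",
                   "inputs": {"model": None, "name": None}},
}

class MockAssembler:
    def __init__(self):
        self._next_id = 100
        self.node_map = {"ksampler": "1"}
        self.workflow = {"1": {"class_type": "KSampler",
                               "inputs": {"model": ["0", 0]}}}

    def _get_unique_id(self):
        self._next_id += 1
        return str(self._next_id)

    def _get_node_template(self, name):
        return deepcopy(TEMPLATES[name])

assembler = MockAssembler()
ksampler_id = assembler.node_map["ksampler"]

# Read the connection currently feeding the target input...
current = assembler.workflow[ksampler_id]["inputs"]["model"]
# ...splice one node per chain item in front of it...
for item in [{"name": "patch_a.safetensors"}, {"name": "patch_b.safetensors"}]:
    node_id = assembler._get_unique_id()
    node = assembler._get_node_template("ApplyPatch")
    node["inputs"]["name"] = item["name"]
    node["inputs"]["model"] = current      # consume the previous chain head
    assembler.workflow[node_id] = node
    current = [node_id, 0]                 # this node becomes the new head
# ...then write the head of the chain back to the target input.
assembler.workflow[ksampler_id]["inputs"]["model"] = current

print(assembler.workflow[ksampler_id]["inputs"]["model"])  # -> ['102', 0]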
chain_injectors/flux1_ipadapter_injector.py
ADDED
@@ -0,0 +1,46 @@
+def inject(assembler, chain_definition, chain_items):
+    if not chain_items:
+        return
+
+    ksampler_name = chain_definition.get('ksampler_node', 'ksampler')
+    if ksampler_name not in assembler.node_map:
+        print(f"Warning: KSampler node '{ksampler_name}' not found for Flux1 IPAdapter chain. Skipping.")
+        return
+
+    ksampler_id = assembler.node_map[ksampler_name]
+
+    if 'model' not in assembler.workflow[ksampler_id]['inputs']:
+        print(f"Warning: KSampler node '{ksampler_name}' is missing 'model' input. Skipping Flux1 IPAdapter chain.")
+        return
+
+    current_model_connection = assembler.workflow[ksampler_id]['inputs']['model']
+
+    for item_data in chain_items:
+        image_loader_id = assembler._get_unique_id()
+        image_loader_node = assembler._get_node_template("LoadImage")
+        image_loader_node['inputs']['image'] = item_data['image']
+        assembler.workflow[image_loader_id] = image_loader_node
+
+        ipadapter_loader_id = assembler._get_unique_id()
+        ipadapter_loader_node = assembler._get_node_template("IPAdapterFluxLoader")
+        ipadapter_loader_node['inputs']['ipadapter'] = "ip-adapter.bin"
+        ipadapter_loader_node['inputs']['clip_vision'] = "google/siglip-so400m-patch14-384"
+        ipadapter_loader_node['inputs']['provider'] = "cuda"
+        assembler.workflow[ipadapter_loader_id] = ipadapter_loader_node
+
+        apply_ipa_id = assembler._get_unique_id()
+        apply_ipa_node = assembler._get_node_template("ApplyIPAdapterFlux")
+
+        apply_ipa_node['inputs']['weight'] = item_data['weight']
+        apply_ipa_node['inputs']['start_percent'] = item_data.get('start_percent', 0.0)
+        apply_ipa_node['inputs']['end_percent'] = item_data.get('end_percent', 0.6)
+
+        apply_ipa_node['inputs']['model'] = current_model_connection
+        apply_ipa_node['inputs']['ipadapter_flux'] = [ipadapter_loader_id, 0]
+        apply_ipa_node['inputs']['image'] = [image_loader_id, 0]
+
+        assembler.workflow[apply_ipa_id] = apply_ipa_node
+        current_model_connection = [apply_ipa_id, 0]
+
+    assembler.workflow[ksampler_id]['inputs']['model'] = current_model_connection
+    print(f"Flux1 IPAdapter injector applied. KSampler model input re-routed through {len(chain_items)} IPAdapter(s).")
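Note: for context, a hypothetical payload for this injector; the key names match what inject() reads above, while the file name and values are made up for illustration.

# Hypothetical inputs for flux1_ipadapter_injector.inject().
chain_definition = {"ksampler_node": "ksampler"}   # also the default fallback
chain_items = [
    {"image": "style_ref.png",      # illustrative file in ComfyUI's input dir
     "weight": 0.8,                 # required: read without a default
     "start_percent": 0.0,          # optional, defaults to 0.0
     "end_percent": 0.6},           # optional, defaults to 0.6
]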
chain_injectors/newbie_lora_injector.py
ADDED
@@ -0,0 +1,63 @@
+from copy import deepcopy
+
+def inject(assembler, chain_definition, chain_items):
+    if not chain_items:
+        return
+
+    output_map = chain_definition.get('output_map', {})
+    current_connections = {}
+    for key, type_name in output_map.items():
+        if ':' in str(key):
+            node_name, idx_str = key.split(':')
+            if node_name not in assembler.node_map:
+                print(f"Warning: [NewBie LoRA Injector] Node '{node_name}' in chain's output_map not found. Skipping.")
+                continue
+            node_id = assembler.node_map[node_name]
+            start_output_idx = int(idx_str)
+            current_connections[type_name] = [node_id, start_output_idx]
+        else:
+            print(f"Warning: [NewBie LoRA Injector] output_map key '{key}' is not in 'node:index' format. Skipping this connection.")
+
+    template_name = chain_definition.get('template')
+    if not template_name:
+        print(f"Warning: [NewBie LoRA Injector] No 'template' defined for chain. Skipping.")
+        return
+
+    for item_data in chain_items:
+        template = assembler._get_node_template(template_name)
+        node_data = deepcopy(template)
+
+        node_data['inputs']['lora_name'] = item_data.get('lora_name')
+        node_data['inputs']['strength'] = item_data.get('strength_model', 1.0)
+        node_data['inputs']['enabled'] = True
+
+        if 'model' in current_connections:
+            node_data['inputs']['model'] = current_connections['model']
+        if 'clip' in current_connections:
+            node_data['inputs']['clip'] = current_connections['clip']
+
+        new_node_id = assembler._get_unique_id()
+        assembler.workflow[new_node_id] = node_data
+
+        current_connections['model'] = [new_node_id, 0]
+        current_connections['clip'] = [new_node_id, 1]
+
+    end_input_map = chain_definition.get('end_input_map', {})
+    for type_name, targets in end_input_map.items():
+        if type_name in current_connections:
+            if not isinstance(targets, list):
+                targets = [targets]
+
+            for target_str in targets:
+                try:
+                    end_node_name, end_input_name = target_str.split(':')
+                    if end_node_name in assembler.node_map:
+                        end_node_id = assembler.node_map[end_node_name]
+                        assembler.workflow[end_node_id]['inputs'][end_input_name] = current_connections[type_name]
+                    else:
+                        print(f"Warning: [NewBie LoRA Injector] End node '{end_node_name}' for dynamic chain not found. Skipping connection.")
+                except ValueError:
+                    print(f"Warning: [NewBie LoRA Injector] Invalid target format '{target_str}' in end_input_map. Skipping.")
+
+    if chain_items:
+        print(f"NewBie LoRA injector applied. Re-routed model and clip through {len(chain_items)} LoRA(s).")
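Note: a hypothetical chain definition for this injector follows. The key formats ('node:output_index' in output_map, 'node:input_name' in end_input_map) are exactly what inject() parses above; every node and template name here is an illustrative assumption, not taken from the repository's recipes.

# Hypothetical chain definition for newbie_lora_injector.inject().
chain_definition = {
    "template": "NewBieLoraLoader",           # assumed template name
    "output_map": {"model_loader:0": "model", "clip_loader:0": "clip"},
    "end_input_map": {
        "model": "ksampler:model",
        "clip": ["positive_prompt:clip", "negative_prompt:clip"],
    },
}
chain_items = [{"lora_name": "my_lora.safetensors", "strength_model": 0.7}]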
chain_injectors/reference_latent_injector.py
ADDED
@@ -0,0 +1,157 @@
+def inject(assembler, chain_definition, chain_items):
+    if not chain_items:
+        return
+
+    guider_node_name = chain_definition.get('guider_node')
+    guider_target_inputs = chain_definition.get('guider_target_inputs', [])
+    start_connections_map = chain_definition.get('start_connections', {})
+    vae_node_name = chain_definition.get('vae_node', 'vae_loader')
+
+    if guider_node_name and guider_node_name in assembler.node_map and guider_target_inputs:
+        guider_id = assembler.node_map[guider_node_name]
+        if vae_node_name not in assembler.node_map:
+            print(f"Warning: VAE node '{vae_node_name}' not found for Guider chain. Skipping.")
+            return
+        vae_node_id = assembler.node_map[vae_node_name]
+
+        print(f"ReferenceLatent injector targeting DualCFGGuider node '{guider_node_name}'.")
+
+        current_connections = {}
+        for target_input in guider_target_inputs:
+            conn_str = start_connections_map.get(target_input)
+            if not conn_str:
+                print(f"Warning: No start connection defined for '{target_input}' in Guider chain. Skipping this input.")
+                continue
+            try:
+                node_name, idx_str = conn_str.split(':')
+                node_id = assembler.node_map[node_name]
+                current_connections[target_input] = [node_id, int(idx_str)]
+            except (ValueError, KeyError):
+                print(f"Warning: Invalid start connection '{conn_str}' for '{target_input}'. Skipping.")
+
+        encoded_latents = []
+        for i, img_filename in enumerate(chain_items):
+            load_id = assembler._get_unique_id()
+            load_node = assembler._get_node_template("LoadImage")
+            load_node['inputs']['image'] = img_filename
+            assembler.workflow[load_id] = load_node
+
+            scale_id = assembler._get_unique_id()
+            scale_node = assembler._get_node_template("ImageScaleToTotalPixels")
+            scale_node['inputs']['megapixels'] = 1.0
+            scale_node['inputs']['upscale_method'] = "lanczos"
+            scale_node['inputs']['image'] = [load_id, 0]
+            assembler.workflow[scale_id] = scale_node
+
+            vae_encode_id = assembler._get_unique_id()
+            vae_encode_node = assembler._get_node_template("VAEEncode")
+            vae_encode_node['inputs']['pixels'] = [scale_id, 0]
+            vae_encode_node['inputs']['vae'] = [vae_node_id, 0]
+            assembler.workflow[vae_encode_id] = vae_encode_node
+            encoded_latents.append([vae_encode_id, 0])
+
+        for target_input_name, start_connection in current_connections.items():
+            current_chain_head = start_connection
+            for i, latent_conn in enumerate(encoded_latents):
+                ref_latent_id = assembler._get_unique_id()
+                ref_latent_node = assembler._get_node_template("ReferenceLatent")
+                ref_latent_node['inputs']['conditioning'] = current_chain_head
+                ref_latent_node['inputs']['latent'] = latent_conn
+                ref_latent_node['_meta']['title'] = f"{target_input_name} RefLatent {i+1}"
+                assembler.workflow[ref_latent_id] = ref_latent_node
+                current_chain_head = [ref_latent_id, 0]
+
+            assembler.workflow[guider_id]['inputs'][target_input_name] = current_chain_head
+            print(f" - Input '{target_input_name}' of node '{guider_node_name}' re-routed through {len(chain_items)} reference images.")
+
+        return
+
+    flux_guidance_name = chain_definition.get('flux_guidance_node')
+    ksampler_name = chain_definition.get('ksampler_node', 'ksampler')
+
+    if ksampler_name not in assembler.node_map:
+        print(f"Warning: KSampler node '{ksampler_name}' not found for ReferenceLatent chain. Skipping.")
+        return
+    if vae_node_name not in assembler.node_map:
+        print(f"Warning: VAE loader node '{vae_node_name}' not found for ReferenceLatent chain. Skipping.")
+        return
+
+    ksampler_id = assembler.node_map[ksampler_name]
+    vae_node_id = assembler.node_map[vae_node_name]
+
+    pos_target_node_id = None
+    pos_target_input_name = None
+    if flux_guidance_name and flux_guidance_name in assembler.node_map:
+        flux_guidance_id = assembler.node_map[flux_guidance_name]
+        if 'conditioning' in assembler.workflow[flux_guidance_id]['inputs']:
+            pos_target_node_id = flux_guidance_id
+            pos_target_input_name = 'conditioning'
+            print(f"ReferenceLatent injector targeting FluxGuidance node '{flux_guidance_name}' for positive chain.")
+
+    if not pos_target_node_id:
+        if 'positive' in assembler.workflow[ksampler_id]['inputs']:
+            pos_target_node_id = ksampler_id
+            pos_target_input_name = 'positive'
+            print(f"ReferenceLatent injector targeting KSampler node '{ksampler_name}' for positive chain.")
+        else:
+            print(f"Warning: Could not find a valid positive injection point for ReferenceLatent chain. Skipping.")
+            return
+
+    current_pos_conditioning = assembler.workflow[pos_target_node_id]['inputs'][pos_target_input_name]
+
+    neg_target_node_id = ksampler_id
+    neg_target_input_name = 'negative'
+    if 'negative' not in assembler.workflow[neg_target_node_id]['inputs']:
+        print(f"Warning: KSampler node '{ksampler_name}' has no 'negative' input. Skipping negative ReferenceLatent chain.")
+        neg_target_node_id = None
+
+    current_neg_conditioning = None
+    if neg_target_node_id:
+        current_neg_conditioning = assembler.workflow[neg_target_node_id]['inputs'][neg_target_input_name]
+
+    for i, img_filename in enumerate(chain_items):
+        load_id = assembler._get_unique_id()
+        load_node = assembler._get_node_template("LoadImage")
+        load_node['inputs']['image'] = img_filename
+        load_node['_meta']['title'] = f"Load Reference Image {i+1}"
+        assembler.workflow[load_id] = load_node
+
+        scale_id = assembler._get_unique_id()
+        scale_node = assembler._get_node_template("ImageScaleToTotalPixels")
+        scale_node['inputs']['megapixels'] = 1.0
+        scale_node['inputs']['upscale_method'] = "lanczos"
+        scale_node['inputs']['image'] = [load_id, 0]
+        scale_node['_meta']['title'] = f"Scale Reference {i+1}"
+        assembler.workflow[scale_id] = scale_node
+
+        vae_encode_id = assembler._get_unique_id()
+        vae_encode_node = assembler._get_node_template("VAEEncode")
+        vae_encode_node['inputs']['pixels'] = [scale_id, 0]
+        vae_encode_node['inputs']['vae'] = [vae_node_id, 0]
+        vae_encode_node['_meta']['title'] = f"VAE Encode Reference {i+1}"
+        assembler.workflow[vae_encode_id] = vae_encode_node
+
+        latent_conn = [vae_encode_id, 0]
+
+        pos_ref_latent_id = assembler._get_unique_id()
+        pos_ref_latent_node = assembler._get_node_template("ReferenceLatent")
+        pos_ref_latent_node['inputs']['conditioning'] = current_pos_conditioning
+        pos_ref_latent_node['inputs']['latent'] = latent_conn
+        pos_ref_latent_node['_meta']['title'] = f"Positive ReferenceLatent {i+1}"
+        assembler.workflow[pos_ref_latent_id] = pos_ref_latent_node
+        current_pos_conditioning = [pos_ref_latent_id, 0]
+
+        if neg_target_node_id:
+            neg_ref_latent_id = assembler._get_unique_id()
+            neg_ref_latent_node = assembler._get_node_template("ReferenceLatent")
+            neg_ref_latent_node['inputs']['conditioning'] = current_neg_conditioning
+            neg_ref_latent_node['inputs']['latent'] = latent_conn
+            neg_ref_latent_node['_meta']['title'] = f"Negative ReferenceLatent {i+1}"
+            assembler.workflow[neg_ref_latent_id] = neg_ref_latent_node
+            current_neg_conditioning = [neg_ref_latent_id, 0]
+
+    assembler.workflow[pos_target_node_id]['inputs'][pos_target_input_name] = current_pos_conditioning
+    if neg_target_node_id:
+        assembler.workflow[neg_target_node_id]['inputs'][neg_target_input_name] = current_neg_conditioning
+
+    print(f"ReferenceLatent injector applied. Re-routed inputs through {len(chain_items)} reference images.")
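Note: this injector has two code paths — a guider path (nodes with explicitly named conditioning inputs, e.g. a DualCFGGuider) and a fallback FluxGuidance/KSampler path. A hypothetical definition exercising the guider path follows; keys mirror what inject() reads, and all node names are illustrative. Unlike the other injectors, chain_items here is a list of plain image filenames, not dicts.

# Hypothetical chain definition for the guider path of
# reference_latent_injector.inject(); node names are illustrative.
chain_definition = {
    "guider_node": "dual_cfg_guider",
    "guider_target_inputs": ["cond1", "cond2"],
    "start_connections": {
        "cond1": "positive_prompt:0",
        "cond2": "image_prompt:0",
    },
    "vae_node": "vae_loader",        # optional; 'vae_loader' is the default
}
chain_items = ["ref_1.png", "ref_2.png"]   # plain filenames, not dicts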
chain_injectors/sd3_ipadapter_injector.py
ADDED
@@ -0,0 +1,66 @@
+def inject(assembler, chain_definition, chain_items):
+    if not chain_items:
+        return
+
+    ksampler_name = chain_definition.get('ksampler_node', 'ksampler')
+    if ksampler_name not in assembler.node_map:
+        print(f"Warning: KSampler node '{ksampler_name}' not found for SD3 IPAdapter chain. Skipping.")
+        return
+
+    ksampler_id = assembler.node_map[ksampler_name]
+
+    if 'model' not in assembler.workflow[ksampler_id]['inputs']:
+        print(f"Warning: KSampler node '{ksampler_name}' is missing 'model' input. Skipping SD3 IPAdapter chain.")
+        return
+
+    current_model_connection = assembler.workflow[ksampler_id]['inputs']['model']
+
+    clip_vision_loader_id = assembler._get_unique_id()
+    clip_vision_loader_node = assembler._get_node_template("CLIPVisionLoader")
+    clip_vision_loader_node['inputs']['clip_name'] = "sigclip_vision_patch14_384.safetensors"
+    assembler.workflow[clip_vision_loader_id] = clip_vision_loader_node
+
+    ipadapter_loader_id = assembler._get_unique_id()
+    ipadapter_loader_node = assembler._get_node_template("IPAdapterSD3Loader")
+    ipadapter_loader_node['inputs']['ipadapter'] = "ip-adapter_sd35l_instantx.bin"
+    ipadapter_loader_node['inputs']['provider'] = "cuda"
+    assembler.workflow[ipadapter_loader_id] = ipadapter_loader_node
+
+    for item_data in chain_items:
+        image_loader_id = assembler._get_unique_id()
+        image_loader_node = assembler._get_node_template("LoadImage")
+        image_loader_node['inputs']['image'] = item_data['image']
+        assembler.workflow[image_loader_id] = image_loader_node
+
+        image_scaler_id = assembler._get_unique_id()
+        image_scaler_node = assembler._get_node_template("ImageScaleToTotalPixels")
+        image_scaler_node['inputs']['image'] = [image_loader_id, 0]
+        image_scaler_node['inputs']['upscale_method'] = 'nearest-exact'
+        image_scaler_node['inputs']['megapixels'] = 1.0
+        assembler.workflow[image_scaler_id] = image_scaler_node
+
+        clip_vision_encode_id = assembler._get_unique_id()
+        clip_vision_encode_node = assembler._get_node_template("CLIPVisionEncode")
+        clip_vision_encode_node['inputs']['crop'] = "center"
+        clip_vision_encode_node['inputs']['clip_vision'] = [clip_vision_loader_id, 0]
+        clip_vision_encode_node['inputs']['image'] = [image_scaler_id, 0]
+        assembler.workflow[clip_vision_encode_id] = clip_vision_encode_node
+
+        apply_ipa_id = assembler._get_unique_id()
+        apply_ipa_node = assembler._get_node_template("ApplyIPAdapterSD3")
+
+        apply_ipa_node['inputs']['weight'] = item_data.get('weight', 1.0)
+        apply_ipa_node['inputs']['start_percent'] = item_data.get('start_percent', 0.0)
+        apply_ipa_node['inputs']['end_percent'] = item_data.get('end_percent', 1.0)
+
+        apply_ipa_node['inputs']['model'] = current_model_connection
+        apply_ipa_node['inputs']['ipadapter'] = [ipadapter_loader_id, 0]
+        apply_ipa_node['inputs']['image_embed'] = [clip_vision_encode_id, 0]
+
+        assembler.workflow[apply_ipa_id] = apply_ipa_node
+
+        current_model_connection = [apply_ipa_id, 0]
+
+    assembler.workflow[ksampler_id]['inputs']['model'] = current_model_connection
+
+    print(f"SD3 IPAdapter injector applied. KSampler model input re-routed through {len(chain_items)} IPAdapter(s).")
chain_injectors/style_injector.py
ADDED
@@ -0,0 +1,71 @@
+def inject(assembler, chain_definition, chain_items):
+    if not chain_items:
+        return
+
+    flux_guidance_name = chain_definition.get('flux_guidance_node')
+    ksampler_name = chain_definition.get('ksampler_node', 'ksampler')
+
+    target_node_id = None
+    target_input_name = None
+
+    if flux_guidance_name and flux_guidance_name in assembler.node_map:
+        flux_guidance_id = assembler.node_map[flux_guidance_name]
+        if 'conditioning' in assembler.workflow[flux_guidance_id]['inputs']:
+            target_node_id = flux_guidance_id
+            target_input_name = 'conditioning'
+
+    if not target_node_id:
+        if ksampler_name in assembler.node_map:
+            ksampler_id = assembler.node_map[ksampler_name]
+            if 'positive' in assembler.workflow[ksampler_id]['inputs']:
+                target_node_id = ksampler_id
+                target_input_name = 'positive'
+        else:
+            return
+
+    if not target_node_id:
+        return
+
+    current_conditioning = assembler.workflow[target_node_id]['inputs'][target_input_name]
+
+    style_model_loader_id = assembler._get_unique_id()
+    style_model_loader_node = assembler._get_node_template("StyleModelLoader")
+    style_model_loader_node['inputs']['style_model_name'] = "flux1-redux-dev.safetensors"
+    assembler.workflow[style_model_loader_id] = style_model_loader_node
+
+    clip_vision_loader_id = assembler._get_unique_id()
+    clip_vision_loader_node = assembler._get_node_template("CLIPVisionLoader")
+    clip_vision_loader_node['inputs']['clip_name'] = "sigclip_vision_patch14_384.safetensors"
+    assembler.workflow[clip_vision_loader_id] = clip_vision_loader_node
+
+    for item_data in chain_items:
+        image = item_data.get('image')
+        strength = item_data.get('strength', 1.0)
+        if not image or strength is None:
+            continue
+
+        load_image_id = assembler._get_unique_id()
+        clip_vision_encode_id = assembler._get_unique_id()
+        style_apply_id = assembler._get_unique_id()
+
+        load_image_node = assembler._get_node_template("LoadImage")
+        clip_vision_encode_node = assembler._get_node_template("CLIPVisionEncode")
+        style_apply_node = assembler._get_node_template("StyleModelApply")
+
+        load_image_node['inputs']['image'] = image
+        clip_vision_encode_node['inputs']['crop'] = "center"
+        clip_vision_encode_node['inputs']['clip_vision'] = [clip_vision_loader_id, 0]
+        clip_vision_encode_node['inputs']['image'] = [load_image_id, 0]
+
+        style_apply_node['inputs']['strength'] = strength
+        style_apply_node['inputs']['strength_type'] = "multiply"
+        style_apply_node['inputs']['conditioning'] = current_conditioning
+        style_apply_node['inputs']['style_model'] = [style_model_loader_id, 0]
+        style_apply_node['inputs']['clip_vision_output'] = [clip_vision_encode_id, 0]
+
+        assembler.workflow[load_image_id] = load_image_node
+        assembler.workflow[clip_vision_encode_id] = clip_vision_encode_node
+        assembler.workflow[style_apply_id] = style_apply_node
+        current_conditioning = [style_apply_id, 0]
+
+    assembler.workflow[target_node_id]['inputs'][target_input_name] = current_conditioning
comfy_integration/nodes.py
CHANGED
@@ -23,6 +23,11 @@ CLIPTextEncodeSDXL = NODE_CLASS_MAPPINGS['CLIPTextEncodeSDXL']
 LoraLoader = NODE_CLASS_MAPPINGS['LoraLoader']
 CLIPSetLastLayer = NODE_CLASS_MAPPINGS['CLIPSetLastLayer']
 
+if 'EmptyHunyuanImageLatent' in NODE_CLASS_MAPPINGS:
+    EmptyHunyuanImageLatent = NODE_CLASS_MAPPINGS['EmptyHunyuanImageLatent']
+else:
+    print("⚠️ Warning: 'EmptyHunyuanImageLatent' not found in NODE_CLASS_MAPPINGS. HunyuanImage txt2img may fail if this node is required.")
+
 try:
     KSamplerNode = NODE_CLASS_MAPPINGS['KSampler']
     SAMPLER_CHOICES = KSamplerNode.INPUT_TYPES()["required"]["sampler_name"][0]
comfy_integration/setup.py
CHANGED
@@ -41,13 +41,8 @@ def initialize_comfyui():
 
 
     print("--- Cloning third-party extensions for ComfyUI ---")
-    controlnet_aux_path = os.path.join(APP_DIR, "custom_nodes", "comfyui_controlnet_aux")
-    if not os.path.exists(controlnet_aux_path):
-        os.system(f"git clone https://github.com/Fannovel16/comfyui_controlnet_aux.git {controlnet_aux_path}")
-        print("✅ comfyui_controlnet_aux extension cloned.")
-    else:
-        print("✅ comfyui_controlnet_aux extension already exists.")
 
+    # 1. ComfyUI_IPAdapter_plus
     ipadapter_plus_path = os.path.join(APP_DIR, "custom_nodes", "ComfyUI_IPAdapter_plus")
     if not os.path.exists(ipadapter_plus_path):
         os.system(f"git clone https://github.com/cubiq/ComfyUI_IPAdapter_plus.git {ipadapter_plus_path}")
@@ -55,6 +50,30 @@ def initialize_comfyui():
     else:
         print("✅ ComfyUI_IPAdapter_plus extension already exists.")
 
+    # 2. ComfyUI-InstantX-IPAdapter-SD3
+    ipadapter_plus_path = os.path.join(APP_DIR, "custom_nodes", "ComfyUI-InstantX-IPAdapter-SD3")
+    if not os.path.exists(ipadapter_plus_path):
+        os.system(f"git clone https://github.com/Slickytail/ComfyUI-InstantX-IPAdapter-SD3.git {ipadapter_plus_path}")
+        print("✅ ComfyUI-InstantX-IPAdapter-SD3 extension cloned.")
+    else:
+        print("✅ ComfyUI-InstantX-IPAdapter-SD3 extension already exists.")
+
+    # 3. ComfyUI-IPAdapter-Flux
+    ipadapter_flux_path = os.path.join(APP_DIR, "custom_nodes", "ComfyUI-IPAdapter-Flux")
+    if not os.path.exists(ipadapter_flux_path):
+        os.system(f"git clone https://github.com/Shakker-Labs/ComfyUI-IPAdapter-Flux.git {ipadapter_flux_path}")
+        print("✅ ComfyUI-IPAdapter-Flux extension cloned.")
+    else:
+        print("✅ ComfyUI-IPAdapter-Flux extension already exists.")
+
+    # 4. ComfyUI-Newbie-Nodes
+    newbie_nodes_path = os.path.join(APP_DIR, "custom_nodes", "ComfyUI-Newbie-Nodes")
+    if not os.path.exists(newbie_nodes_path):
+        os.system(f"git clone https://github.com/NewBieAI-Lab/ComfyUI-Newbie-Nodes.git {newbie_nodes_path}")
+        print("✅ ComfyUI-Newbie-Nodes extension cloned.")
+    else:
+        print("✅ ComfyUI-Newbie-Nodes extension already exists.")
+
     print(f"✅ Current working directory is: {os.getcwd()}")
 
     import comfy.model_management
@@ -62,12 +81,10 @@ def initialize_comfyui():
 
     print("✅ ComfyUI initialized with default attention mechanism.")
 
-
-
-
-    os.makedirs(os.path.join(APP_DIR, CONTROLNET_DIR), exist_ok=True)
-    os.makedirs(os.path.join(APP_DIR, DIFFUSION_MODELS_DIR), exist_ok=True)
-    os.makedirs(os.path.join(APP_DIR, VAE_DIR), exist_ok=True)
-    os.makedirs(os.path.join(APP_DIR, TEXT_ENCODERS_DIR), exist_ok=True)
+    for dir_path in CATEGORY_TO_DIR_MAP.values():
+        os.makedirs(os.path.join(APP_DIR, dir_path), exist_ok=True)
+
     os.makedirs(os.path.join(APP_DIR, INPUT_DIR), exist_ok=True)
+    os.makedirs(os.path.join(APP_DIR, OUTPUT_DIR), exist_ok=True)
+
     print("✅ All required model directories are present.")
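Note: the four clone blocks added above are near-identical (the second even reuses the ipadapter_plus_path variable name for a different repository). A possible consolidation, sketched here and not part of the commit, would be a small helper:

import os

def ensure_extension(app_dir: str, name: str, repo_url: str) -> None:
    """Clone a ComfyUI custom-node repo into custom_nodes/ if it is missing."""
    path = os.path.join(app_dir, "custom_nodes", name)
    if not os.path.exists(path):
        os.system(f"git clone {repo_url} {path}")
        print(f"✅ {name} extension cloned.")
    else:
        print(f"✅ {name} extension already exists.")

# Usage mirroring one of the blocks in the diff:
# ensure_extension(APP_DIR, "ComfyUI-IPAdapter-Flux",
#                  "https://github.com/Shakker-Labs/ComfyUI-IPAdapter-Flux.git")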
core/generation_logic.py
CHANGED
@@ -1,25 +1,10 @@
 from typing import Any, Dict
 import gradio as gr
 
-from core.pipelines.controlnet_preprocessor import ControlNetPreprocessorPipeline
 from core.pipelines.sd_image_pipeline import SdImagePipeline
 
-controlnet_preprocessor_pipeline = ControlNetPreprocessorPipeline()
 sd_image_pipeline = SdImagePipeline()
 
 
-def build_reverse_map():
-    from nodes import NODE_DISPLAY_NAME_MAPPINGS
-    import core.pipelines.controlnet_preprocessor as cn_module
-
-    if cn_module.REVERSE_DISPLAY_NAME_MAP is None:
-        cn_module.REVERSE_DISPLAY_NAME_MAP = {v: k for k, v in NODE_DISPLAY_NAME_MAPPINGS.items()}
-        if "Semantic Segmentor (legacy, alias for UniFormer)" not in cn_module.REVERSE_DISPLAY_NAME_MAP:
-            cn_module.REVERSE_DISPLAY_NAME_MAP["Semantic Segmentor (legacy, alias for UniFormer)"] = "SemSegPreprocessor"
-
-
-def run_cn_preprocessor_entry(*args, **kwargs):
-    return controlnet_preprocessor_pipeline.run(*args, **kwargs)
-
 def generate_image_wrapper(ui_inputs: dict, progress=gr.Progress(track_tqdm=True)):
     return sd_image_pipeline.run(ui_inputs=ui_inputs, progress=progress)
core/model_manager.py
CHANGED
@@ -1,9 +1,8 @@
 import gc
 from typing import List
 import gradio as gr
-
-from core.settings import ALL_MODEL_MAP
 from utils.app_utils import _ensure_model_downloaded
+from core.settings import ALL_MODEL_MAP
 
 class ModelManager:
     _instance = None
@@ -23,11 +22,11 @@ class ModelManager:
         print(f"--- [ModelManager] Ensuring models are downloaded: {required_models} ---")
         for i, display_name in enumerate(required_models):
             if progress and hasattr(progress, '__call__'):
-                progress(i / len(required_models), desc=f"Checking file: {display_name}")
+                progress(i / max(len(required_models), 1), desc=f"Checking file: {display_name}")
             try:
                 _ensure_model_downloaded(display_name, progress)
             except Exception as e:
                 raise gr.Error(f"Failed to download model '{display_name}'. Reason: {e}")
         print(f"--- [ModelManager] ✅ All required models are present on disk. ---")
-
+
 model_manager = ModelManager()
core/pipelines/controlnet_preprocessor.py
DELETED
@@ -1,143 +0,0 @@
-from typing import Dict, Any, List
-import imageio
-import tempfile
-import numpy as np
-import torch
-import gradio as gr
-from PIL import Image
-import spaces
-
-from .base_pipeline import BasePipeline
-from comfy_integration.nodes import NODE_CLASS_MAPPINGS
-from nodes import NODE_DISPLAY_NAME_MAPPINGS
-from utils.app_utils import get_value_at_index
-
-REVERSE_DISPLAY_NAME_MAP = None
-CPU_ONLY_PREPROCESSORS = {
-    "Binary Lines", "Canny Edge", "Color Pallete", "Fake Scribble Lines (aka scribble_hed)",
-    "Image Intensity", "Image Luminance", "Inpaint Preprocessor", "PyraCanny", "Scribble Lines",
-    "Scribble XDoG Lines", "Standard Lineart", "Content Shuffle", "Tile"
-}
-
-def run_node_by_function_name(node_instance: Any, **kwargs) -> Any:
-    node_class = type(node_instance)
-    function_name = getattr(node_class, 'FUNCTION', None)
-    if not function_name:
-        raise AttributeError(f"Node class '{node_class.__name__}' is missing the required 'FUNCTION' attribute.")
-    execution_method = getattr(node_instance, function_name, None)
-    if not callable(execution_method):
-        raise AttributeError(f"Method '{function_name}' not found or not callable on node '{node_class.__name__}'.")
-    return execution_method(**kwargs)
-
-class ControlNetPreprocessorPipeline(BasePipeline):
-    def get_required_models(self, **kwargs) -> List[str]:
-        return []
-
-    def _gpu_logic(
-        self, pil_images: List[Image.Image], preprocessor_name: str, model_name: str,
-        params: Dict[str, Any], progress=gr.Progress(track_tqdm=True)
-    ) -> List[Image.Image]:
-        global REVERSE_DISPLAY_NAME_MAP
-        if REVERSE_DISPLAY_NAME_MAP is None:
-            raise RuntimeError("REVERSE_DISPLAY_NAME_MAP has not been initialized. `build_reverse_map` must be called on startup.")
-
-        class_name = REVERSE_DISPLAY_NAME_MAP.get(preprocessor_name)
-        if not class_name or class_name not in NODE_CLASS_MAPPINGS:
-            raise ValueError(f"Preprocessor '{preprocessor_name}' not found.")
-
-        preprocessor_instance = NODE_CLASS_MAPPINGS[class_name]()
-        call_args = {**params, 'ckpt_name': model_name}
-
-        processed_pil_images = []
-        total_frames = len(pil_images)
-
-        for i, frame_pil in enumerate(pil_images):
-            progress(i / total_frames, desc=f"Processing frame {i+1}/{total_frames} with {preprocessor_name}...")
-
-            frame_tensor = torch.from_numpy(np.array(frame_pil).astype(np.float32) / 255.0).unsqueeze(0)
-
-            resolution_arg = {'resolution': max(frame_tensor.shape[2], frame_tensor.shape[3])}
-
-            result_tuple = run_node_by_function_name(
-                preprocessor_instance,
-                image=frame_tensor,
-                **resolution_arg,
-                **call_args
-            )
-
-            processed_tensor = get_value_at_index(result_tuple, 0)
-            processed_np = (processed_tensor.squeeze(0).cpu().numpy().clip(0, 1) * 255.0).astype(np.uint8)
-            processed_pil_images.append(Image.fromarray(processed_np))
-
-        return processed_pil_images
-
-    def run(self, input_type, image_input, video_input, preprocessor_name, model_name, zero_gpu_duration, *args, progress=gr.Progress(track_tqdm=True)):
-        from utils import app_utils
-        pil_images, is_video, fps = [], False, 30
-
-        progress(0, desc="Reading input file...")
-        if input_type == "Image":
-            if image_input is None: raise gr.Error("Please provide an input image.")
-            pil_images = [image_input]
-        elif input_type == "Video":
-            if video_input is None: raise gr.Error("Please provide an input video.")
-            try:
-                video_reader = imageio.get_reader(video_input)
-                meta = video_reader.get_meta_data()
-                fps = meta.get('fps', 30)
-                pil_images = [Image.fromarray(frame) for frame in video_reader]
-                is_video = True
-                video_reader.close()
-            except Exception as e: raise gr.Error(f"Failed to read video file: {e}")
-        else:
-            raise gr.Error("Invalid input type selected.")
-
-        if not pil_images: raise gr.Error("Could not extract any frames from the input.")
-
-        if app_utils.PREPROCESSOR_PARAMETER_MAP is None:
-            raise RuntimeError("Preprocessor parameter map is not built. Check startup logs.")
-
-        params_config = app_utils.PREPROCESSOR_PARAMETER_MAP.get(preprocessor_name, [])
-        sliders_params = [p for p in params_config if p['type'] in ["INT", "FLOAT"]]
-        dropdown_params = [p for p in params_config if isinstance(p['type'], list)]
-        checkbox_params = [p for p in params_config if p['type'] == "BOOLEAN"]
-        ordered_params_config = sliders_params + dropdown_params + checkbox_params
-        param_names = [p['name'] for p in ordered_params_config]
-        provided_params = {param_names[i]: args[i] for i in range(len(param_names))}
-
-        if preprocessor_name not in CPU_ONLY_PREPROCESSORS:
-            print(f"--- '{preprocessor_name}' requires GPU, requesting ZeroGPU. ---")
-            try:
-                processed_pil_images = self._execute_gpu_logic(
-                    self._gpu_logic,
-                    duration=zero_gpu_duration,
-                    default_duration=60,
-                    task_name=f"Preprocessor '{preprocessor_name}'",
-                    pil_images=pil_images,
-                    preprocessor_name=preprocessor_name,
-                    model_name=model_name,
-                    params=provided_params,
-                    progress=progress
-                )
-            except Exception as e:
-                import traceback; traceback.print_exc()
-                raise gr.Error(f"Failed to run preprocessor '{preprocessor_name}' on GPU: {e}")
-        else:
-            print(f"--- Running '{preprocessor_name}' on CPU, no ZeroGPU requested. ---")
-            try:
-                processed_pil_images = self._gpu_logic(pil_images, preprocessor_name, model_name, provided_params, progress=progress)
-            except Exception as e:
-                import traceback; traceback.print_exc()
-                raise gr.Error(f"Failed to run preprocessor '{preprocessor_name}' on CPU: {e}")
-
-        if not processed_pil_images: raise gr.Error("Processing returned no frames.")
-
-        progress(0.9, desc="Finalizing output...")
-        if is_video:
-            frames_np = [np.array(img) for img in processed_pil_images]
-            frames_tensor = torch.from_numpy(np.stack(frames_np)).to(torch.float32) / 255.0
-            video_path = self._encode_video_from_frames(frames_tensor, fps, progress)
-            return [video_path]
-        else:
-            progress(1.0, desc="Done!")
-            return processed_pil_images
core/pipelines/sd_image_pipeline.py
CHANGED
@@ -16,7 +16,15 @@ from core.workflow_assembler import WorkflowAssembler
 
 class SdImagePipeline(BasePipeline):
     def get_required_models(self, model_display_name: str, **kwargs) -> List[str]:
-
+        model_info = ALL_MODEL_MAP.get(model_display_name)
+        if not model_info:
+            return [model_display_name]
+
+        path_or_components = model_info[1]
+        if isinstance(path_or_components, dict):
+            return [v for v in path_or_components.values() if v and v != "pixel_space"]
+        else:
+            return [model_display_name]
 
     def _topological_sort(self, workflow: Dict[str, Any]) -> List[str]:
         graph = defaultdict(list)
@@ -118,7 +126,7 @@
         progress(0.4, desc="Executing workflow...")
 
         initial_objects = {}
-
+
         decoded_images_tensor = self._execute_workflow(workflow, initial_objects=initial_objects)
 
         output_images = []
@@ -134,6 +142,7 @@
             params_string = f"{ui_inputs['positive_prompt']}\nNegative prompt: {ui_inputs['negative_prompt']}\n"
             params_string += f"Steps: {ui_inputs['num_inference_steps']}, Sampler: {ui_inputs['sampler']}, Scheduler: {ui_inputs['scheduler']}, CFG scale: {ui_inputs['guidance_scale']}, Seed: {current_seed}, Size: {width_for_meta}x{height_for_meta}, Base Model: {model_display_name}"
             if ui_inputs['task_type'] != 'txt2img': params_string += f", Denoise: {ui_inputs['denoise']}"
+            if ui_inputs.get('clip_skip') and ui_inputs['clip_skip'] != 1: params_string += f", Clip skip: {abs(ui_inputs['clip_skip'])}"
             if loras_string: params_string += f", {loras_string}"
 
             pil_image.info = {'parameters': params_string.strip()}
@@ -145,26 +154,34 @@
         progress(0, desc="Preparing models...")
 
         task_type = ui_inputs['task_type']
+        model_display_name = ui_inputs['model_display_name']
+        model_type = MODEL_TYPE_MAP.get(model_display_name, 'sdxl')
+
+        architectures_dict = ARCHITECTURES_CONFIG.get('architectures', {})
+        workflow_model_type = architectures_dict.get(model_type, {}).get("model_type", "sdxl")
 
         ui_inputs['positive_prompt'] = sanitize_prompt(ui_inputs.get('positive_prompt', ''))
        ui_inputs['negative_prompt'] = sanitize_prompt(ui_inputs.get('negative_prompt', ''))
 
-
-
+        if 'clip_skip' in ui_inputs and ui_inputs['clip_skip'] is not None:
+            ui_inputs['clip_skip'] = -int(ui_inputs['clip_skip'])
+        else:
+            ui_inputs['clip_skip'] = -1
+
+        required_models = self.get_required_models(model_display_name=model_display_name)
         self.model_manager.ensure_models_downloaded(required_models, progress=progress)
 
         lora_data = ui_inputs.get('lora_data', [])
         active_loras_for_gpu, active_loras_for_meta = [], []
         if lora_data:
             sources, ids, scales, files = lora_data[0::4], lora_data[1::4], lora_data[2::4], lora_data[3::4]
-
             for i, (source, lora_id, scale, _) in enumerate(zip(sources, ids, scales, files)):
                 if scale > 0 and lora_id and lora_id.strip():
                     lora_filename = None
                     if source == "File":
                         lora_filename = sanitize_filename(lora_id)
                     elif source == "Civitai":
-                        local_path, status = get_lora_path(source, lora_id,
+                        local_path, status = get_lora_path(source, lora_id, os.environ.get("CIVITAI_API_KEY", ""), progress)
                         if local_path: lora_filename = os.path.basename(local_path)
                         else: raise gr.Error(f"Failed to prepare LoRA {lora_id}: {status}")
@@ -177,7 +194,6 @@
         elif task_type == 'hires_fix': ui_inputs['denoise'] = ui_inputs.get('hires_denoise', 0.55)
 
         temp_files_to_clean = []
-
         if not os.path.exists(INPUT_DIR): os.makedirs(INPUT_DIR)
 
         if task_type == 'img2img':
@@ -196,7 +212,6 @@
                 raise gr.Error("Inpainting requires an input image and a drawn mask.")
 
             background_img = inpaint_dict['background'].convert("RGBA")
-
             composite_mask_pil = Image.new('L', background_img.size, 0)
             for layer in inpaint_dict['layers']:
                 if layer:
@@ -210,7 +225,7 @@
             temp_file_path = os.path.join(INPUT_DIR, f"temp_inpaint_composite_{random.randint(1000, 9999)}.png")
             composite_image_with_mask.save(temp_file_path, "PNG")
 
-            ui_inputs['
+            ui_inputs['input_image'] = os.path.basename(temp_file_path)
             temp_files_to_clean.append(temp_file_path)
             ui_inputs.pop('inpaint_mask', None)
 
@@ -221,6 +236,9 @@
             input_image_pil.save(temp_file_path, "PNG")
             ui_inputs['input_image'] = os.path.basename(temp_file_path)
             temp_files_to_clean.append(temp_file_path)
+
+            ui_inputs['megapixels'] = 0.25
+            ui_inputs['grow_mask_by'] = ui_inputs.get('feathering', 10)
 
         elif task_type == 'hires_fix':
             input_image_pil = ui_inputs.get('hires_image')
@@ -240,7 +258,7 @@
                     if source == "File":
                         emb_filename = sanitize_filename(emb_id)
                     elif source == "Civitai":
-                        local_path, status = get_embedding_path(source, emb_id,
+                        local_path, status = get_embedding_path(source, emb_id, os.environ.get("CIVITAI_API_KEY", ""), progress)
                         if local_path: emb_filename = os.path.basename(local_path)
                         else: raise gr.Error(f"Failed to prepare Embedding {emb_id}: {status}")
@@ -256,19 +274,36 @@
 
         controlnet_data = ui_inputs.get('controlnet_data', [])
         active_controlnets = []
-
-
-
-
-
-
-
-
-
-
-
-
-
+        if controlnet_data:
+            (cn_images, _, _, cn_strengths, cn_filepaths) = [controlnet_data[i::5] for i in range(5)]
+            for i in range(len(cn_images)):
+                if cn_images[i] and cn_strengths[i] > 0 and cn_filepaths[i] and cn_filepaths[i] != "None":
+                    ensure_controlnet_model_downloaded(cn_filepaths[i], progress)
+                    if not os.path.exists(INPUT_DIR): os.makedirs(INPUT_DIR)
+                    cn_temp_path = os.path.join(INPUT_DIR, f"temp_cn_{i}_{random.randint(1000, 9999)}.png")
+                    cn_images[i].save(cn_temp_path, "PNG")
+                    temp_files_to_clean.append(cn_temp_path)
+                    active_controlnets.append({
+                        "image": os.path.basename(cn_temp_path), "strength": cn_strengths[i],
+                        "start_percent": 0.0, "end_percent": 1.0, "control_net_name": cn_filepaths[i]
+                    })
+
+        diffsynth_controlnet_data = ui_inputs.get('diffsynth_controlnet_data', [])
+        active_diffsynth_controlnets = []
+        if diffsynth_controlnet_data:
+            (cn_images, _, _, cn_strengths, cn_filepaths) = [diffsynth_controlnet_data[i::5] for i in range(5)]
+            for i in range(len(cn_images)):
+                if cn_images[i] and cn_strengths[i] > 0 and cn_filepaths[i] and cn_filepaths[i] != "None":
+                    ensure_controlnet_model_downloaded(cn_filepaths[i], progress)
+
+                    if not os.path.exists(INPUT_DIR): os.makedirs(INPUT_DIR)
+                    cn_temp_path = os.path.join(INPUT_DIR, f"temp_diffsynth_cn_{i}_{random.randint(1000, 9999)}.png")
+                    cn_images[i].save(cn_temp_path, "PNG")
+                    temp_files_to_clean.append(cn_temp_path)
+                    active_diffsynth_controlnets.append({
+                        "image": os.path.basename(cn_temp_path), "strength": cn_strengths[i],
+                        "control_net_name": cn_filepaths[i]
+                    })
 
         ipadapter_data = ui_inputs.get('ipadapter_data', [])
         active_ipadapters = []
@@ -276,13 +311,10 @@
             num_ipa_units = (len(ipadapter_data) - 5) // 3
             final_preset, final_weight, final_lora_strength, final_embeds_scaling, final_combine_method = ipadapter_data[-5:]
             ipa_images, ipa_weights, ipa_lora_strengths = [ipadapter_data[i*num_ipa_units:(i+1)*num_ipa_units] for i in range(3)]
-
             all_presets_to_download = set()
-
             for i in range(num_ipa_units):
                 if ipa_images[i] and ipa_weights[i] > 0 and final_preset:
                     all_presets_to_download.add(final_preset)
-
                    if not os.path.exists(INPUT_DIR): os.makedirs(INPUT_DIR)
                     ipa_temp_path = os.path.join(INPUT_DIR, f"temp_ipa_{i}_{random.randint(1000, 9999)}.png")
                     ipa_images[i].save(ipa_temp_path, "PNG")
@@ -291,34 +323,111 @@
                         "image": os.path.basename(ipa_temp_path), "preset": final_preset,
                         "weight": ipa_weights[i], "lora_strength": ipa_lora_strengths[i]
                     })
-
             if active_ipadapters and final_preset:
                 all_presets_to_download.add(final_preset)
-
             for preset in all_presets_to_download:
                 ensure_ipadapter_models_downloaded(preset, progress)
-
+
+            model_type_key = 'sd15' if workflow_model_type == 'sd15' else 'sdxl'
             if active_ipadapters:
                 active_ipadapters.append({
-                    'is_final_settings': True, 'model_type':
+                    'is_final_settings': True, 'model_type': model_type_key, 'final_preset': final_preset,
                     'final_weight': final_weight, 'final_lora_strength': final_lora_strength,
                     'final_embeds_scaling': final_embeds_scaling, 'final_combine_method': final_combine_method
                 })
+
+        flux1_ipadapter_data = ui_inputs.get('flux1_ipadapter_data', [])
+        active_flux1_ipadapters = []
+        if flux1_ipadapter_data:
+            num_units = len(flux1_ipadapter_data) // 4
+            f_images = flux1_ipadapter_data[0*num_units : 1*num_units]
+            f_weights = flux1_ipadapter_data[1*num_units : 2*num_units]
+            f_starts = flux1_ipadapter_data[2*num_units : 3*num_units]
+            f_ends = flux1_ipadapter_data[3*num_units : 4*num_units]
+            for i in range(len(f_images)):
+                if f_images[i] and f_weights[i] > 0:
+                    from utils.app_utils import _ensure_model_downloaded
+                    for filename in ["ip-adapter.bin"]:
+                        _ensure_model_downloaded(filename, progress)
+
+                    from huggingface_hub import snapshot_download
+                    progress(0.5, desc="Caching HF SigLIP model...")
+                    snapshot_download(
+                        repo_id="google/siglip-so400m-patch14-384",
+                        allow_patterns=["*.json", "*.safetensors", "*.txt"],
+                        ignore_patterns=["*.msgpack", "*.h5", "*.bin"]
+                    )
+
+                    temp_path = os.path.join(INPUT_DIR, f"temp_fipa_{i}_{random.randint(1000, 9999)}.png")
+                    f_images[i].save(temp_path, "PNG")
+                    temp_files_to_clean.append(temp_path)
+                    active_flux1_ipadapters.append({
+                        "image": os.path.basename(temp_path),
+                        "weight": f_weights[i], "start_percent": f_starts[i], "end_percent": f_ends[i]
+                    })
+
+        sd3_ipadapter_data = ui_inputs.get('sd3_ipadapter_chain', [])
+        active_sd3_ipadapters = []
+        if sd3_ipadapter_data:
+            num_units = len(sd3_ipadapter_data) // 4
+            s_images = sd3_ipadapter_data[0*num_units : 1*num_units]
+            s_weights = sd3_ipadapter_data[1*num_units : 2*num_units]
+            s_starts = sd3_ipadapter_data[2*num_units : 3*num_units]
+            s_ends = sd3_ipadapter_data[3*num_units : 4*num_units]
+            sd3_ipa_downloaded = False
+            for i in range(len(s_images)):
+                if s_images[i] and s_weights[i] > 0:
+                    if not sd3_ipa_downloaded:
+                        from utils.app_utils import ensure_sd3_ipadapter_models_downloaded
+                        ensure_sd3_ipadapter_models_downloaded(progress)
+                        sd3_ipa_downloaded = True
+                    temp_path = os.path.join(INPUT_DIR, f"temp_s3ipa_{i}_{random.randint(1000, 9999)}.png")
+                    s_images[i].save(temp_path, "PNG")
+                    temp_files_to_clean.append(temp_path)
+                    active_sd3_ipadapters.append({
+                        "image": os.path.basename(temp_path),
+                        "weight": s_weights[i], "start_percent": s_starts[i], "end_percent": s_ends[i]
+                    })
+
+        style_data = ui_inputs.get('style_data', [])
+        active_styles = []
+        if style_data:
+            num_units = len(style_data) // 2
+            st_images = style_data[0*num_units : 1*num_units]
+            st_strengths = style_data[1*num_units : 2*num_units]
+            for i in range(len(st_images)):
+                if st_images[i] and st_strengths[i] > 0:
+                    from utils.app_utils import _ensure_model_downloaded
+                    _ensure_model_downloaded("sigclip_vision_patch14_384.safetensors", progress)
+                    temp_path = os.path.join(INPUT_DIR, f"temp_style_{i}_{random.randint(1000, 9999)}.png")
+                    st_images[i].save(temp_path, "PNG")
+                    temp_files_to_clean.append(temp_path)
+                    active_styles.append({
+                        "image": os.path.basename(temp_path), "strength": st_strengths[i]
+                    })
+
+        reference_latent_data = ui_inputs.get('reference_latent_data', [])
+        active_reference_latents = []
+        if reference_latent_data:
+            for img in reference_latent_data:
+                if img:
+                    if not os.path.exists(INPUT_DIR): os.makedirs(INPUT_DIR)
+                    temp_path = os.path.join(INPUT_DIR, f"temp_ref_{random.randint(1000, 9999)}.png")
+                    img.save(temp_path, "PNG")
+                    temp_files_to_clean.append(temp_path)
+                    active_reference_latents.append(os.path.basename(temp_path))
 
         from utils.app_utils import get_vae_path
         vae_source = ui_inputs.get('vae_source')
         vae_id = ui_inputs.get('vae_id')
-        vae_file = ui_inputs.get('vae_file')
         vae_name_override = None
-
         if vae_source and vae_source != "None":
             if vae_source == "File":
                 vae_name_override = sanitize_filename(vae_id)
             elif vae_source == "Civitai" and vae_id and vae_id.strip():
-                local_path, status = get_vae_path(vae_source, vae_id,
+                local_path, status = get_vae_path(vae_source, vae_id, os.environ.get("CIVITAI_API_KEY", ""), progress)
                 if local_path: vae_name_override = os.path.basename(local_path)
                 else: raise gr.Error(f"Failed to prepare VAE {vae_id}: {status}")
-
         if vae_name_override:
             ui_inputs['vae_name'] = vae_name_override
 
@@ -326,22 +435,12 @@
         active_conditioning = []
         if conditioning_data:
             num_units = len(conditioning_data) // 6
-            prompts
-            widths = conditioning_data[1*num_units : 2*num_units]
-            heights = conditioning_data[2*num_units : 3*num_units]
-            xs = conditioning_data[3*num_units : 4*num_units]
-            ys = conditioning_data[4*num_units : 5*num_units]
-            strengths = conditioning_data[5*num_units : 6*num_units]
-
+            prompts, widths, heights, xs, ys, strengths = [conditioning_data[i*num_units : (i+1)*num_units] for i in range(6)]
             for i in range(num_units):
                 if prompts[i] and prompts[i].strip():
                     active_conditioning.append({
-                        "prompt": prompts[i],
-                        "
-                        "height": int(heights[i]),
-                        "x": int(xs[i]),
-                        "y": int(ys[i]),
-                        "strength": float(strengths[i])
+                        "prompt": prompts[i], "width": int(widths[i]), "height": int(heights[i]),
+                        "x": int(xs[i]), "y": int(ys[i]), "strength": float(strengths[i])
                     })
 
         loras_string = f"LoRAs: [{', '.join(active_loras_for_meta)}]" if active_loras_for_meta else ""
@@ -350,31 +449,62 @@
 
         if ui_inputs.get('seed') == -1:
             ui_inputs['seed'] = random.randint(0, 2**32 - 1)
-
-
+
+        model_info = ALL_MODEL_MAP[model_display_name]
+        path_or_components = model_info[1]
+        latent_type = model_info[3] if len(model_info) > 3 and model_info[3] else 'latent'
+        latent_generator_template = "EmptyLatentImage"
+        if latent_type == 'sd3_latent':
+            latent_generator_template = "EmptySD3LatentImage"
+        elif latent_type == 'chroma_radiance_latent':
+            latent_generator_template = "EmptyChromaRadianceLatentImage"
+        elif latent_type == 'hunyuan_latent':
+            latent_generator_template = "EmptyHunyuanImageLatent"
+
+        dynamic_values = {
+            'task_type': ui_inputs['task_type'],
+            'model_type': workflow_model_type,
+            'latent_type': latent_type,
+            'latent_generator_template': latent_generator_template
+        }
 
         recipe_path = os.path.join(os.path.dirname(__file__), "workflow_recipes", "sd_unified_recipe.yaml")
         assembler = WorkflowAssembler(recipe_path, dynamic_values=dynamic_values)
 
         workflow_inputs = {
+            **ui_inputs,
            "positive_prompt": ui_inputs['positive_prompt'], "negative_prompt": ui_inputs['negative_prompt'],
            "seed": ui_inputs['seed'], "steps": ui_inputs['num_inference_steps'], "cfg": ui_inputs['guidance_scale'],
             "sampler_name": ui_inputs['sampler'], "scheduler": ui_inputs['scheduler'],
             "batch_size": ui_inputs['batch_size'],
-            "
-            "
-            "inpaint_image": ui_inputs.get('inpaint_image'),
-            "inpaint_mask": ui_inputs.get('inpaint_mask'),
-            "left": ui_inputs.get('outpaint_left'), "top": ui_inputs.get('outpaint_top'),
-            "right": ui_inputs.get('outpaint_right'), "bottom": ui_inputs.get('outpaint_bottom'),
-            "hires_upscaler": ui_inputs.get('hires_upscaler'), "hires_scale_by": ui_inputs.get('hires_scale_by'),
-            "model_name": ALL_MODEL_MAP[ui_inputs['model_display_name']][1],
+            "clip_skip": ui_inputs['clip_skip'],
+            "denoise": ui_inputs['denoise'],
             "vae_name": ui_inputs.get('vae_name'),
+            "guidance": ui_inputs.get('guidance', 3.5),
             "lora_chain": active_loras_for_gpu,
             "controlnet_chain": active_controlnets,
+            "diffsynth_controlnet_chain": active_diffsynth_controlnets,
             "ipadapter_chain": active_ipadapters,
+            "flux1_ipadapter_chain": active_flux1_ipadapters,
+            "sd3_ipadapter_chain": active_sd3_ipadapters,
+            "style_chain": active_styles,
             "conditioning_chain": active_conditioning,
+            "reference_latent_chain": active_reference_latents,
         }
+
+        if isinstance(path_or_components, dict):
+            workflow_inputs.update({
+                'unet_name': path_or_components.get('unet'),
+                'vae_name': ui_inputs.get('vae_name') or path_or_components.get('vae'),
+                'clip_name': path_or_components.get('clip'),
+                'clip1_name': path_or_components.get('clip1'),
+                'clip2_name': path_or_components.get('clip2'),
+                'clip3_name': path_or_components.get('clip3'),
+                'clip4_name': path_or_components.get('clip4'),
+                'lora_name': path_or_components.get('lora'),
+            })
+        else:
+            workflow_inputs['model_name'] = path_or_components
 
         if task_type == 'txt2img':
             workflow_inputs['width'] = ui_inputs['width']
@@ -382,24 +512,19 @@
 
         workflow = assembler.assemble(workflow_inputs)
 
-        if
+        if ui_inputs.get("vae_name") and workflow_model_type not in ['flux1', 'hidream', 'lumina', 'omnigen2', 'chroma1-radiance', 'chroma1', 'hunyuanimage', 'ovis-image', 'longcat-image']:
             print("--- [Workflow Patch] VAE override provided. Adding VAELoader and rewiring connections. ---")
             vae_loader_id = assembler._get_unique_id()
             vae_loader_node = assembler._get_node_template("VAELoader")
-            vae_loader_node['inputs']['vae_name'] =
+            vae_loader_node['inputs']['vae_name'] = ui_inputs["vae_name"]
             workflow[vae_loader_id] = vae_loader_node
 
             vae_decode_id = assembler.node_map.get("vae_decode")
             if vae_decode_id and vae_decode_id in workflow:
                 workflow[vae_decode_id]['inputs']['vae'] = [vae_loader_id, 0]
-                print(f" - Rewired 'vae_decode' (ID: {vae_decode_id}) to use new VAELoader.")
-
             vae_encode_id = assembler.node_map.get("vae_encode")
             if vae_encode_id and vae_encode_id in workflow:
                 workflow[vae_encode_id]['inputs']['vae'] = [vae_loader_id, 0]
-                print(f" - Rewired 'vae_encode' (ID: {vae_encode_id}) to use new VAELoader.")
-        else:
-            print("--- [Workflow Info] No VAE override. Using VAE from checkpoint. ---")
 
         progress(1.0, desc="All models ready. Requesting GPU for generation...")
 
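The rewritten get_required_models treats an ALL_MODEL_MAP entry whose second element is a dict as a multi-component model (separate UNet, CLIP, and VAE files) and flattens it into a download list. A rough illustration of the lookup; the map entries below are hypothetical, only the (label, path_or_components, ...) tuple layout is taken from the diff:

    ALL_MODEL_MAP = {
        "SDXL Base (example)": ("SDXL Base", "sd_xl_base_1.0.safetensors"),
        "Flux Dev (example)": ("Flux Dev", {
            "unet": "flux-dev.safetensors",
            "clip1": "clip_l.safetensors",
            "clip2": "t5xxl.safetensors",
            "vae": "ae.safetensors",
        }),
    }

    def get_required_models(model_display_name):
        model_info = ALL_MODEL_MAP.get(model_display_name)
        if not model_info:
            return [model_display_name]
        path_or_components = model_info[1]
        if isinstance(path_or_components, dict):
            # "pixel_space" is a sentinel VAE with no file behind it, so it is skipped.
            return [v for v in path_or_components.values() if v and v != "pixel_space"]
        return [model_display_name]

    print(get_required_models("Flux Dev (example)"))
    # ['flux-dev.safetensors', 'clip_l.safetensors', 't5xxl.safetensors', 'ae.safetensors']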
core/pipelines/workflow_recipes/_partials/{_base_sampler.yaml → _base_sampler_sd.yaml}
RENAMED
@@ -1,11 +1,21 @@
 nodes:
+  pos_prompt:
+    class_type: CLIPTextEncode
+    title: "CLIP Text Encode (Positive)"
+  neg_prompt:
+    class_type: CLIPTextEncode
+    title: "CLIP Text Encode (Negative)"
   ksampler:
     class_type: KSampler
-
+    title: "KSampler"
+    params:
+      denoise: 1.0
   vae_decode:
     class_type: VAEDecode
+    title: "VAE Decode"
   save_image:
     class_type: SaveImage
+    title: "Save Image"
     params: {}
 
 connections:
@@ -15,9 +25,12 @@
     to: "save_image:images"
 
 ui_map:
+  positive_prompt: "pos_prompt:text"
+  negative_prompt: "neg_prompt:text"
   seed: "ksampler:seed"
   steps: "ksampler:steps"
   cfg: "ksampler:cfg"
   sampler_name: "ksampler:sampler_name"
   scheduler: "ksampler:scheduler"
-  denoise: "ksampler:denoise"
+  denoise: "ksampler:denoise"
+  filename_prefix: "save_image:filename_prefix"
core/pipelines/workflow_recipes/_partials/conditioning/anima.yaml
ADDED
@@ -0,0 +1,54 @@
+nodes:
+  unet_loader:
+    class_type: UNETLoader
+    title: "Load Diffusion Model"
+    params:
+      weight_dtype: "default"
+  vae_loader:
+    class_type: VAELoader
+    title: "Load VAE"
+  clip_loader:
+    class_type: CLIPLoader
+    title: "Load CLIP"
+    params:
+      type: "stable_diffusion"
+      device: "default"
+
+connections:
+  - from: "unet_loader:0"
+    to: "ksampler:model"
+  - from: "clip_loader:0"
+    to: "pos_prompt:clip"
+  - from: "clip_loader:0"
+    to: "neg_prompt:clip"
+  - from: "vae_loader:0"
+    to: "vae_decode:vae"
+  - from: "vae_loader:0"
+    to: "vae_encode:vae"
+  - from: "pos_prompt:0"
+    to: "ksampler:positive"
+  - from: "neg_prompt:0"
+    to: "ksampler:negative"
+
+dynamic_lora_chains:
+  lora_chain:
+    template: "LoraLoader"
+    output_map:
+      "unet_loader:0": "model"
+      "clip_loader:0": "clip"
+    input_map:
+      "model": "model"
+      "clip": "clip"
+    end_input_map:
+      "model": ["ksampler:model"]
+      "clip": ["pos_prompt:clip", "neg_prompt:clip"]
+
+dynamic_conditioning_chains:
+  conditioning_chain:
+    ksampler_node: "ksampler"
+    clip_source: "clip_loader:0"
+
+ui_map:
+  unet_name: "unet_loader:unet_name"
+  vae_name: "vae_loader:vae_name"
+  clip_name: "clip_loader:clip_name"
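Recipe connections use "node_key:output_index" sources and "node_key:input_name" targets. In ComfyUI's API format a wired input is a [source_node_id, output_index] pair, so each entry above plausibly resolves like this (wire_connections is an illustrative helper, not the repo's code):

    def wire_connections(workflow, node_map, connections):
        # "unet_loader:0" -> "ksampler:model" becomes
        # workflow[ksampler_id]["inputs"]["model"] = [unet_loader_id, 0]
        for conn in connections:
            src_key, out_idx = conn["from"].rsplit(":", 1)
            dst_key, input_name = conn["to"].split(":", 1)
            workflow[node_map[dst_key]]["inputs"][input_name] = [node_map[src_key], int(out_idx)]
        return workflow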
core/pipelines/workflow_recipes/_partials/conditioning/chroma1-radiance.yaml
ADDED
@@ -0,0 +1,59 @@
+nodes:
+  unet_loader:
+    class_type: UNETLoader
+    title: "Load Diffusion Model"
+    params:
+      weight_dtype: "default"
+  vae_loader:
+    class_type: VAELoader
+    title: "Load VAE"
+    params:
+      vae_name: "pixel_space"
+  clip_loader:
+    class_type: CLIPLoader
+    title: "Load CLIP"
+    params:
+      type: "chroma"
+      device: "default"
+  t5_tokenizer:
+    class_type: T5TokenizerOptions
+    title: "T5TokenizerOptions"
+    params:
+      min_padding: 0
+      min_length: 3
+  model_sampler:
+    class_type: ModelSamplingAuraFlow
+    params:
+      shift: 3.0
+
+connections:
+  - from: "unet_loader:0"
+    to: "model_sampler:model"
+  - from: "model_sampler:0"
+    to: "ksampler:model"
+
+  - from: "clip_loader:0"
+    to: "t5_tokenizer:clip"
+  - from: "t5_tokenizer:0"
+    to: "pos_prompt:clip"
+  - from: "t5_tokenizer:0"
+    to: "neg_prompt:clip"
+
+  - from: "pos_prompt:0"
+    to: "ksampler:positive"
+  - from: "neg_prompt:0"
+    to: "ksampler:negative"
+
+  - from: "vae_loader:0"
+    to: "vae_decode:vae"
+  - from: "vae_loader:0"
+    to: "vae_encode:vae"
+
+dynamic_conditioning_chains:
+  conditioning_chain:
+    ksampler_node: "ksampler"
+    clip_source: "t5_tokenizer:0"
+
+ui_map:
+  unet_name: "unet_loader:unet_name"
+  clip_name: "clip_loader:clip_name"
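Chroma Radiance works directly in pixel space, so the recipe pins vae_name: "pixel_space" instead of naming a downloadable VAE file, and its ui_map deliberately omits vae_name. This is the same sentinel the new get_required_models filters out; a tiny check with a hypothetical component dict:

    components = {"unet": "chroma-radiance.safetensors", "vae": "pixel_space"}
    required = [v for v in components.values() if v and v != "pixel_space"]
    assert required == ["chroma-radiance.safetensors"]  # the sentinel never reaches the downloader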
core/pipelines/workflow_recipes/_partials/conditioning/chroma1.yaml
ADDED
@@ -0,0 +1,61 @@
+nodes:
+  unet_loader:
+    class_type: UNETLoader
+    title: "Load Diffusion Model"
+    params:
+      weight_dtype: "default"
+  vae_loader:
+    class_type: VAELoader
+    title: "Load VAE"
+  clip_loader:
+    class_type: CLIPLoader
+    title: "Load CLIP"
+    params:
+      type: "chroma"
+      device: "default"
+  t5_tokenizer:
+    class_type: T5TokenizerOptions
+    title: "T5TokenizerOptions"
+    params:
+      min_padding: 1
+      min_length: 0
+  fresca:
+    class_type: FreSca
+    title: "FreSca"
+    params:
+      scale_low: 1.0
+      scale_high: 2.5
+      freq_cutoff: 30
+
+connections:
+  - from: "unet_loader:0"
+    to: "fresca:model"
+  - from: "fresca:0"
+    to: "ksampler:model"
+
+  - from: "clip_loader:0"
+    to: "t5_tokenizer:clip"
+  - from: "t5_tokenizer:0"
+    to: "pos_prompt:clip"
+  - from: "t5_tokenizer:0"
+    to: "neg_prompt:clip"
+
+  - from: "pos_prompt:0"
+    to: "ksampler:positive"
+  - from: "neg_prompt:0"
+    to: "ksampler:negative"
+
+  - from: "vae_loader:0"
+    to: "vae_decode:vae"
+  - from: "vae_loader:0"
+    to: "vae_encode:vae"
+
+dynamic_conditioning_chains:
+  conditioning_chain:
+    ksampler_node: "ksampler"
+    clip_source: "t5_tokenizer:0"
+
+ui_map:
+  unet_name: "unet_loader:unet_name"
+  vae_name: "vae_loader:vae_name"
+  clip_name: "clip_loader:clip_name"
core/pipelines/workflow_recipes/_partials/conditioning/ernie-image.yaml
ADDED
@@ -0,0 +1,54 @@
+nodes:
+  unet_loader:
+    class_type: UNETLoader
+    title: "Load Diffusion Model"
+    params:
+      weight_dtype: "default"
+  clip_loader:
+    class_type: CLIPLoader
+    title: "Load CLIP"
+    params:
+      type: "flux2"
+      device: "default"
+  vae_loader:
+    class_type: VAELoader
+    title: "Load VAE"
+
+connections:
+  - from: "unet_loader:0"
+    to: "ksampler:model"
+  - from: "clip_loader:0"
+    to: "pos_prompt:clip"
+  - from: "clip_loader:0"
+    to: "neg_prompt:clip"
+  - from: "pos_prompt:0"
+    to: "ksampler:positive"
+  - from: "neg_prompt:0"
+    to: "ksampler:negative"
+  - from: "vae_loader:0"
+    to: "vae_decode:vae"
+  - from: "vae_loader:0"
+    to: "vae_encode:vae"
+
+dynamic_lora_chains:
+  lora_chain:
+    template: "LoraLoader"
+    output_map:
+      "unet_loader:0": "model"
+      "clip_loader:0": "clip"
+    input_map:
+      "model": "model"
+      "clip": "clip"
+    end_input_map:
+      "model": ["ksampler:model"]
+      "clip": ["pos_prompt:clip", "neg_prompt:clip"]
+
+dynamic_conditioning_chains:
+  conditioning_chain:
+    ksampler_node: "ksampler"
+    clip_source: "clip_loader:0"
+
+ui_map:
+  unet_name: "unet_loader:unet_name"
+  clip_name: "clip_loader:clip_name"
+  vae_name: "vae_loader:vae_name"
core/pipelines/workflow_recipes/_partials/conditioning/flux1.yaml
ADDED
@@ -0,0 +1,64 @@
+nodes:
+  unet_loader:
+    class_type: UNETLoader
+    title: "Load FLUX UNET"
+    params:
+      weight_dtype: "default"
+  vae_loader:
+    class_type: VAELoader
+    title: "Load FLUX VAE"
+  clip_loader:
+    class_type: DualCLIPLoader
+    title: "Load FLUX Dual CLIP"
+    params:
+      type: "flux"
+      device: "default"
+  flux_guidance:
+    class_type: FluxGuidance
+    title: "FluxGuidance"
+
+connections:
+  - from: "unet_loader:0"
+    to: "ksampler:model"
+  - from: "clip_loader:0"
+    to: "pos_prompt:clip"
+  - from: "clip_loader:0"
+    to: "neg_prompt:clip"
+  - from: "vae_loader:0"
+    to: "vae_decode:vae"
+  - from: "vae_loader:0"
+    to: "vae_encode:vae"
+  - from: "pos_prompt:0"
+    to: "flux_guidance:conditioning"
+  - from: "flux_guidance:0"
+    to: "ksampler:positive"
+  - from: "neg_prompt:0"
+    to: "ksampler:negative"
+
+dynamic_controlnet_chains:
+  controlnet_chain:
+    template: "ControlNetApplyAdvanced"
+    ksampler_node: "ksampler"
+    vae_source: "vae_loader:0"
+
+dynamic_flux1_ipadapter_chains:
+  flux1_ipadapter_chain:
+    ksampler_node: "ksampler"
+
+dynamic_style_chains:
+  style_chain:
+    flux_guidance_node: "flux_guidance"
+    ksampler_node: "ksampler"
+
+dynamic_conditioning_chains:
+  conditioning_chain:
+    flux_guidance_node: "flux_guidance"
+    ksampler_node: "ksampler"
+    clip_source: "clip_loader:0"
+
+ui_map:
+  unet_name: "unet_loader:unet_name"
+  vae_name: "vae_loader:vae_name"
+  clip1_name: "clip_loader:clip_name1"
+  clip2_name: "clip_loader:clip_name2"
+  guidance: "flux_guidance:guidance"
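Only the positive branch passes through FluxGuidance here, since FLUX.1 consumes its guidance value as conditioning metadata rather than as a classifier-free-guidance weight; the pipeline supplies it via ui_inputs.get('guidance', 3.5). A sketch of the assembled API-format fragment for that hop (node ids are placeholders chosen for illustration):

    workflow_fragment = {
        "3": {"class_type": "CLIPTextEncode", "inputs": {"text": "a photo", "clip": ["1", 0]}},
        "5": {"class_type": "FluxGuidance", "inputs": {"conditioning": ["3", 0], "guidance": 3.5}},
        "7": {"class_type": "KSampler", "inputs": {"positive": ["5", 0], "negative": ["4", 0]}},
    }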
core/pipelines/workflow_recipes/_partials/conditioning/flux2-kv.yaml
ADDED
@@ -0,0 +1,104 @@
+nodes:
+  unet_loader:
+    class_type: UNETLoader
+    title: "Load Diffusion Model"
+    params:
+      weight_dtype: "default"
+  clip_loader:
+    class_type: CLIPLoader
+    title: "Load CLIP"
+    params:
+      type: "flux2"
+      device: "default"
+  vae_loader:
+    class_type: VAELoader
+    title: "Load VAE"
+
+  flux_kv_cache:
+    class_type: FluxKVCache
+    title: "Flux KV Cache"
+
+  pos_prompt:
+    class_type: CLIPTextEncode
+    title: "CLIP Text Encode (Positive)"
+  neg_prompt:
+    class_type: CLIPTextEncode
+    title: "CLIP Text Encode (Negative)"
+
+  ksampler:
+    class_type: KSampler
+    title: "KSampler"
+    params:
+      denoise: 1.0
+
+  vae_decode:
+    class_type: VAEDecode
+    title: "VAE Decode"
+
+  save_image:
+    class_type: SaveImage
+    title: "Save Image"
+
+connections:
+  - from: "unet_loader:0"
+    to: "flux_kv_cache:model"
+  - from: "flux_kv_cache:0"
+    to: "ksampler:model"
+
+  - from: "clip_loader:0"
+    to: "pos_prompt:clip"
+  - from: "clip_loader:0"
+    to: "neg_prompt:clip"
+
+  - from: "vae_loader:0"
+    to: "vae_decode:vae"
+  - from: "vae_loader:0"
+    to: "vae_encode:vae"
+
+  - from: "pos_prompt:0"
+    to: "ksampler:positive"
+  - from: "neg_prompt:0"
+    to: "ksampler:negative"
+
+  - from: "latent_source:0"
+    to: "ksampler:latent_image"
+
+  - from: "ksampler:0"
+    to: "vae_decode:samples"
+  - from: "vae_decode:0"
+    to: "save_image:images"
+
+dynamic_lora_chains:
+  lora_chain:
+    template: "LoraLoader"
+    output_map:
+      "unet_loader:0": "model"
+      "clip_loader:0": "clip"
+    input_map:
+      "model": "model"
+      "clip": "clip"
+    end_input_map:
+      "model": ["flux_kv_cache:model"]
+      "clip": ["pos_prompt:clip", "neg_prompt:clip"]
+
+dynamic_reference_latent_chains:
+  reference_latent_chain:
+    ksampler_node: "ksampler"
+    vae_node: "vae_loader"
+
+ui_map:
+  unet_name: "unet_loader:unet_name"
+  clip_name: "clip_loader:clip_name"
+  vae_name: "vae_loader:vae_name"
+
+  positive_prompt: "pos_prompt:text"
+  negative_prompt: "neg_prompt:text"
+
+  seed: "ksampler:seed"
+  steps: "ksampler:steps"
+  cfg: "ksampler:cfg"
+  sampler_name: "ksampler:sampler_name"
+  scheduler: "ksampler:scheduler"
+  denoise: "ksampler:denoise"
+
+  filename_prefix: "save_image:filename_prefix"
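Compared with flux2.yaml below, the model path here is patched through flux_kv_cache, so the LoRA chain's end_input_map targets "flux_kv_cache:model" instead of "ksampler:model": the last LoRA must feed whatever node now heads the model path. An illustrative retargeting helper (names assumed; ComfyUI's LoraLoader outputs are 0 = MODEL, 1 = CLIP):

    def retarget_chain_tail(workflow, node_map, last_lora_id, end_input_map):
        # Point every final consumer input at the last LoraLoader's outputs.
        out_index = {"model": 0, "clip": 1}
        for sock, targets in end_input_map.items():
            for target in targets:
                dst_key, input_name = target.split(":", 1)
                workflow[node_map[dst_key]]["inputs"][input_name] = [last_lora_id, out_index[sock]]
        return workflow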
core/pipelines/workflow_recipes/_partials/conditioning/flux2.yaml
ADDED
@@ -0,0 +1,96 @@
+nodes:
+  unet_loader:
+    class_type: UNETLoader
+    title: "Load Diffusion Model"
+    params:
+      weight_dtype: "default"
+  clip_loader:
+    class_type: CLIPLoader
+    title: "Load CLIP"
+    params:
+      type: "flux2"
+      device: "default"
+  vae_loader:
+    class_type: VAELoader
+    title: "Load VAE"
+
+  pos_prompt:
+    class_type: CLIPTextEncode
+    title: "CLIP Text Encode (Positive)"
+  neg_prompt:
+    class_type: CLIPTextEncode
+    title: "CLIP Text Encode (Negative)"
+
+  ksampler:
+    class_type: KSampler
+    title: "KSampler"
+    params:
+      denoise: 1.0
+
+  vae_decode:
+    class_type: VAEDecode
+    title: "VAE Decode"
+
+  save_image:
+    class_type: SaveImage
+    title: "Save Image"
+
+connections:
+  - from: "unet_loader:0"
+    to: "ksampler:model"
+  - from: "clip_loader:0"
+    to: "pos_prompt:clip"
+  - from: "clip_loader:0"
+    to: "neg_prompt:clip"
+  - from: "vae_loader:0"
+    to: "vae_decode:vae"
+  - from: "vae_loader:0"
+    to: "vae_encode:vae"
+
+  - from: "pos_prompt:0"
+    to: "ksampler:positive"
+  - from: "neg_prompt:0"
+    to: "ksampler:negative"
+
+  - from: "latent_source:0"
+    to: "ksampler:latent_image"
+
+  - from: "ksampler:0"
+    to: "vae_decode:samples"
+  - from: "vae_decode:0"
+    to: "save_image:images"
+
+dynamic_lora_chains:
+  lora_chain:
+    template: "LoraLoader"
+    output_map:
+      "unet_loader:0": "model"
+      "clip_loader:0": "clip"
+    input_map:
+      "model": "model"
+      "clip": "clip"
+    end_input_map:
+      "model": ["ksampler:model"]
+      "clip": ["pos_prompt:clip", "neg_prompt:clip"]
+
+dynamic_reference_latent_chains:
+  reference_latent_chain:
+    ksampler_node: "ksampler"
+    vae_node: "vae_loader"
+
+ui_map:
+  unet_name: "unet_loader:unet_name"
+  clip_name: "clip_loader:clip_name"
+  vae_name: "vae_loader:vae_name"
+
+  positive_prompt: "pos_prompt:text"
+  negative_prompt: "neg_prompt:text"
+
+  seed: "ksampler:seed"
+  steps: "ksampler:steps"
+  cfg: "ksampler:cfg"
+  sampler_name: "ksampler:sampler_name"
+  scheduler: "ksampler:scheduler"
+  denoise: "ksampler:denoise"
+
+  filename_prefix: "save_image:filename_prefix"
core/pipelines/workflow_recipes/_partials/conditioning/hidream.yaml
ADDED
@@ -0,0 +1,53 @@
+nodes:
+  unet_loader:
+    class_type: UNETLoader
+    title: "Load HiDream UNET"
+    params:
+      weight_dtype: "default"
+  vae_loader:
+    class_type: VAELoader
+    title: "Load HiDream VAE"
+  clip_loader:
+    class_type: QuadrupleCLIPLoader
+    title: "Load HiDream Quadruple CLIP"
+
+  model_sampler:
+    class_type: ModelSamplingSD3
+    title: "ModelSamplingSD3"
+    params:
+      shift: 6.0
+
+connections:
+  - from: "unet_loader:0"
+    to: "model_sampler:model"
+
+  - from: "model_sampler:0"
+    to: "ksampler:model"
+
+  - from: "clip_loader:0"
+    to: "pos_prompt:clip"
+  - from: "clip_loader:0"
+    to: "neg_prompt:clip"
+
+  - from: "pos_prompt:0"
+    to: "ksampler:positive"
+  - from: "neg_prompt:0"
+    to: "ksampler:negative"
+
+  - from: "vae_loader:0"
+    to: "vae_decode:vae"
+  - from: "vae_loader:0"
+    to: "vae_encode:vae"
+
+dynamic_conditioning_chains:
+  conditioning_chain:
+    ksampler_node: "ksampler"
+    clip_source: "clip_loader:0"
+
+ui_map:
+  unet_name: "unet_loader:unet_name"
+  vae_name: "vae_loader:vae_name"
+  clip1_name: "clip_loader:clip_name1"
+  clip2_name: "clip_loader:clip_name2"
+  clip3_name: "clip_loader:clip_name3"
+  clip4_name: "clip_loader:clip_name4"
core/pipelines/workflow_recipes/_partials/conditioning/hunyuanimage.yaml
ADDED
@@ -0,0 +1,42 @@
+nodes:
+  unet_loader:
+    class_type: UNETLoader
+    title: "Load Hunyuan UNET"
+    params:
+      weight_dtype: "default"
+  vae_loader:
+    class_type: VAELoader
+    title: "Load Hunyuan VAE"
+  clip_loader:
+    class_type: DualCLIPLoader
+    title: "Load Hunyuan Dual CLIP"
+    params:
+      type: "hunyuan_image"
+      device: "default"
+
+connections:
+  - from: "unet_loader:0"
+    to: "ksampler:model"
+  - from: "clip_loader:0"
+    to: "pos_prompt:clip"
+  - from: "clip_loader:0"
+    to: "neg_prompt:clip"
+  - from: "vae_loader:0"
+    to: "vae_decode:vae"
+  - from: "vae_loader:0"
+    to: "vae_encode:vae"
+  - from: "pos_prompt:0"
+    to: "ksampler:positive"
+  - from: "neg_prompt:0"
+    to: "ksampler:negative"
+
+dynamic_conditioning_chains:
+  conditioning_chain:
+    ksampler_node: "ksampler"
+    clip_source: "clip_loader:0"
+
+ui_map:
+  unet_name: "unet_loader:unet_name"
+  vae_name: "vae_loader:vae_name"
+  clip1_name: "clip_loader:clip_name1"
+  clip2_name: "clip_loader:clip_name2"
core/pipelines/workflow_recipes/_partials/conditioning/longcat-image.yaml
ADDED
@@ -0,0 +1,83 @@
+nodes:
+  unet_loader:
+    class_type: UNETLoader
+    title: "Load Diffusion Model"
+    params:
+      weight_dtype: "default"
+  vae_loader:
+    class_type: VAELoader
+    title: "Load VAE"
+  clip_loader:
+    class_type: CLIPLoader
+    title: "Load CLIP"
+    params:
+      type: "longcat_image"
+      device: "default"
+
+  cfg_norm:
+    class_type: CFGNorm
+    title: "CFGNorm"
+    params:
+      strength: 1.0
+
+  flux_guidance_pos:
+    class_type: FluxGuidance
+    title: "FluxGuidance (Positive)"
+    params:
+      guidance: 4.0
+
+  flux_guidance_neg:
+    class_type: FluxGuidance
+    title: "FluxGuidance (Negative)"
+    params:
+      guidance: 4.0
+
+connections:
+  - from: "unet_loader:0"
+    to: "cfg_norm:model"
+  - from: "cfg_norm:0"
+    to: "ksampler:model"
+
+  - from: "clip_loader:0"
+    to: "pos_prompt:clip"
+  - from: "clip_loader:0"
+    to: "neg_prompt:clip"
+
+  - from: "pos_prompt:0"
+    to: "flux_guidance_pos:conditioning"
+  - from: "neg_prompt:0"
+    to: "flux_guidance_neg:conditioning"
+
+  - from: "flux_guidance_pos:0"
+    to: "ksampler:positive"
+  - from: "flux_guidance_neg:0"
+    to: "ksampler:negative"
+
+  - from: "vae_loader:0"
+    to: "vae_decode:vae"
+  - from: "vae_loader:0"
+    to: "vae_encode:vae"
+
+dynamic_lora_chains:
+  lora_chain:
+    template: "LoraLoader"
+    output_map:
+      "unet_loader:0": "model"
+      "clip_loader:0": "clip"
+    input_map:
+      "model": "model"
+      "clip": "clip"
+    end_input_map:
+      "model": ["cfg_norm:model"]
+      "clip": ["pos_prompt:clip", "neg_prompt:clip"]
+
+dynamic_conditioning_chains:
+  conditioning_chain:
+    flux_guidance_node: "flux_guidance_pos"
+    ksampler_node: "ksampler"
+    clip_source: "clip_loader:0"
+
+ui_map:
+  unet_name: "unet_loader:unet_name"
+  vae_name: "vae_loader:vae_name"
+  clip_name: "clip_loader:clip_name"
core/pipelines/workflow_recipes/_partials/conditioning/lumina.yaml
ADDED
@@ -0,0 +1,51 @@
+nodes:
+  ckpt_loader:
+    class_type: CheckpointLoaderSimple
+    title: "Load Checkpoint"
+  model_sampler:
+    class_type: ModelSamplingAuraFlow
+    title: "ModelSamplingAuraFlow"
+    params:
+      shift: 4.0
+
+connections:
+  - from: "ckpt_loader:0"
+    to: "model_sampler:model"
+  - from: "model_sampler:0"
+    to: "ksampler:model"
+
+  - from: "ckpt_loader:1"
+    to: "pos_prompt:clip"
+  - from: "ckpt_loader:1"
+    to: "neg_prompt:clip"
+  - from: "pos_prompt:0"
+    to: "ksampler:positive"
+  - from: "neg_prompt:0"
+    to: "ksampler:negative"
+
+  - from: "ckpt_loader:2"
+    to: "vae_decode:vae"
+  - from: "ckpt_loader:2"
+    to: "vae_encode:vae"
+
+dynamic_lora_chains:
+  lora_chain:
+    template: "LoraLoader"
+    start: "ckpt_loader"
+    output_map:
+      "0": "model"
+      "1": "clip"
+    input_map:
+      "model": "model"
+      "clip": "clip"
+    end_input_map:
+      "model": ["model_sampler:model"]
+      "clip": ["pos_prompt:clip", "neg_prompt:clip"]
+
+dynamic_conditioning_chains:
+  conditioning_chain:
+    ksampler_node: "ksampler"
+    clip_source: "ckpt_loader:1"
+
+ui_map:
+  model_name: "ckpt_loader:ckpt_name"
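
Note: the dynamic_lora_chains block describes how user-selected LoRAs get spliced between the loader and the rest of the graph. output_map names the stock outputs feeding the chain, input_map names each templated node's inputs, and end_input_map lists the downstream inputs to rewire onto the last link. A rough sketch of that splicing, assuming the builder threads the nodes in order (the spec/lora dict shapes and function name are assumptions):

def splice_lora_chain(graph, spec, loras, start_node):
    # output_map here uses bare indices plus a start node, e.g. {"0": "model"}.
    prev = {alias: [start_node, int(idx)]
            for idx, alias in spec["output_map"].items()}
    for i, lora in enumerate(loras):
        node_id = f"lora_{i}"
        graph[node_id] = {
            "class_type": spec["template"],  # "LoraLoader"
            "inputs": {
                "lora_name": lora["name"],
                "strength_model": lora["scale"],
                "strength_clip": lora["scale"],
                **{inp: prev[alias] for inp, alias in spec["input_map"].items()},
            },
        }
        prev = {"model": [node_id, 0], "clip": [node_id, 1]}  # LoraLoader outputs
    # Rewire every consumer listed in end_input_map onto the final link.
    for alias, targets in spec["end_input_map"].items():
        for target in targets:  # e.g. "pos_prompt:clip"
            dst, inp = target.split(":")
            graph[dst]["inputs"][inp] = prev[alias]

With no LoRAs selected, prev still holds the original source references, so the rewiring is a no-op.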
core/pipelines/workflow_recipes/_partials/conditioning/newbie-image.yaml
ADDED
@@ -0,0 +1,65 @@
+nodes:
+  unet_loader:
+    class_type: UNETLoader
+    title: "Load Diffusion Model"
+    params:
+      weight_dtype: "default"
+  vae_loader:
+    class_type: VAELoader
+    title: "Load VAE"
+  clip_loader:
+    class_type: DualCLIPLoader
+    title: "Load Dual CLIP"
+    params:
+      type: "newbie"
+      device: "default"
+  model_sampler:
+    class_type: ModelSamplingAuraFlow
+    title: "ModelSamplingAuraFlow"
+    params:
+      shift: 6
+
+connections:
+  - from: "unet_loader:0"
+    to: "model_sampler:model"
+  - from: "model_sampler:0"
+    to: "ksampler:model"
+
+  - from: "clip_loader:0"
+    to: "pos_prompt:clip"
+  - from: "clip_loader:0"
+    to: "neg_prompt:clip"
+
+  - from: "pos_prompt:0"
+    to: "ksampler:positive"
+  - from: "neg_prompt:0"
+    to: "ksampler:negative"
+
+  - from: "vae_loader:0"
+    to: "vae_decode:vae"
+  - from: "vae_loader:0"
+    to: "vae_encode:vae"
+
+dynamic_newbie_lora_chains:
+  lora_chain:
+    template: "NewBieLoraLoader"
+    output_map:
+      "unet_loader:0": "model"
+      "clip_loader:0": "clip"
+    input_map:
+      "model": "model"
+      "clip": "clip"
+    end_input_map:
+      "model": ["model_sampler:model"]
+      "clip": ["pos_prompt:clip", "neg_prompt:clip"]
+
+dynamic_conditioning_chains:
+  conditioning_chain:
+    ksampler_node: "ksampler"
+    clip_source: "clip_loader:0"
+
+ui_map:
+  unet_name: "unet_loader:unet_name"
+  vae_name: "vae_loader:vae_name"
+  clip1_name: "clip_loader:clip_name1"
+  clip2_name: "clip_loader:clip_name2"
core/pipelines/workflow_recipes/_partials/conditioning/omnigen2.yaml
ADDED
@@ -0,0 +1,59 @@
+nodes:
+  unet_loader:
+    class_type: UNETLoader
+    title: "Load Diffusion Model"
+    params:
+      weight_dtype: "default"
+  vae_loader:
+    class_type: VAELoader
+    title: "Load VAE"
+  clip_loader:
+    class_type: CLIPLoader
+    title: "Load CLIP"
+    params:
+      type: "omnigen2"
+      device: "default"
+
+connections:
+  - from: "unet_loader:0"
+    to: "ksampler:model"
+  - from: "clip_loader:0"
+    to: "pos_prompt:clip"
+  - from: "clip_loader:0"
+    to: "neg_prompt:clip"
+  - from: "pos_prompt:0"
+    to: "ksampler:positive"
+  - from: "neg_prompt:0"
+    to: "ksampler:negative"
+  - from: "vae_loader:0"
+    to: "vae_decode:vae"
+  - from: "vae_loader:0"
+    to: "vae_encode:vae"
+
+dynamic_lora_chains:
+  lora_chain:
+    template: "LoraLoader"
+    output_map:
+      "unet_loader:0": "model"
+      "clip_loader:0": "clip"
+    input_map:
+      "model": "model"
+      "clip": "clip"
+    end_input_map:
+      "model": ["ksampler:model"]
+      "clip": ["pos_prompt:clip", "neg_prompt:clip"]
+
+dynamic_conditioning_chains:
+  conditioning_chain:
+    ksampler_node: "ksampler"
+    clip_source: "clip_loader:0"
+
+dynamic_reference_latent_chains:
+  reference_latent_chain:
+    ksampler_node: "ksampler"
+    vae_node: "vae_loader"
+
+ui_map:
+  unet_name: "unet_loader:unet_name"
+  vae_name: "vae_loader:vae_name"
+  clip_name: "clip_loader:clip_name"
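
Note: dynamic_reference_latent_chains points the injector at the sampler and the VAE so a reference image can be encoded and attached to the conditioning, the pattern used by edit-capable models such as OmniGen2 (see chain_injectors/reference_latent_injector.py). A rough sketch of what such an injector plausibly builds; ReferenceLatent is a real ComfyUI node, but this wiring and the node ids are illustrative assumptions:

def inject_reference_latent(graph, ksampler_node, vae_node, image_name):
    # LoadImage -> VAEEncode -> ReferenceLatent, then rewire ksampler's
    # positive conditioning through the new ReferenceLatent node.
    graph["ref_image"] = {"class_type": "LoadImage",
                          "inputs": {"image": image_name}}
    graph["ref_encode"] = {"class_type": "VAEEncode",
                           "inputs": {"pixels": ["ref_image", 0],
                                      "vae": [vae_node, 0]}}
    old_positive = graph[ksampler_node]["inputs"]["positive"]
    graph["ref_latent"] = {"class_type": "ReferenceLatent",
                           "inputs": {"conditioning": old_positive,
                                      "latent": ["ref_encode", 0]}}
    graph[ksampler_node]["inputs"]["positive"] = ["ref_latent", 0]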
core/pipelines/workflow_recipes/_partials/conditioning/ovis-image.yaml
ADDED
@@ -0,0 +1,50 @@
+nodes:
+  unet_loader:
+    class_type: UNETLoader
+    title: "Load Diffusion Model"
+    params:
+      weight_dtype: "default"
+  vae_loader:
+    class_type: VAELoader
+    title: "Load VAE"
+  clip_loader:
+    class_type: CLIPLoader
+    title: "Load CLIP"
+    params:
+      type: "ovis"
+      device: "default"
+  model_sampler:
+    class_type: ModelSamplingAuraFlow
+    params:
+      shift: 3.0
+
+connections:
+  - from: "unet_loader:0"
+    to: "model_sampler:model"
+  - from: "model_sampler:0"
+    to: "ksampler:model"
+
+  - from: "clip_loader:0"
+    to: "pos_prompt:clip"
+  - from: "clip_loader:0"
+    to: "neg_prompt:clip"
+
+  - from: "pos_prompt:0"
+    to: "ksampler:positive"
+  - from: "neg_prompt:0"
+    to: "ksampler:negative"
+
+  - from: "vae_loader:0"
+    to: "vae_decode:vae"
+  - from: "vae_loader:0"
+    to: "vae_encode:vae"
+
+dynamic_conditioning_chains:
+  conditioning_chain:
+    ksampler_node: "ksampler"
+    clip_source: "clip_loader:0"
+
+ui_map:
+  unet_name: "unet_loader:unet_name"
+  vae_name: "vae_loader:vae_name"
+  clip_name: "clip_loader:clip_name"
core/pipelines/workflow_recipes/_partials/conditioning/qwen-image.yaml
ADDED
@@ -0,0 +1,80 @@
+nodes:
+  unet_loader:
+    class_type: UNETLoader
+    title: "Load Qwen UNET"
+    params:
+      weight_dtype: "default"
+  vae_loader:
+    class_type: VAELoader
+    title: "Load Qwen VAE"
+  clip_loader:
+    class_type: CLIPLoader
+    title: "Load Qwen CLIP"
+    params:
+      type: "qwen_image"
+      device: "default"
+
+  lora_loader:
+    class_type: LoraLoaderModelOnly
+    title: "Load Qwen Lightning LoRA"
+    params:
+      strength_model: 1.0
+  model_sampler:
+    class_type: ModelSamplingAuraFlow
+    title: "ModelSamplingAuraFlow"
+    params:
+      shift: 3.1
+
+connections:
+  - from: "unet_loader:0"
+    to: "lora_loader:model"
+  - from: "lora_loader:0"
+    to: "model_sampler:model"
+
+  - from: "model_sampler:0"
+    to: "ksampler:model"
+
+  - from: "clip_loader:0"
+    to: "pos_prompt:clip"
+  - from: "clip_loader:0"
+    to: "neg_prompt:clip"
+
+  - from: "vae_loader:0"
+    to: "vae_decode:vae"
+  - from: "vae_loader:0"
+    to: "vae_encode:vae"
+
+  - from: "pos_prompt:0"
+    to: "ksampler:positive"
+  - from: "neg_prompt:0"
+    to: "ksampler:negative"
+
+dynamic_lora_chains:
+  lora_chain:
+    template: "LoraLoader"
+    output_map:
+      "lora_loader:0": "model"
+      "clip_loader:0": "clip"
+    input_map:
+      "model": "model"
+      "clip": "clip"
+    end_input_map:
+      "model": ["model_sampler:model"]
+      "clip": ["pos_prompt:clip", "neg_prompt:clip"]
+
+dynamic_controlnet_chains:
+  controlnet_chain:
+    template: "ControlNetApplyAdvanced"
+    ksampler_node: "ksampler"
+    vae_source: "vae_loader:0"
+
+dynamic_conditioning_chains:
+  conditioning_chain:
+    ksampler_node: "ksampler"
+    clip_source: "clip_loader:0"
+
+ui_map:
+  unet_name: "unet_loader:unet_name"
+  vae_name: "vae_loader:vae_name"
+  clip_name: "clip_loader:clip_name"
+  lora_name: "lora_loader:lora_name"
core/pipelines/workflow_recipes/_partials/conditioning/sd15.yaml
ADDED
@@ -0,0 +1,63 @@
+nodes:
+  ckpt_loader:
+    class_type: CheckpointLoaderSimple
+    title: "Load Checkpoint"
+  clip_set_last_layer:
+    class_type: CLIPSetLastLayer
+    title: "CLIP Set Last Layer"
+
+connections:
+  - from: "ckpt_loader:0"
+    to: "ksampler:model"
+  - from: "ckpt_loader:1"
+    to: "clip_set_last_layer:clip"
+  - from: "clip_set_last_layer:0"
+    to: "pos_prompt:clip"
+  - from: "clip_set_last_layer:0"
+    to: "neg_prompt:clip"
+  - from: "pos_prompt:0"
+    to: "ksampler:positive"
+  - from: "neg_prompt:0"
+    to: "ksampler:negative"
+  - from: "ckpt_loader:2"
+    to: "vae_decode:vae"
+  - from: "ckpt_loader:2"
+    to: "vae_encode:vae"
+
+dynamic_lora_chains:
+  lora_chain:
+    template: "LoraLoader"
+    start: "clip_set_last_layer"
+    output_map:
+      "ckpt_loader:0": "model"
+      "0": "clip"
+    input_map:
+      "model": "model"
+      "clip": "clip"
+    end_input_map:
+      "model": ["ksampler:model"]
+      "clip": ["pos_prompt:clip", "neg_prompt:clip"]
+
+dynamic_controlnet_chains:
+  controlnet_chain:
+    template: "ControlNetApplyAdvanced"
+    ksampler_node: "ksampler"
+    vae_source: "ckpt_loader:2"
+
+dynamic_ipadapter_chains:
+  ipadapter_chain:
+    end: "ksampler"
+    final_preset: "{{ ipadapter_final_preset }}"
+    final_weight: "{{ ipadapter_final_weight }}"
+    final_embeds_scaling: "{{ ipadapter_embeds_scaling }}"
+    final_loader_type: "{{ ipadapter_final_loader_type }}"
+    final_lora_strength: "{{ ipadapter_final_lora_strength }}"
+
+dynamic_conditioning_chains:
+  conditioning_chain:
+    ksampler_node: "ksampler"
+    clip_source: "clip_set_last_layer:0"
+
+ui_map:
+  model_name: "ckpt_loader:ckpt_name"
+  clip_skip: "clip_set_last_layer:stop_at_clip_layer"
core/pipelines/workflow_recipes/_partials/conditioning/sd35.yaml
ADDED
@@ -0,0 +1,52 @@
+nodes:
+  ckpt_loader:
+    class_type: CheckpointLoaderSimple
+    title: "Load Checkpoint"
+
+connections:
+  - from: "ckpt_loader:0"
+    to: "ksampler:model"
+  - from: "ckpt_loader:1"
+    to: "pos_prompt:clip"
+  - from: "ckpt_loader:1"
+    to: "neg_prompt:clip"
+  - from: "pos_prompt:0"
+    to: "ksampler:positive"
+  - from: "neg_prompt:0"
+    to: "ksampler:negative"
+  - from: "ckpt_loader:2"
+    to: "vae_decode:vae"
+  - from: "ckpt_loader:2"
+    to: "vae_encode:vae"
+
+dynamic_lora_chains:
+  lora_chain:
+    template: "LoraLoader"
+    start: "ckpt_loader"
+    output_map:
+      "0": "model"
+      "1": "clip"
+    input_map:
+      "model": "model"
+      "clip": "clip"
+    end_input_map:
+      "model": ["ksampler:model"]
+      "clip": ["pos_prompt:clip", "neg_prompt:clip"]
+
+dynamic_controlnet_chains:
+  controlnet_chain:
+    template: "ControlNetApplyAdvanced"
+    ksampler_node: "ksampler"
+    vae_source: "ckpt_loader:2"
+
+dynamic_sd3_ipadapter_chains:
+  sd3_ipadapter_chain:
+    ksampler_node: "ksampler"
+
+dynamic_conditioning_chains:
+  conditioning_chain:
+    ksampler_node: "ksampler"
+    clip_source: "ckpt_loader:1"
+
+ui_map:
+  model_name: "ckpt_loader:ckpt_name"
core/pipelines/workflow_recipes/_partials/conditioning/sdxl.yaml
CHANGED
@@ -1,15 +1,7 @@
 nodes:
   ckpt_loader:
     class_type: CheckpointLoaderSimple
-    title: "Load
-
-  pos_prompt:
-    class_type: CLIPTextEncode
-    title: "Positive Prompt Encoder"
-
-  neg_prompt:
-    class_type: CLIPTextEncode
-    title: "Negative Prompt Encoder"
+    title: "Load Checkpoint"
 
 connections:
   - from: "ckpt_loader:0"
@@ -18,26 +10,22 @@ connections:
     to: "pos_prompt:clip"
   - from: "ckpt_loader:1"
     to: "neg_prompt:clip"
-
-  - from: "ckpt_loader:2"
-    to: "vae_decode:vae"
-
   - from: "pos_prompt:0"
     to: "ksampler:positive"
   - from: "neg_prompt:0"
     to: "ksampler:negative"
-
-
-
-
-
-
+  - from: "ckpt_loader:2"
+    to: "vae_decode:vae"
+  - from: "ckpt_loader:2"
+    to: "vae_encode:vae"
+
 dynamic_lora_chains:
   lora_chain:
     template: "LoraLoader"
+    start: "ckpt_loader"
     output_map:
-      "
-      "
+      "0": "model"
+      "1": "clip"
     input_map:
       "model": "model"
       "clip": "clip"
@@ -57,8 +45,13 @@ dynamic_ipadapter_chains:
     final_preset: "{{ ipadapter_final_preset }}"
     final_weight: "{{ ipadapter_final_weight }}"
     final_embeds_scaling: "{{ ipadapter_embeds_scaling }}"
+    final_loader_type: "{{ ipadapter_final_loader_type }}"
+    final_lora_strength: "{{ ipadapter_final_lora_strength }}"
 
 dynamic_conditioning_chains:
   conditioning_chain:
     ksampler_node: "ksampler"
-    clip_source: "ckpt_loader:1"
+    clip_source: "ckpt_loader:1"
+
+ui_map:
+  model_name: "ckpt_loader:ckpt_name"
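
Note: two output_map conventions now coexist across these partials. Without start:, keys are fully qualified "node:index" references; with start: (as added here), keys are bare output indices on the named start node. A resolver has to accept both; a hypothetical sketch:

def resolve_output(key: str, start_node=None):
    # Sketch only: accept both output_map key styles seen across the partials.
    if ":" in key:                    # "ckpt_loader:0" -> explicit node:index
        node, idx = key.split(":")
        return [node, int(idx)]
    if start_node is None:
        raise ValueError(f"bare index {key!r} needs a 'start' node")
    return [start_node, int(key)]     # "0" -> output 0 of the start node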
core/pipelines/workflow_recipes/_partials/conditioning/z-image.yaml
ADDED
@@ -0,0 +1,65 @@
+nodes:
+  unet_loader:
+    class_type: UNETLoader
+    title: "Load Diffusion Model"
+    params:
+      weight_dtype: "default"
+  vae_loader:
+    class_type: VAELoader
+    title: "Load VAE"
+  clip_loader:
+    class_type: CLIPLoader
+    title: "Load CLIP"
+    params:
+      type: "lumina2"
+      device: "default"
+  model_sampler:
+    class_type: ModelSamplingAuraFlow
+    params:
+      shift: 3.0
+
+connections:
+  - from: "unet_loader:0"
+    to: "model_sampler:model"
+  - from: "model_sampler:0"
+    to: "ksampler:model"
+
+  - from: "clip_loader:0"
+    to: "pos_prompt:clip"
+  - from: "clip_loader:0"
+    to: "neg_prompt:clip"
+
+  - from: "pos_prompt:0"
+    to: "ksampler:positive"
+  - from: "neg_prompt:0"
+    to: "ksampler:negative"
+
+  - from: "vae_loader:0"
+    to: "vae_decode:vae"
+  - from: "vae_loader:0"
+    to: "vae_encode:vae"
+
+dynamic_lora_chains:
+  lora_chain:
+    template: "LoraLoader"
+    output_map:
+      "unet_loader:0": "model"
+      "clip_loader:0": "clip"
+    input_map:
+      "model": "model"
+      "clip": "clip"
+    end_input_map:
+      "model": ["model_sampler:model"]
+      "clip": ["pos_prompt:clip", "neg_prompt:clip"]
+
+dynamic_diffsynth_controlnet_chains:
+  diffsynth_controlnet_chain:
+    template: "QwenImageDiffsynthControlnet"
+    model_sampler_node: "model_sampler"
+    ksampler_node: "ksampler"
+    vae_source: "vae_loader:0"
+
+ui_map:
+  unet_name: "unet_loader:unet_name"
+  vae_name: "vae_loader:vae_name"
+  clip_name: "clip_loader:clip_name"
core/pipelines/workflow_recipes/_partials/input/hires_fix.yaml
CHANGED
@@ -1,15 +1,16 @@
 nodes:
   input_image_loader:
     class_type: LoadImage
-
+    title: "Load Input Image"
   vae_encode:
     class_type: VAEEncode
-
+    title: "VAE Encode (Hires Pre-step)"
   latent_upscaler:
     class_type: LatentUpscaleBy
-
+    title: "Upscale Latent By"
   latent_source:
     class_type: RepeatLatentBatch
+    title: "Repeat Latent Batch for Hires"
 
 connections:
   - from: "input_image_loader:0"
core/pipelines/workflow_recipes/_partials/input/img2img.yaml
CHANGED
@@ -1,12 +1,13 @@
 nodes:
   input_image_loader:
     class_type: LoadImage
-
+    title: "Load Input Image"
   vae_encode:
     class_type: VAEEncode
-
+    title: "VAE Encode (Img2Img)"
   latent_source:
     class_type: RepeatLatentBatch
+    title: "Repeat Latent Batch"
 
 connections:
   - from: "input_image_loader:0"
core/pipelines/workflow_recipes/_partials/input/inpaint.yaml
CHANGED
@@ -2,24 +2,22 @@ nodes:
   inpaint_loader:
     class_type: LoadImage
     title: "Load Inpaint Image+Mask"
-
   vae_encode:
     class_type: VAEEncodeForInpaint
-
-      grow_mask_by: 6
-
+    title: "VAE Encode (for Inpainting)"
   latent_source:
     class_type: RepeatLatentBatch
-
+    title: "Repeat Latent Batch"
+
 connections:
   - from: "inpaint_loader:0"
     to: "vae_encode:pixels"
   - from: "inpaint_loader:1"
     to: "vae_encode:mask"
-
   - from: "vae_encode:0"
     to: "latent_source:samples"
 
 ui_map:
-
-  batch_size: "latent_source:amount"
+  input_image: "inpaint_loader:image"
+  batch_size: "latent_source:amount"
+  grow_mask_by: "vae_encode:grow_mask_by"
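
Note: the pattern in this change recurs throughout the input partials. Values that used to be hard-coded node params (grow_mask_by: 6, and feathering: 10 in outpaint.yaml below) move into ui_map, which binds a UI field name to a node:param slot. A sketch of how such a binding could be applied, assuming the builder receives the collected UI values as a flat dict (the function and dict layout are illustrative):

def apply_ui_map(graph: dict, ui_map: dict, ui_values: dict) -> None:
    # e.g. ui_map entry: grow_mask_by -> "vae_encode:grow_mask_by"
    for ui_key, target in ui_map.items():
        if ui_key not in ui_values:
            continue  # keep the node's own default
        node_id, param = target.split(":")
        graph[node_id]["inputs"][param] = ui_values[ui_key]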
core/pipelines/workflow_recipes/_partials/input/outpaint.yaml
CHANGED
@@ -1,38 +1,41 @@
 nodes:
   input_image_loader:
     class_type: LoadImage
-
+    title: "Load Image for Outpaint"
+  scale_image:
+    class_type: ImageScaleToTotalPixels
+    title: "Scale Image to Total Pixels"
+    params:
+      upscale_method: "nearest-exact"
   pad_image:
     class_type: ImagePadForOutpaint
-
-      feathering: 10
-
+    title: "Pad Image for Outpainting"
   vae_encode:
     class_type: VAEEncodeForInpaint
-
-      grow_mask_by: 6
-
+    title: "VAE Encode (for Inpainting)"
   latent_source:
     class_type: RepeatLatentBatch
+    title: "Repeat Latent Batch"
 
 connections:
   - from: "input_image_loader:0"
+    to: "scale_image:image"
+  - from: "scale_image:0"
     to: "pad_image:image"
-
   - from: "pad_image:0"
     to: "vae_encode:pixels"
   - from: "pad_image:1"
     to: "vae_encode:mask"
-
   - from: "vae_encode:0"
     to: "latent_source:samples"
 
 ui_map:
   input_image: "input_image_loader:image"
-
+  megapixels: "scale_image:megapixels"
   left: "pad_image:left"
   top: "pad_image:top"
   right: "pad_image:right"
   bottom: "pad_image:bottom"
-
+  feathering: "pad_image:feathering"
+  grow_mask_by: "vae_encode:grow_mask_by"
   batch_size: "latent_source:amount"
core/pipelines/workflow_recipes/_partials/input/txt2img.yaml
CHANGED
@@ -1,8 +1,2 @@
-
-
-    class_type: EmptyLatentImage
-
-ui_map:
-  width: "latent_source:width"
-  height: "latent_source:height"
-  batch_size: "latent_source:batch_size"
+imports:
+  - "txt2img_{{ latent_type }}.yaml"
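
Note: txt2img.yaml is reduced to a templated import that dispatches to one of the per-architecture latent partials added below (txt2img_latent.yaml, txt2img_sd3_latent.yaml, and so on). A sketch of the placeholder expansion; the function is an assumption, and the context is presumed to carry the model's latent_type from model_list.yaml, where 'latent' is the documented default:

import re

def render_import(path_template: str, context: dict) -> str:
    # Expand "{{ var }}" placeholders in a recipe import path.
    return re.sub(r"\{\{\s*(\w+)\s*\}\}",
                  lambda m: str(context[m.group(1)]), path_template)

# render_import("txt2img_{{ latent_type }}.yaml", {"latent_type": "sd3_latent"})
# -> "txt2img_sd3_latent.yaml"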
core/pipelines/workflow_recipes/_partials/input/txt2img_chroma_radiance_latent.yaml
ADDED
@@ -0,0 +1,11 @@
+nodes:
+  latent_source:
+    class_type: "EmptyChromaRadianceLatentImage"
+    title: "EmptyChromaRadianceLatentImage"
+
+connections: []
+
+ui_map:
+  width: "latent_source:width"
+  height: "latent_source:height"
+  batch_size: "latent_source:batch_size"
core/pipelines/workflow_recipes/_partials/input/txt2img_flux2_latent.yaml
ADDED
@@ -0,0 +1,11 @@
+nodes:
+  latent_source:
+    class_type: "EmptyFlux2LatentImage"
+    title: "Empty Flux 2 Latent"
+
+connections: []
+
+ui_map:
+  width: "latent_source:width"
+  height: "latent_source:height"
+  batch_size: "latent_source:batch_size"
core/pipelines/workflow_recipes/_partials/input/txt2img_hunyuan_latent.yaml
ADDED
@@ -0,0 +1,11 @@
+nodes:
+  latent_source:
+    class_type: "EmptyHunyuanImageLatent"
+    title: "EmptyHunyuanImageLatent"
+
+connections: []
+
+ui_map:
+  width: "latent_source:width"
+  height: "latent_source:height"
+  batch_size: "latent_source:batch_size"
core/pipelines/workflow_recipes/_partials/input/txt2img_latent.yaml
ADDED
@@ -0,0 +1,11 @@
+nodes:
+  latent_source:
+    class_type: "{{ latent_generator_template }}"
+    title: "Empty Latent Image"
+
+connections: []
+
+ui_map:
+  width: "latent_source:width"
+  height: "latent_source:height"
+  batch_size: "latent_source:batch_size"
core/pipelines/workflow_recipes/_partials/input/txt2img_sd3_latent.yaml
ADDED
@@ -0,0 +1,11 @@
+nodes:
+  latent_source:
+    class_type: "EmptySD3LatentImage"
+    title: "EmptySD3LatentImage"
+
+connections: []
+
+ui_map:
+  width: "latent_source:width"
+  height: "latent_source:height"
+  batch_size: "latent_source:batch_size"
core/pipelines/workflow_recipes/sd_unified_recipe.yaml
CHANGED
@@ -1,10 +1,8 @@
 imports:
-  - "_partials/
+  - "_partials/_base_sampler_sd.yaml"
   - "_partials/input/{{ task_type }}.yaml"
-  - "_partials/conditioning/
+  - "_partials/conditioning/{{ model_type }}.yaml"
 
 connections:
   - from: "latent_source:0"
-    to: "ksampler:latent_image"
+    to: "ksampler:latent_image"
-  - from: "ckpt_loader:2"
-    to: "vae_encode:vae"
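
Note: the unified recipe now composes a full workflow from three templated partials (a shared sampler base, a per-task input stage, and a per-architecture conditioning stage) plus one connection joining the stages; the VAE wiring moved into each conditioning partial. A sketch of the composition step, assuming partials merge by union-ing nodes/ui_map and concatenating connections (the real loader would also have to merge the dynamic_*_chains sections):

import yaml

def compose_recipe(paths):
    # Sketch only: merge already template-expanded partials into one spec.
    merged = {"nodes": {}, "connections": [], "ui_map": {}}
    for path in paths:
        with open(path, "r", encoding="utf-8") as f:
            part = yaml.safe_load(f) or {}
        merged["nodes"].update(part.get("nodes", {}))
        merged["connections"] += part.get("connections", []) or []
        merged["ui_map"].update(part.get("ui_map", {}))
    return merged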
core/settings.py
CHANGED
@@ -6,18 +6,40 @@ CHECKPOINT_DIR = "models/checkpoints"
 LORA_DIR = "models/loras"
 EMBEDDING_DIR = "models/embeddings"
 CONTROLNET_DIR = "models/controlnet"
+MODEL_PATCHES_DIR = "models/model_patches"
 DIFFUSION_MODELS_DIR = "models/diffusion_models"
 VAE_DIR = "models/vae"
 TEXT_ENCODERS_DIR = "models/text_encoders"
+STYLE_MODELS_DIR = "models/style_models"
+CLIP_VISION_DIR = "models/clip_vision"
+IPADAPTER_DIR = "models/ipadapter"
+IPADAPTER_FLUX_DIR = "models/ipadapter-flux"
 INPUT_DIR = "input"
 OUTPUT_DIR = "output"
 
+CATEGORY_TO_DIR_MAP = {
+    "diffusion_models": DIFFUSION_MODELS_DIR,
+    "text_encoders": TEXT_ENCODERS_DIR,
+    "vae": VAE_DIR,
+    "checkpoints": CHECKPOINT_DIR,
+    "loras": LORA_DIR,
+    "controlnet": CONTROLNET_DIR,
+    "model_patches": MODEL_PATCHES_DIR,
+    "embeddings": EMBEDDING_DIR,
+    "style_models": STYLE_MODELS_DIR,
+    "clip_vision": CLIP_VISION_DIR,
+    "ipadapter": IPADAPTER_DIR,
+    "ipadapter-flux": IPADAPTER_FLUX_DIR
+}
+
 _PROJECT_ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
 _MODEL_LIST_PATH = os.path.join(_PROJECT_ROOT, 'yaml', 'model_list.yaml')
 _FILE_LIST_PATH = os.path.join(_PROJECT_ROOT, 'yaml', 'file_list.yaml')
 _IPADAPTER_LIST_PATH = os.path.join(_PROJECT_ROOT, 'yaml', 'ipadapter.yaml')
 _CONSTANTS_PATH = os.path.join(_PROJECT_ROOT, 'yaml', 'constants.yaml')
-
+_MODEL_ARCHITECTURES_PATH = os.path.join(_PROJECT_ROOT, 'yaml', 'model_architectures.yaml')
+_IMAGE_GEN_FEATURES_PATH = os.path.join(_PROJECT_ROOT, 'yaml', 'image_gen_features.yaml')
+_MODEL_DEFAULTS_PATH = os.path.join(_PROJECT_ROOT, 'yaml', 'model_defaults.yaml')
 
 def load_constants_from_yaml(filepath=_CONSTANTS_PATH):
     if not os.path.exists(filepath):
@@ -26,6 +48,27 @@ def load_constants_from_yaml(filepath=_CONSTANTS_PATH):
     with open(filepath, 'r', encoding='utf-8') as f:
         return yaml.safe_load(f)
 
+def load_architectures_config(filepath=_MODEL_ARCHITECTURES_PATH):
+    if not os.path.exists(filepath):
+        print(f"Warning: Architectures file not found at {filepath}.")
+        return {}
+    with open(filepath, 'r', encoding='utf-8') as f:
+        return yaml.safe_load(f)
+
+def load_features_config(filepath=_IMAGE_GEN_FEATURES_PATH):
+    if not os.path.exists(filepath):
+        print(f"Warning: Features file not found at {filepath}.")
+        return {}
+    with open(filepath, 'r', encoding='utf-8') as f:
+        return yaml.safe_load(f)
+
+def load_model_defaults(filepath=_MODEL_DEFAULTS_PATH):
+    if not os.path.exists(filepath):
+        print(f"Warning: Model defaults file not found at {filepath}.")
+        return {}
+    with open(filepath, 'r', encoding='utf-8') as f:
+        return yaml.safe_load(f)
+
 def load_file_download_map(filepath=_FILE_LIST_PATH):
     if not os.path.exists(filepath):
         raise FileNotFoundError(f"The file list (for downloads) was not found at: {filepath}")
@@ -58,27 +101,40 @@ def load_models_from_yaml(model_list_filepath=_MODEL_LIST_PATH, download_map=None):
     }
     category_map_names = {
         "Checkpoint": "MODEL_MAP_CHECKPOINT",
+        "Checkpoints": "MODEL_MAP_CHECKPOINT"
     }
 
-    for category,
+    for category, architectures in model_data.items():
         if category in category_map_names:
             map_name = category_map_names[category]
-            if not isinstance(
+            if not isinstance(architectures, dict): continue
+
+            for arch, arch_data in architectures.items():
+                if not isinstance(arch_data, dict): continue
+
+                latent_type = arch_data.get('latent_type', 'latent')
+                models = arch_data.get('models', [])
+                if not isinstance(models, list): continue
 
+                for model in models:
+                    display_name = model['display_name']
+                    path_or_components = model.get('path') or model.get('components')
+                    mod_category = model.get('category', None)
+
+                    repo_id = ''
+                    if isinstance(path_or_components, str):
+                        download_info = download_map.get(path_or_components, {})
+                        repo_id = download_info.get('repo_id', '')
+
+                    model_tuple = (
+                        repo_id,
+                        path_or_components,
+                        arch,
+                        latent_type,
+                        mod_category
+                    )
+                    model_maps[map_name][display_name] = model_tuple
+                    model_maps["ALL_MODEL_MAP"][display_name] = model_tuple
 
     return model_maps
 
@@ -88,13 +144,43 @@ try:
    MODEL_MAP_CHECKPOINT = loaded_maps["MODEL_MAP_CHECKPOINT"]
    ALL_MODEL_MAP = loaded_maps["ALL_MODEL_MAP"]
 
+    category_to_model_type = {
+        "diffusion_models": "UNET",
+        "text_encoders": "TEXT_ENCODER",
+        "vae": "VAE",
+        "checkpoints": "SDXL",
+        "loras": "LORA",
+        "controlnet": "CONTROLNET",
+        "model_patches": "MODEL_PATCH",
+        "style_models": "STYLE",
+        "clip_vision": "CLIP_VISION",
+        "ipadapter": "IPADAPTER",
+        "ipadapter-flux": "IPADAPTER_FLUX"
+    }
+    for filename, file_info in ALL_FILE_DOWNLOAD_MAP.items():
+        if filename not in ALL_MODEL_MAP:
+            category = file_info.get('category')
+            model_type = category_to_model_type.get(category, 'UNKNOWN')
+            repo_id = file_info.get('repo_id', '')
+            ALL_MODEL_MAP[filename] = (repo_id, filename, model_type, None, None)
+
     MODEL_TYPE_MAP = {k: v[2] for k, v in ALL_MODEL_MAP.items()}
 
+    ARCH_CATEGORIES_MAP = {}
+    for display_name, info in MODEL_MAP_CHECKPOINT.items():
+        arch = info[2]
+        cat = info[4] if len(info) > 4 else None
+        if arch not in ARCH_CATEGORIES_MAP:
+            ARCH_CATEGORIES_MAP[arch] = []
+        if cat and cat not in ARCH_CATEGORIES_MAP[arch]:
+            ARCH_CATEGORIES_MAP[arch].append(cat)
+
 except Exception as e:
     print(f"FATAL: Could not load model configuration from YAML. Error: {e}")
     ALL_FILE_DOWNLOAD_MAP = {}
     MODEL_MAP_CHECKPOINT, ALL_MODEL_MAP = {}, {}
     MODEL_TYPE_MAP = {}
+    ARCH_CATEGORIES_MAP = {}
 
 
 try:
@@ -104,13 +190,16 @@ try:
     MAX_CONDITIONINGS = _constants.get('MAX_CONDITIONINGS', 10)
     MAX_CONTROLNETS = _constants.get('MAX_CONTROLNETS', 5)
     MAX_IPADAPTERS = _constants.get('MAX_IPADAPTERS', 5)
-    LORA_SOURCE_CHOICES = _constants.get('LORA_SOURCE_CHOICES', ["Civitai", "
+    LORA_SOURCE_CHOICES = _constants.get('LORA_SOURCE_CHOICES', ["Civitai", "File"])
     RESOLUTION_MAP = _constants.get('RESOLUTION_MAP', {})
+    ARCHITECTURES_CONFIG = load_architectures_config()
+    FEATURES_CONFIG = load_features_config()
+    MODEL_DEFAULTS_CONFIG = load_model_defaults()
 except Exception as e:
     print(f"FATAL: Could not load constants from YAML. Error: {e}")
     MAX_LORAS, MAX_EMBEDDINGS, MAX_CONDITIONINGS, MAX_CONTROLNETS, MAX_IPADAPTERS = 5, 5, 10, 5, 5
-    LORA_SOURCE_CHOICES = ["Civitai", "
+    LORA_SOURCE_CHOICES = ["Civitai", "File"]
     RESOLUTION_MAP = {}
+    ARCHITECTURES_CONFIG = {}
+    FEATURES_CONFIG = {}
+    MODEL_DEFAULTS_CONFIG = {}
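
Note: model entries are now five-tuples of (repo_id, path_or_components, architecture, latent_type, category). MODEL_TYPE_MAP keeps index 2, and ARCH_CATEGORIES_MAP groups the optional per-model categories by architecture. A short usage sketch; "Some SDXL Model" is a placeholder display name, not a real entry:

from core.settings import ALL_MODEL_MAP, MODEL_TYPE_MAP

entry = ALL_MODEL_MAP.get("Some SDXL Model")
if entry:
    repo_id, path_or_components, arch, latent_type, category = entry
    # MODEL_TYPE_MAP mirrors index 2, so both lookups agree.
    assert MODEL_TYPE_MAP["Some SDXL Model"] == arch

The arch field selects the conditioning partial, latent_type selects the txt2img latent partial, and the download-map fallback entries use (repo_id, filename, model_type, None, None) so the tuple shape stays uniform.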
requirements.txt
CHANGED
@@ -58,4 +58,5 @@ svglib
 trimesh[easy]
 yacs
 yapf
-onnxruntime-gpu
+onnxruntime-gpu
+diffusers
ui/events.py
CHANGED
|
@@ -8,8 +8,7 @@ from utils.app_utils import *
|
|
| 8 |
from core.generation_logic import *
|
| 9 |
from comfy_integration.nodes import SAMPLER_CHOICES, SCHEDULER_CHOICES
|
| 10 |
|
| 11 |
-
from
|
| 12 |
-
from utils.app_utils import PREPROCESSOR_MODEL_MAP, PREPROCESSOR_PARAMETER_MAP, save_uploaded_file_with_hash
|
| 13 |
from ui.shared.ui_components import RESOLUTION_MAP, MAX_CONTROLNETS, MAX_IPADAPTERS, MAX_EMBEDDINGS, MAX_CONDITIONINGS, MAX_LORAS
|
| 14 |
|
| 15 |
|
|
@@ -22,10 +21,74 @@ def load_controlnet_config():
|
|
| 22 |
with open(_CN_MODEL_LIST_PATH, 'r', encoding='utf-8') as f:
|
| 23 |
config = yaml.safe_load(f)
|
| 24 |
print("--- ✅ controlnet_models.yaml loaded successfully ---")
|
| 25 |
-
return config.get("ControlNet", {})
|
| 26 |
except Exception as e:
|
| 27 |
print(f"Error loading controlnet_models.yaml: {e}")
|
| 28 |
-
return
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 29 |
|
| 30 |
@lru_cache(maxsize=1)
|
| 31 |
def load_ipadapter_config():
|
|
@@ -42,118 +105,96 @@ def load_ipadapter_config():
|
|
| 42 |
return {}
|
| 43 |
|
| 44 |
|
| 45 |
-
def
|
| 46 |
-
|
| 47 |
-
|
| 48 |
-
|
| 49 |
-
|
| 50 |
-
|
| 51 |
-
|
| 52 |
-
fn=update_cn_input_visibility,
|
| 53 |
-
inputs=[ui_components["cn_input_type"]],
|
| 54 |
-
outputs=[ui_components["cn_image_input"], ui_components["cn_video_input"]]
|
| 55 |
-
)
|
| 56 |
|
| 57 |
-
|
| 58 |
-
|
| 59 |
-
|
| 60 |
-
|
| 61 |
-
|
| 62 |
-
|
| 63 |
-
|
| 64 |
-
|
| 65 |
-
|
| 66 |
-
|
| 67 |
-
|
| 68 |
-
|
| 69 |
-
|
| 70 |
-
|
| 71 |
-
s_idx, d_idx, c_idx = 0, 0, 0
|
| 72 |
-
|
| 73 |
-
for param in params:
|
| 74 |
-
if s_idx + d_idx + c_idx >= MAX_DYNAMIC_CONTROLS: break
|
| 75 |
-
|
| 76 |
-
name = param["name"]
|
| 77 |
-
ptype = param["type"]
|
| 78 |
-
config = param["config"]
|
| 79 |
-
label = name.replace('_', ' ').title()
|
| 80 |
-
|
| 81 |
-
if ptype == "INT" or ptype == "FLOAT":
|
| 82 |
-
if s_idx < MAX_DYNAMIC_CONTROLS:
|
| 83 |
-
slider_updates.append(gr.update(
|
| 84 |
-
label=label,
|
| 85 |
-
minimum=config.get('min', 0),
|
| 86 |
-
maximum=config.get('max', 255),
|
| 87 |
-
step=config.get('step', 0.1 if ptype == "FLOAT" else 1),
|
| 88 |
-
value=config.get('default', 0),
|
| 89 |
-
visible=True
|
| 90 |
-
))
|
| 91 |
-
s_idx += 1
|
| 92 |
-
elif isinstance(ptype, list):
|
| 93 |
-
if d_idx < MAX_DYNAMIC_CONTROLS:
|
| 94 |
-
dropdown_updates.append(gr.update(
|
| 95 |
-
label=label,
|
| 96 |
-
choices=ptype,
|
| 97 |
-
value=config.get('default', ptype[0] if ptype else None),
|
| 98 |
-
visible=True
|
| 99 |
-
))
|
| 100 |
-
d_idx += 1
|
| 101 |
-
elif ptype == "BOOLEAN":
|
| 102 |
-
if c_idx < MAX_DYNAMIC_CONTROLS:
|
| 103 |
-
checkbox_updates.append(gr.update(
|
| 104 |
-
label=label,
|
| 105 |
-
value=config.get('default', False),
|
| 106 |
-
visible=True
|
| 107 |
-
))
|
| 108 |
-
c_idx += 1
|
| 109 |
-
|
| 110 |
-
for _ in range(s_idx, MAX_DYNAMIC_CONTROLS): slider_updates.append(gr.update(visible=False))
|
| 111 |
-
for _ in range(d_idx, MAX_DYNAMIC_CONTROLS): dropdown_updates.append(gr.update(visible=False))
|
| 112 |
-
for _ in range(c_idx, MAX_DYNAMIC_CONTROLS): checkbox_updates.append(gr.update(visible=False))
|
| 113 |
-
|
| 114 |
-
return slider_updates + dropdown_updates + checkbox_updates
|
| 115 |
-
|
| 116 |
-
def update_run_button_for_cpu(preprocessor_name):
|
| 117 |
-
if preprocessor_name in CPU_ONLY_PREPROCESSORS:
|
| 118 |
-
return gr.update(value="Run Preprocessor CPU Only", variant="primary"), gr.update(visible=False)
|
| 119 |
else:
|
| 120 |
-
|
| 121 |
-
|
| 122 |
-
|
| 123 |
-
|
| 124 |
-
|
| 125 |
-
|
| 126 |
-
|
| 127 |
-
|
| 128 |
-
|
| 129 |
-
|
| 130 |
-
|
| 131 |
-
|
| 132 |
-
|
| 133 |
-
|
| 134 |
-
|
| 135 |
-
|
| 136 |
-
|
| 137 |
-
|
| 138 |
-
|
| 139 |
-
|
| 140 |
-
|
| 141 |
-
|
| 142 |
-
|
| 143 |
-
|
| 144 |
-
|
| 145 |
-
|
| 146 |
-
|
| 147 |
-
|
| 148 |
-
|
| 149 |
-
|
| 150 |
-
|
| 151 |
-
|
| 152 |
-
|
| 153 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 154 |
|
| 155 |
def create_lora_event_handlers(prefix):
|
| 156 |
-
lora_rows = ui_components
|
|
|
|
| 157 |
lora_ids = ui_components[f'lora_ids_{prefix}']
|
| 158 |
lora_scales = ui_components[f'lora_scales_{prefix}']
|
| 159 |
lora_uploads = ui_components[f'lora_uploads_{prefix}']
|
|
@@ -193,7 +234,8 @@ def attach_event_handlers(ui_components, demo):
|
|
| 193 |
del_button.click(del_lora_row, [count_state], del_outputs, show_progress=False)
|
| 194 |
|
| 195 |
def create_controlnet_event_handlers(prefix):
|
| 196 |
-
cn_rows = ui_components
|
|
|
|
| 197 |
cn_types = ui_components[f'controlnet_types_{prefix}']
|
| 198 |
cn_series = ui_components[f'controlnet_series_{prefix}']
|
| 199 |
cn_filepaths = ui_components[f'controlnet_filepaths_{prefix}']
|
|
@@ -205,6 +247,114 @@ def attach_event_handlers(ui_components, demo):
|
|
| 205 |
del_button = ui_components[f'delete_controlnet_button_{prefix}']
|
| 206 |
accordion = ui_components[f'controlnet_accordion_{prefix}']
|
| 207 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 208 |
def add_cn_row(c):
|
| 209 |
c += 1
|
| 210 |
updates = {
|
|
@@ -232,8 +382,13 @@ def attach_event_handlers(ui_components, demo):
|
|
| 232 |
add_button.click(fn=add_cn_row, inputs=[count_state], outputs=add_outputs, show_progress=False)
|
| 233 |
del_button.click(fn=del_cn_row, inputs=[count_state], outputs=del_outputs, show_progress=False)
|
| 234 |
|
| 235 |
-
def on_cn_type_change(selected_type):
|
| 236 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 237 |
series_choices = []
|
| 238 |
if selected_type:
|
| 239 |
series_choices = sorted(list(set(
|
|
@@ -249,8 +404,13 @@ def attach_event_handlers(ui_components, demo):
|
|
| 249 |
break
|
| 250 |
return gr.update(choices=series_choices, value=default_series), filepath
|
| 251 |
|
| 252 |
-
def on_cn_series_change(selected_series, selected_type):
|
| 253 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 254 |
filepath = "None"
|
| 255 |
if selected_series and selected_type:
|
| 256 |
for model in cn_config:
|
|
@@ -262,13 +422,13 @@ def attach_event_handlers(ui_components, demo):
|
|
| 262 |
for i in range(MAX_CONTROLNETS):
|
| 263 |
cn_types[i].change(
|
| 264 |
fn=on_cn_type_change,
|
| 265 |
-
inputs=[cn_types[i]],
|
| 266 |
outputs=[cn_series[i], cn_filepaths[i]],
|
| 267 |
show_progress=False
|
| 268 |
)
|
| 269 |
cn_series[i].change(
|
| 270 |
fn=on_cn_series_change,
|
| 271 |
-
inputs=[cn_series[i], cn_types[i]],
|
| 272 |
outputs=[cn_filepaths[i]],
|
| 273 |
show_progress=False
|
| 274 |
)
|
|
@@ -283,8 +443,69 @@ def attach_event_handlers(ui_components, demo):
|
|
| 283 |
show_progress=False
|
| 284 |
)
|
| 285 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 286 |
def create_ipadapter_event_handlers(prefix):
|
| 287 |
-
ipa_rows = ui_components
|
|
|
|
| 288 |
ipa_lora_strengths = ui_components[f'ipadapter_lora_strengths_{prefix}']
|
| 289 |
ipa_final_preset = ui_components[f'ipadapter_final_preset_{prefix}']
|
| 290 |
ipa_final_lora_strength = ui_components[f'ipadapter_final_lora_strength_{prefix}']
|
|
@@ -319,11 +540,10 @@ def attach_event_handlers(ui_components, demo):
|
|
| 319 |
def on_preset_change(preset_value):
|
| 320 |
config = load_ipadapter_config()
|
| 321 |
faceid_presets = []
|
| 322 |
-
if
|
| 323 |
-
faceid_presets
|
| 324 |
-
|
| 325 |
-
|
| 326 |
-
]
|
| 327 |
is_visible = preset_value in faceid_presets
|
| 328 |
updates = [gr.update(visible=is_visible)] * (MAX_IPADAPTERS + 1)
|
| 329 |
return updates
|
|
@@ -333,9 +553,42 @@ def attach_event_handlers(ui_components, demo):
|
|
| 333 |
|
| 334 |
accordion.expand(fn=lambda *imgs: [gr.update() for _ in imgs], inputs=ui_components[f'ipadapter_images_{prefix}'], outputs=ui_components[f'ipadapter_images_{prefix}'], show_progress=False)
|
| 335 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 336 |
|
| 337 |
def create_embedding_event_handlers(prefix):
|
| 338 |
-
rows = ui_components
|
|
|
|
| 339 |
ids = ui_components[f'embeddings_ids_{prefix}']
|
| 340 |
files = ui_components[f'embeddings_files_{prefix}']
|
| 341 |
count_state = ui_components[f'embedding_count_state_{prefix}']
|
|
@@ -368,7 +621,8 @@ def attach_event_handlers(ui_components, demo):
|
|
| 368 |
del_button.click(fn=del_row, inputs=[count_state], outputs=del_outputs, show_progress=False)
|
| 369 |
|
| 370 |
def create_conditioning_event_handlers(prefix):
|
| 371 |
-
rows = ui_components
|
|
|
|
| 372 |
prompts = ui_components[f'conditioning_prompts_{prefix}']
|
| 373 |
count_state = ui_components[f'conditioning_count_state_{prefix}']
|
| 374 |
add_button = ui_components[f'add_conditioning_button_{prefix}']
|
|
@@ -423,38 +677,48 @@ def attach_event_handlers(ui_components, demo):
|
|
| 423 |
def create_run_event(prefix: str, task_type: str):
|
| 424 |
run_inputs_map = {
|
| 425 |
'model_display_name': ui_components[f'base_model_{prefix}'],
|
| 426 |
-
'positive_prompt': ui_components
|
| 427 |
-
'negative_prompt': ui_components
|
| 428 |
-
'seed': ui_components
|
| 429 |
-
'batch_size': ui_components
|
| 430 |
-
'guidance_scale': ui_components
|
| 431 |
-
'num_inference_steps': ui_components
|
| 432 |
-
'sampler': ui_components
|
| 433 |
-
'scheduler': ui_components
|
| 434 |
-
'zero_gpu_duration': ui_components
|
| 435 |
-
|
| 436 |
-
'clip_skip': ui_components
|
|
|
|
| 437 |
'task_type': gr.State(task_type)
|
| 438 |
}
|
| 439 |
|
| 440 |
if task_type not in ['img2img', 'inpaint']:
|
| 441 |
-
run_inputs_map.update({
|
|
|
|
|
|
|
|
|
|
| 442 |
|
| 443 |
task_specific_map = {
|
| 444 |
'img2img': {'img2img_image': f'input_image_{prefix}', 'img2img_denoise': f'denoise_{prefix}'},
|
| 445 |
-
'inpaint': {'inpaint_image_dict': f'input_image_dict_{prefix}'},
|
| 446 |
-
'outpaint': {'outpaint_image': f'input_image_{prefix}', '
|
| 447 |
'hires_fix': {'hires_image': f'input_image_{prefix}', 'hires_upscaler': f'hires_upscaler_{prefix}', 'hires_scale_by': f'hires_scale_by_{prefix}', 'hires_denoise': f'denoise_{prefix}'}
|
| 448 |
}
|
| 449 |
if task_type in task_specific_map:
|
| 450 |
for key, comp_name in task_specific_map[task_type].items():
|
| 451 |
-
|
|
|
|
| 452 |
|
| 453 |
lora_data_components = ui_components.get(f'all_lora_components_flat_{prefix}', [])
|
| 454 |
controlnet_data_components = ui_components.get(f'all_controlnet_components_flat_{prefix}', [])
|
|
|
|
| 455 |
ipadapter_data_components = ui_components.get(f'all_ipadapter_components_flat_{prefix}', [])
|
|
|
|
|
|
|
|
|
|
| 456 |
embedding_data_components = ui_components.get(f'all_embedding_components_flat_{prefix}', [])
|
| 457 |
conditioning_data_components = ui_components.get(f'all_conditioning_components_flat_{prefix}', [])
|
|
|
|
| 458 |
|
| 459 |
run_inputs_map['vae_source'] = ui_components.get(f'vae_source_{prefix}')
|
| 460 |
run_inputs_map['vae_id'] = ui_components.get(f'vae_id_{prefix}')
|
|
@@ -462,133 +726,441 @@ def attach_event_handlers(ui_components, demo):
|
|
| 462 |
|
| 463 |
input_keys = list(run_inputs_map.keys())
|
| 464 |
input_list_flat = [v for v in run_inputs_map.values() if v is not None]
|
| 465 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 466 |
|
| 467 |
def create_ui_inputs_dict(*args):
|
| 468 |
valid_keys = [k for k in input_keys if run_inputs_map[k] is not None]
|
| 469 |
ui_dict = dict(zip(valid_keys, args[:len(valid_keys)]))
|
| 470 |
arg_idx = len(valid_keys)
|
| 471 |
-
|
| 472 |
-
|
| 473 |
-
|
| 474 |
-
|
| 475 |
-
|
| 476 |
-
|
| 477 |
-
|
| 478 |
-
|
| 479 |
-
|
| 480 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 481 |
|
| 482 |
return ui_dict
|
| 483 |
|
| 484 |
-
ui_components
|
| 485 |
-
|
| 486 |
-
|
| 487 |
-
|
| 488 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
 
 
     for prefix, task_type in [
         ("txt2img", "txt2img"), ("img2img", "img2img"), ("inpaint", "inpaint"),
         ("outpaint", "outpaint"), ("hires_fix", "hires_fix"),
     ]:
-        [removed lines 495-534 not captured in this view]
         )
 
         create_run_event(prefix, task_type)
 
-    def on_aspect_ratio_change(ratio_key, model_display_name):
-        model_type = MODEL_TYPE_MAP.get(model_display_name, 'sdxl').lower()
-        res_map = RESOLUTION_MAP.get(model_type, RESOLUTION_MAP.get("sdxl", {}))
-        w, h = res_map.get(ratio_key, (1024, 1024))
-        return w, h
 
-    for prefix in ["txt2img", "img2img", "inpaint", "outpaint", "hires_fix"]:
-        if f'aspect_ratio_{prefix}' in ui_components:
-            aspect_ratio_dropdown = ui_components[f'aspect_ratio_{prefix}']
-            width_component = ui_components[f'width_{prefix}']
-            height_component = ui_components[f'height_{prefix}']
-            model_dropdown = ui_components[f'base_model_{prefix}']
-            aspect_ratio_dropdown.change(fn=on_aspect_ratio_change, inputs=[aspect_ratio_dropdown, model_dropdown], outputs=[width_component, height_component], show_progress=False)
-
     if 'view_mode_inpaint' in ui_components:
         def toggle_inpaint_fullscreen_view(view_mode):
             is_fullscreen = (view_mode == "Fullscreen View")
             other_elements_visible = not is_fullscreen
             editor_height = 800 if is_fullscreen else 272
-            [removed lines 558-559 not captured in this view]
                 ui_components['prompts_column_inpaint']: gr.update(visible=other_elements_visible),
                 ui_components['params_and_gallery_row_inpaint']: gr.update(visible=other_elements_visible),
                 ui_components['accordion_wrapper_inpaint']: gr.update(visible=other_elements_visible),
                 ui_components['input_image_dict_inpaint']: gr.update(height=editor_height),
             }
 
-        output_components = [
-        [removed lines 567-568 not captured in this view]
             ui_components['input_image_dict_inpaint']
-        ]
-        [removed line 571 not captured in this view]
 
     def initialize_all_cn_dropdowns():
-        [removed lines 574-578 not captured in this view]
 
-        series_choices =
-        [removed line 581 not captured in this view]
-            series_choices = sorted(list(set(model.get("Series", "Default") for model in cn_config if default_type in model.get("Type", []))))
-        default_series = series_choices[0] if series_choices else None
 
-        filepath = "None"
-        if default_series and default_type:
-            for model in cn_config:
-                if model.get("Series") == default_series and default_type in model.get("Type", []):
-                    filepath = model.get("Filepath")
-                    break
-        [removed line 591 not captured in this view]
         updates = {}
         for prefix in ["txt2img", "img2img", "inpaint", "outpaint", "hires_fix"]:
             if f'controlnet_types_{prefix}' in ui_components:
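The deleted on_aspect_ratio_change helper above resolved width and height from a per-architecture resolution table; later in this diff the same lookup moves inside make_update_fn. A minimal standalone sketch of that lookup, with hypothetical table contents (the real RESOLUTION_MAP lives in ui/shared/ui_components.py):

# Sketch only: table entries here are invented.
RESOLUTION_MAP = {
    "sdxl": {"1:1 (Square)": (1024, 1024), "16:9 (Widescreen)": (1344, 768)},
    "sd15": {"1:1 (Square)": (512, 512), "16:9 (Widescreen)": (912, 512)},
}

def resolve_resolution(ratio_key, model_type):
    # Fall back to the SDXL table for unknown architectures, then to 1024x1024.
    res_map = RESOLUTION_MAP.get(model_type, RESOLUTION_MAP["sdxl"])
    return res_map.get(ratio_key, (1024, 1024))

print(resolve_resolution("16:9 (Widescreen)", "sd15"))  # (912, 512)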
@@ -598,22 +1170,30 @@ def attach_event_handlers(ui_components, demo):
 
                 updates[series_dd] = gr.update(choices=series_choices, value=default_series)
             for filepath_state in ui_components[f'controlnet_filepaths_{prefix}']:
                 updates[filepath_state] = filepath
         return updates
 
     def initialize_all_ipa_dropdowns():
         config = load_ipadapter_config()
-        if not config
-        [removed lines 606-615 not captured in this view]
-            unified_presets.append(name)
 
         all_presets = unified_presets + faceid_presets
         default_preset = all_presets[0] if all_presets else None
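The preset-list assembly above simply concatenates the unified and FaceID groups from the IP-Adapter config and takes the first entry as the default. A small self-contained sketch of that merge (preset names here are made up):

config = {
    "IPAdapter_presets": {"SDXL": ["PLUS (high strength)", "STANDARD"]},
    "IPAdapter_FaceID_presets": {"SDXL": ["FACEID PLUS V2"]},
}

# Unified presets first, FaceID presets appended after them.
unified_presets = config.get("IPAdapter_presets", {}).get("SDXL", [])
faceid_presets = config.get("IPAdapter_FaceID_presets", {}).get("SDXL", [])
all_presets = unified_presets + faceid_presets
default_preset = all_presets[0] if all_presets else None
print(all_presets, default_preset)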
@@ -636,19 +1216,6 @@ def attach_event_handlers(ui_components, demo):
 
 
         all_updates = {**cn_updates, **ipa_updates}
 
-        default_preprocessor = "Canny Edge"
-        model_update = update_preprocessor_models_dropdown(default_preprocessor)
-        all_updates[ui_components["preprocessor_model_cn"]] = model_update
-
-        settings_outputs = update_preprocessor_settings_ui(default_preprocessor)
-        dynamic_outputs = ui_components["cn_sliders"] + ui_components["cn_dropdowns"] + ui_components["cn_checkboxes"]
-        for i, comp in enumerate(dynamic_outputs):
-            all_updates[comp] = settings_outputs[i]
-
-        run_button_update, zero_gpu_update = update_run_button_for_cpu(default_preprocessor)
-        all_updates[ui_components["run_cn"]] = run_button_update
-        all_updates[ui_components["zero_gpu_cn"]] = zero_gpu_update
-
         return all_updates
 
     all_load_outputs = []
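run_on_load returns one dict keyed by components, which lets a single demo.load callback update many widgets at once. A minimal sketch of that Gradio pattern under the same assumptions (the component names here are illustrative, not from the app):

import gradio as gr

with gr.Blocks() as demo_sketch:
    dropdown = gr.Dropdown(choices=[])
    textbox = gr.Textbox()

    def on_load():
        # Dict-style returns require every key to also appear in `outputs=`.
        return {dropdown: gr.update(choices=["a", "b"], value="a"),
                textbox: gr.update(value="ready")}

    demo_sketch.load(fn=on_load, outputs=[dropdown, textbox])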
@@ -657,22 +1224,35 @@ def attach_event_handlers(ui_components, demo):
 
         all_load_outputs.extend(ui_components[f'controlnet_types_{prefix}'])
         all_load_outputs.extend(ui_components[f'controlnet_series_{prefix}'])
         all_load_outputs.extend(ui_components[f'controlnet_filepaths_{prefix}'])
         if f'ipadapter_final_preset_{prefix}' in ui_components:
             all_load_outputs.extend(ui_components[f'ipadapter_lora_strengths_{prefix}'])
             all_load_outputs.append(ui_components[f'ipadapter_final_preset_{prefix}'])
             all_load_outputs.append(ui_components[f'ipadapter_final_lora_strength_{prefix}'])
 
-    all_load_outputs.extend([
-        ui_components["preprocessor_model_cn"],
-        *ui_components["cn_sliders"],
-        *ui_components["cn_dropdowns"],
-        *ui_components["cn_checkboxes"],
-        ui_components["run_cn"],
-        ui_components["zero_gpu_cn"]
-    ])
-
     if all_load_outputs:
         demo.load(
             fn=run_on_load,
             outputs=all_load_outputs
-        )
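The config loaders in the next hunk all follow the same lru_cache-plus-YAML pattern, so the file is parsed once and every subsequent dropdown callback reuses the cached dict. A hedged sketch of the pattern (the file name is hypothetical):

from functools import lru_cache
import yaml

@lru_cache(maxsize=1)
def load_model_list(path="models.yaml"):  # hypothetical file name
    # maxsize=1 means repeated UI callbacks share one parsed result
    # instead of re-reading and re-parsing the YAML every time.
    try:
        with open(path, "r", encoding="utf-8") as f:
            return yaml.safe_load(f) or {}
    except Exception as e:
        print(f"Error loading {path}: {e}")
        return {}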
 from core.generation_logic import *
 from comfy_integration.nodes import SAMPLER_CHOICES, SCHEDULER_CHOICES
 
+from utils.app_utils import save_uploaded_file_with_hash
 from ui.shared.ui_components import RESOLUTION_MAP, MAX_CONTROLNETS, MAX_IPADAPTERS, MAX_EMBEDDINGS, MAX_CONDITIONINGS, MAX_LORAS
 
 
[unchanged lines 15-20 omitted]
         with open(_CN_MODEL_LIST_PATH, 'r', encoding='utf-8') as f:
             config = yaml.safe_load(f)
         print("--- ✅ controlnet_models.yaml loaded successfully ---")
+        return config.get("ControlNet", {})
     except Exception as e:
         print(f"Error loading controlnet_models.yaml: {e}")
+        return {}
+
+
+def get_cn_defaults(arch_val):
+    cn_full_config = load_controlnet_config()
+    cn_config = cn_full_config.get(arch_val, [])
+
+    if not cn_config:
+        return [], None, [], None, "None"
+
+    all_types = sorted(list(set(t for model in cn_config for t in model.get("Type", []))))
+    default_type = all_types[0] if all_types else None
+
+    series_choices = []
+    if default_type:
+        series_choices = sorted(list(set(model.get("Series", "Default") for model in cn_config if default_type in model.get("Type", []))))
+    default_series = series_choices[0] if series_choices else None
+
+    filepath = "None"
+    if default_series and default_type:
+        for model in cn_config:
+            if model.get("Series") == default_series and default_type in model.get("Type", []):
+                filepath = model.get("Filepath")
+                break
+
+    return all_types, default_type, series_choices, default_series, filepath
+
+@lru_cache(maxsize=1)
+def load_diffsynth_controlnet_config():
+    _PROJECT_ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+    _CN_MODEL_LIST_PATH = os.path.join(_PROJECT_ROOT, 'yaml', 'diffsynth_controlnet_models.yaml')
+    try:
+        print("--- Loading diffsynth_controlnet_models.yaml ---")
+        with open(_CN_MODEL_LIST_PATH, 'r', encoding='utf-8') as f:
+            config = yaml.safe_load(f)
+        print("--- ✅ diffsynth_controlnet_models.yaml loaded successfully ---")
+        return config.get("DiffSynth_ControlNet", {})
+    except Exception as e:
+        print(f"Error loading diffsynth_controlnet_models.yaml: {e}")
+        return {}
+
+def get_diffsynth_cn_defaults(arch_val):
+    cn_full_config = load_diffsynth_controlnet_config()
+    cn_config = cn_full_config.get(arch_val, [])
+
+    if not cn_config:
+        return [], None, [], None, "None"
+
+    all_types = sorted(list(set(t for model in cn_config for t in model.get("Type", []))))
+    default_type = all_types[0] if all_types else None
+
+    series_choices = []
+    if default_type:
+        series_choices = sorted(list(set(model.get("Series", "Default") for model in cn_config if default_type in model.get("Type", []))))
+    default_series = series_choices[0] if series_choices else None
+
+    filepath = "None"
+    if default_series and default_type:
+        for model in cn_config:
+            if model.get("Series") == default_series and default_type in model.get("Type", []):
+                filepath = model.get("Filepath")
+                break
+
+    return all_types, default_type, series_choices, default_series, filepath
+
 
 @lru_cache(maxsize=1)
 def load_ipadapter_config():
[unchanged lines 95-104 omitted]
         return {}
 
 
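get_cn_defaults walks the per-architecture model list three times: first for the set of Types, then for the Series available under the default Type, then for the Filepath of the default Series. A standalone sketch of that resolution against an invented two-entry config:

cn_config = [
    {"Type": ["Canny", "Depth"], "Series": "Union", "Filepath": "cn/union.safetensors"},
    {"Type": ["Canny"], "Series": "Classic", "Filepath": "cn/canny.safetensors"},
]

# Type -> Series -> Filepath, mirroring the function above.
all_types = sorted({t for m in cn_config for t in m.get("Type", [])})
default_type = all_types[0] if all_types else None
series = sorted({m.get("Series", "Default") for m in cn_config if default_type in m.get("Type", [])})
default_series = series[0] if series else None
filepath = next((m["Filepath"] for m in cn_config
                 if m.get("Series") == default_series and default_type in m.get("Type", [])), "None")
print(default_type, default_series, filepath)  # Canny Classic cn/canny.safetensors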
+def apply_data_to_ui(data, prefix, ui_components):
+    final_sampler = data.get('sampler') if data.get('sampler') in SAMPLER_CHOICES else SAMPLER_CHOICES[0]
+    default_scheduler = 'normal' if 'normal' in SCHEDULER_CHOICES else SCHEDULER_CHOICES[0]
+    final_scheduler = data.get('scheduler') if data.get('scheduler') in SCHEDULER_CHOICES else default_scheduler
+
+    updates = {}
+    base_model_name = data.get('base_model')
+
+    model_map = MODEL_MAP_CHECKPOINT
+
+    if f'base_model_{prefix}' in ui_components:
+        model_dropdown_component = ui_components[f'base_model_{prefix}']
+        if base_model_name and base_model_name in model_map:
+            updates[model_dropdown_component] = base_model_name
+            if f'model_arch_{prefix}' in ui_components:
+                m_type = MODEL_TYPE_MAP.get(base_model_name, "SDXL")
+                updates[ui_components[f'model_arch_{prefix}']] = m_type
+            if f'model_cat_{prefix}' in ui_components:
+                m_info = model_map.get(base_model_name)
+                m_cat = m_info[4] if m_info and len(m_info) > 4 else None
+                updates[ui_components[f'model_cat_{prefix}']] = m_cat if m_cat else "ALL"
+        else:
+            updates[model_dropdown_component] = gr.update()
+
+    common_params = {
+        f'prompt_{prefix}': data.get('prompt', ''),
+        f'neg_prompt_{prefix}': data.get('negative_prompt', ''),
+        f'seed_{prefix}': data.get('seed', -1),
+        f'cfg_{prefix}': data.get('cfg_scale', 7.5),
+        f'steps_{prefix}': data.get('steps', 28),
+        f'sampler_{prefix}': final_sampler,
+        f'scheduler_{prefix}': final_scheduler,
+    }
+
+    for comp_name, value in common_params.items():
+        if comp_name in ui_components:
+            updates[ui_components[comp_name]] = value
+
+    if prefix == 'txt2img':
+        if f'width_{prefix}' in ui_components:
+            updates[ui_components[f'width_{prefix}']] = data.get('width', 1024)
+        if f'height_{prefix}' in ui_components:
+            updates[ui_components[f'height_{prefix}']] = data.get('height', 1024)
+
+    tab_indices = {"txt2img": 0, "img2img": 1, "inpaint": 2, "outpaint": 3, "hires_fix": 4}
+    tab_index = tab_indices.get(prefix, 0)
+
+    updates[ui_components['tabs']] = gr.Tabs(selected=tab_index)
+
+    return updates
+
+
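apply_data_to_ui never trusts values read from image metadata: anything outside the known choice lists degrades to a safe default. A minimal sketch of that whitelist fallback (the choice lists here are stand-ins for the real SAMPLER_CHOICES / SCHEDULER_CHOICES):

SAMPLER_CHOICES = ["euler", "dpmpp_2m"]        # stand-in lists
SCHEDULER_CHOICES = ["normal", "karras"]

def safe_choice(value, choices, preferred=None):
    # Keep the value only if it is a known choice; otherwise prefer a
    # designated default, then the first available choice.
    if value in choices:
        return value
    if preferred in choices:
        return preferred
    return choices[0]

print(safe_choice("euler_ancestral", SAMPLER_CHOICES))          # euler
print(safe_choice("sgm_uniform", SCHEDULER_CHOICES, "normal"))  # normal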
+def send_info_to_tab(image, prefix, ui_components):
+    if not image or not image.info.get('parameters', ''):
+        all_comps = [comp for comp_or_list in ui_components.values() for comp in (comp_or_list if isinstance(comp_or_list, list) else [comp_or_list])]
+        return {comp: gr.update() for comp in all_comps}
+
+    data = parse_parameters(image.info['parameters'])
+
+    image_input_map = {
+        "img2img": 'input_image_img2img',
+        "inpaint": 'input_image_dict_inpaint',
+        "outpaint": 'input_image_outpaint',
+        "hires_fix": 'input_image_hires_fix'
+    }
+
+    updates = apply_data_to_ui(data, prefix, ui_components)
+
+    if prefix in image_input_map and image_input_map[prefix] in ui_components:
+        component_key = image_input_map[prefix]
+        updates[ui_components[component_key]] = gr.update(value=image)
+
+    return updates
+
+
+def send_info_by_hash(image, ui_components):
+    if not image or not image.info.get('parameters', ''):
+        all_comps = [comp for comp_or_list in ui_components.values() for comp in (comp_or_list if isinstance(comp_or_list, list) else [comp_or_list])]
+        return {comp: gr.update() for comp in all_comps}
+
+    data = parse_parameters(image.info['parameters'])
+
+    return apply_data_to_ui(data, "txt2img", ui_components)
+
+
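parse_parameters itself is not part of this diff; a rough, purely illustrative stand-in for the A1111-style PNG "parameters" text it consumes might look like this (field names and format assumed):

import re

def parse_parameters_sketch(text):
    # First line is the prompt; "Negative prompt:" gets its own line;
    # remaining "Key: value" pairs are comma-separated.
    data = {}
    lines = text.split("\n")
    data["prompt"] = lines[0]
    for line in lines[1:]:
        if line.startswith("Negative prompt:"):
            data["negative_prompt"] = line.split(":", 1)[1].strip()
        else:
            for key, val in re.findall(r"(\w[\w ]*): ([^,]+)", line):
                data[key.strip().lower().replace(" ", "_")] = val.strip()
    return data

meta = "a cat\nNegative prompt: blurry\nSteps: 28, Sampler: euler, Seed: 42"
print(parse_parameters_sketch(meta)["steps"])  # '28'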
 def attach_event_handlers(ui_components, demo):
 
     def create_lora_event_handlers(prefix):
+        lora_rows = ui_components.get(f'lora_rows_{prefix}')
+        if not lora_rows: return
         lora_ids = ui_components[f'lora_ids_{prefix}']
         lora_scales = ui_components[f'lora_scales_{prefix}']
         lora_uploads = ui_components[f'lora_uploads_{prefix}']
[unchanged lines 201-233 omitted]
         del_button.click(del_lora_row, [count_state], del_outputs, show_progress=False)
 
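Every chain in this file (LoRA, ControlNet, IP-Adapter, styles, reference latents) uses the same dynamic-row pattern: a fixed pool of pre-built rows, a count held in gr.State, and add/remove buttons that toggle visibility via dict-keyed updates. A self-contained Gradio sketch of the pattern under those assumptions:

import gradio as gr

MAX_ROWS = 3  # illustrative cap, mirroring MAX_LORAS / MAX_CONTROLNETS

with gr.Blocks() as rows_demo:
    count = gr.State(1)
    rows = []
    for i in range(MAX_ROWS):
        # All rows exist up front; only the first starts visible.
        with gr.Row(visible=(i == 0)) as row:
            gr.Textbox(label=f"slot {i + 1}")
        rows.append(row)
    add_btn = gr.Button("Add")
    del_btn = gr.Button("Remove", visible=False)

    def add_row(c):
        c += 1
        return {count: c,
                rows[c - 1]: gr.update(visible=True),
                add_btn: gr.update(visible=c < MAX_ROWS),
                del_btn: gr.update(visible=True)}

    def del_row(c):
        c -= 1
        return {count: c,
                rows[c]: gr.update(visible=False),
                add_btn: gr.update(visible=True),
                del_btn: gr.update(visible=c > 1)}

    outs = [count, add_btn, del_btn] + rows
    add_btn.click(add_row, inputs=[count], outputs=outs, show_progress=False)
    del_btn.click(del_row, inputs=[count], outputs=outs, show_progress=False)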
     def create_controlnet_event_handlers(prefix):
+        cn_rows = ui_components.get(f'controlnet_rows_{prefix}')
+        if not cn_rows: return
         cn_types = ui_components[f'controlnet_types_{prefix}']
         cn_series = ui_components[f'controlnet_series_{prefix}']
         cn_filepaths = ui_components[f'controlnet_filepaths_{prefix}']
[unchanged lines 242-246 omitted]
         del_button = ui_components[f'delete_controlnet_button_{prefix}']
         accordion = ui_components[f'controlnet_accordion_{prefix}']
 
+        arch_comp = ui_components.get(f'model_arch_{prefix}')
+        actual_arch_comp = arch_comp if arch_comp else gr.State("SDXL")
+
+        def add_cn_row(c):
+            c += 1
+            updates = {
+                count_state: c,
+                cn_rows[c-1]: gr.update(visible=True),
+                add_button: gr.update(visible=c < MAX_CONTROLNETS),
+                del_button: gr.update(visible=True)
+            }
+            return updates
+
+        def del_cn_row(c):
+            c -= 1
+            updates = {
+                count_state: c,
+                cn_rows[c]: gr.update(visible=False),
+                cn_images[c]: None,
+                cn_strengths[c]: 1.0,
+                add_button: gr.update(visible=True),
+                del_button: gr.update(visible=c > 0)
+            }
+            return updates
+
+        add_outputs = [count_state, add_button, del_button] + cn_rows
+        del_outputs = [count_state, add_button, del_button] + cn_rows + cn_images + cn_strengths
+        add_button.click(fn=add_cn_row, inputs=[count_state], outputs=add_outputs, show_progress=False)
+        del_button.click(fn=del_cn_row, inputs=[count_state], outputs=del_outputs, show_progress=False)
+
+        def on_cn_type_change(selected_type, arch_val):
+            cn_full_config = load_controlnet_config()
+
+            architectures_dict = ARCHITECTURES_CONFIG.get('architectures', {})
+            controlnet_key = architectures_dict.get(arch_val, {}).get("controlnet_key", arch_val)
+
+            cn_config = cn_full_config.get(controlnet_key, [])
+            series_choices = []
+            if selected_type:
+                series_choices = sorted(list(set(
+                    model.get("Series", "Default") for model in cn_config
+                    if selected_type in model.get("Type", [])
+                )))
+            default_series = series_choices[0] if series_choices else None
+            filepath = "None"
+            if default_series:
+                for model in cn_config:
+                    if model.get("Series") == default_series and selected_type in model.get("Type", []):
+                        filepath = model.get("Filepath")
+                        break
+            return gr.update(choices=series_choices, value=default_series), filepath
+
+        def on_cn_series_change(selected_series, selected_type, arch_val):
+            cn_full_config = load_controlnet_config()
+
+            architectures_dict = ARCHITECTURES_CONFIG.get('architectures', {})
+            controlnet_key = architectures_dict.get(arch_val, {}).get("controlnet_key", arch_val)
+
+            cn_config = cn_full_config.get(controlnet_key, [])
+            filepath = "None"
+            if selected_series and selected_type:
+                for model in cn_config:
+                    if model.get("Series") == selected_series and selected_type in model.get("Type", []):
+                        filepath = model.get("Filepath")
+                        break
+            return filepath
+
+        for i in range(MAX_CONTROLNETS):
+            cn_types[i].change(
+                fn=on_cn_type_change,
+                inputs=[cn_types[i], actual_arch_comp],
+                outputs=[cn_series[i], cn_filepaths[i]],
+                show_progress=False
+            )
+            cn_series[i].change(
+                fn=on_cn_series_change,
+                inputs=[cn_series[i], cn_types[i], actual_arch_comp],
+                outputs=[cn_filepaths[i]],
+                show_progress=False
+            )
+
+        def on_accordion_expand(*images):
+            return [gr.update() for _ in images]
+
+        accordion.expand(
+            fn=on_accordion_expand,
+            inputs=cn_images,
+            outputs=cn_images,
+            show_progress=False
+        )
+
+    def create_diffsynth_controlnet_event_handlers(prefix):
+        cn_rows = ui_components.get(f'diffsynth_controlnet_rows_{prefix}')
+        if not cn_rows: return
+        cn_types = ui_components[f'diffsynth_controlnet_types_{prefix}']
+        cn_series = ui_components[f'diffsynth_controlnet_series_{prefix}']
+        cn_filepaths = ui_components[f'diffsynth_controlnet_filepaths_{prefix}']
+        cn_images = ui_components[f'diffsynth_controlnet_images_{prefix}']
+        cn_strengths = ui_components[f'diffsynth_controlnet_strengths_{prefix}']
+
+        count_state = ui_components[f'diffsynth_controlnet_count_state_{prefix}']
+        add_button = ui_components[f'add_diffsynth_controlnet_button_{prefix}']
+        del_button = ui_components[f'delete_diffsynth_controlnet_button_{prefix}']
+        accordion = ui_components[f'diffsynth_controlnet_accordion_{prefix}']
+
+        arch_comp = ui_components.get(f'model_arch_{prefix}')
+        actual_arch_comp = arch_comp if arch_comp else gr.State("Z-Image")
+
         def add_cn_row(c):
             c += 1
             updates = {
[unchanged lines 361-381 omitted]
         add_button.click(fn=add_cn_row, inputs=[count_state], outputs=add_outputs, show_progress=False)
         del_button.click(fn=del_cn_row, inputs=[count_state], outputs=del_outputs, show_progress=False)
 
+        def on_cn_type_change(selected_type, arch_val):
+            cn_full_config = load_diffsynth_controlnet_config()
+
+            architectures_dict = ARCHITECTURES_CONFIG.get('architectures', {})
+            controlnet_key = architectures_dict.get(arch_val, {}).get("controlnet_key", arch_val)
+
+            cn_config = cn_full_config.get(controlnet_key, [])
             series_choices = []
             if selected_type:
                 series_choices = sorted(list(set(
[unchanged lines 395-403 omitted]
                         break
             return gr.update(choices=series_choices, value=default_series), filepath
 
+        def on_cn_series_change(selected_series, selected_type, arch_val):
+            cn_full_config = load_diffsynth_controlnet_config()
+
+            architectures_dict = ARCHITECTURES_CONFIG.get('architectures', {})
+            controlnet_key = architectures_dict.get(arch_val, {}).get("controlnet_key", arch_val)
+
+            cn_config = cn_full_config.get(controlnet_key, [])
             filepath = "None"
             if selected_series and selected_type:
                 for model in cn_config:
[unchanged lines 417-421 omitted]
         for i in range(MAX_CONTROLNETS):
             cn_types[i].change(
                 fn=on_cn_type_change,
+                inputs=[cn_types[i], actual_arch_comp],
                 outputs=[cn_series[i], cn_filepaths[i]],
                 show_progress=False
             )
             cn_series[i].change(
                 fn=on_cn_series_change,
+                inputs=[cn_series[i], cn_types[i], actual_arch_comp],
                 outputs=[cn_filepaths[i]],
                 show_progress=False
             )
[unchanged lines 435-442 omitted]
             show_progress=False
         )
 
+    def create_flux1_ipadapter_event_handlers(prefix):
+        fipa_rows = ui_components.get(f'flux1_ipadapter_rows_{prefix}')
+        if not fipa_rows: return
+        count_state = ui_components[f'flux1_ipadapter_count_state_{prefix}']
+        add_button = ui_components[f'add_flux1_ipadapter_button_{prefix}']
+        del_button = ui_components[f'delete_flux1_ipadapter_button_{prefix}']
+
+        def add_fipa_row(c):
+            c += 1
+            return {
+                count_state: c,
+                fipa_rows[c - 1]: gr.update(visible=True),
+                add_button: gr.update(visible=c < MAX_IPADAPTERS),
+                del_button: gr.update(visible=True),
+            }
+
+        def del_fipa_row(c):
+            c -= 1
+            return {
+                count_state: c,
+                fipa_rows[c]: gr.update(visible=False),
+                add_button: gr.update(visible=True),
+                del_button: gr.update(visible=c > 0),
+            }
+
+        add_outputs = [count_state, add_button, del_button] + fipa_rows
+        del_outputs = [count_state, add_button, del_button] + fipa_rows
+        add_button.click(fn=add_fipa_row, inputs=[count_state], outputs=add_outputs, show_progress=False)
+        del_button.click(fn=del_fipa_row, inputs=[count_state], outputs=del_outputs, show_progress=False)
+
+    def create_style_event_handlers(prefix):
+        style_rows = ui_components.get(f'style_rows_{prefix}')
+        if not style_rows: return
+        count_state = ui_components[f'style_count_state_{prefix}']
+        add_button = ui_components[f'add_style_button_{prefix}']
+        del_button = ui_components[f'delete_style_button_{prefix}']
+
+        def add_style_row(c):
+            c += 1
+            return {
+                count_state: c,
+                style_rows[c - 1]: gr.update(visible=True),
+                add_button: gr.update(visible=c < 5),
+                del_button: gr.update(visible=True),
+            }
+
+        def del_style_row(c):
+            c -= 1
+            return {
+                count_state: c,
+                style_rows[c]: gr.update(visible=False),
+                add_button: gr.update(visible=True),
+                del_button: gr.update(visible=c > 0),
+            }
+
+        add_outputs = [count_state, add_button, del_button] + style_rows
+        del_outputs = [count_state, add_button, del_button] + style_rows
+        add_button.click(fn=add_style_row, inputs=[count_state], outputs=add_outputs, show_progress=False)
+        del_button.click(fn=del_style_row, inputs=[count_state], outputs=del_outputs, show_progress=False)
+
     def create_ipadapter_event_handlers(prefix):
+        ipa_rows = ui_components.get(f'ipadapter_rows_{prefix}')
+        if not ipa_rows: return
         ipa_lora_strengths = ui_components[f'ipadapter_lora_strengths_{prefix}']
         ipa_final_preset = ui_components[f'ipadapter_final_preset_{prefix}']
         ipa_final_lora_strength = ui_components[f'ipadapter_final_lora_strength_{prefix}']
[unchanged lines 512-539 omitted]
         def on_preset_change(preset_value):
             config = load_ipadapter_config()
             faceid_presets = []
+            if config:
+                faceid_presets.extend(config.get("IPAdapter_FaceID_presets", {}).get("SDXL", []))
+                faceid_presets.extend(config.get("IPAdapter_FaceID_presets", {}).get("SD1.5", []))
+
             is_visible = preset_value in faceid_presets
             updates = [gr.update(visible=is_visible)] * (MAX_IPADAPTERS + 1)
             return updates
[unchanged lines 550-552 omitted]
 
         accordion.expand(fn=lambda *imgs: [gr.update() for _ in imgs], inputs=ui_components[f'ipadapter_images_{prefix}'], outputs=ui_components[f'ipadapter_images_{prefix}'], show_progress=False)
 
+    def create_reference_latent_event_handlers(prefix):
+        ref_rows = ui_components.get(f'reference_latent_rows_{prefix}')
+        if not ref_rows: return
+        count_state = ui_components[f'reference_latent_count_state_{prefix}']
+        add_button = ui_components[f'add_reference_latent_button_{prefix}']
+        del_button = ui_components[f'delete_reference_latent_button_{prefix}']
+        images = ui_components[f'reference_latent_images_{prefix}']
+
+        def add_ref_row(c):
+            c += 1
+            return {
+                count_state: c,
+                ref_rows[c - 1]: gr.update(visible=True),
+                add_button: gr.update(visible=c < 10),
+                del_button: gr.update(visible=True),
+            }
+
+        def del_ref_row(c):
+            c -= 1
+            return {
+                count_state: c,
+                ref_rows[c]: gr.update(visible=False),
+                images[c]: None,
+                add_button: gr.update(visible=True),
+                del_button: gr.update(visible=c > 0),
+            }
+
+        add_outputs = [count_state, add_button, del_button] + ref_rows
+        del_outputs = [count_state, add_button, del_button] + ref_rows + images
+        add_button.click(fn=add_ref_row, inputs=[count_state], outputs=add_outputs, show_progress=False)
+        del_button.click(fn=del_ref_row, inputs=[count_state], outputs=del_outputs, show_progress=False)
+
 
     def create_embedding_event_handlers(prefix):
+        rows = ui_components.get(f'embedding_rows_{prefix}')
+        if not rows: return
         ids = ui_components[f'embeddings_ids_{prefix}']
         files = ui_components[f'embeddings_files_{prefix}']
         count_state = ui_components[f'embedding_count_state_{prefix}']
[unchanged lines 595-620 omitted]
         del_button.click(fn=del_row, inputs=[count_state], outputs=del_outputs, show_progress=False)
 
     def create_conditioning_event_handlers(prefix):
+        rows = ui_components.get(f'conditioning_rows_{prefix}')
+        if not rows: return
         prompts = ui_components[f'conditioning_prompts_{prefix}']
         count_state = ui_components[f'conditioning_count_state_{prefix}']
         add_button = ui_components[f'add_conditioning_button_{prefix}']
[unchanged lines 629-676 omitted]
     def create_run_event(prefix: str, task_type: str):
         run_inputs_map = {
             'model_display_name': ui_components[f'base_model_{prefix}'],
+            'positive_prompt': ui_components.get(f'prompt_{prefix}') or ui_components.get(f'{prefix}_positive_prompt'),
+            'negative_prompt': ui_components.get(f'neg_prompt_{prefix}') or ui_components.get(f'{prefix}_negative_prompt'),
+            'seed': ui_components.get(f'seed_{prefix}') or ui_components.get(f'{prefix}_seed'),
+            'batch_size': ui_components.get(f'batch_size_{prefix}') or ui_components.get(f'{prefix}_batch_size'),
+            'guidance_scale': ui_components.get(f'cfg_{prefix}') or ui_components.get(f'{prefix}_cfg'),
+            'num_inference_steps': ui_components.get(f'steps_{prefix}') or ui_components.get(f'{prefix}_steps'),
+            'sampler': ui_components.get(f'sampler_{prefix}') or ui_components.get(f'{prefix}_sampler_name'),
+            'scheduler': ui_components.get(f'scheduler_{prefix}') or ui_components.get(f'{prefix}_scheduler'),
+            'zero_gpu_duration': ui_components.get(f'zero_gpu_{prefix}'),
+
+            'clip_skip': ui_components.get(f'clip_skip_{prefix}'),
+            'guidance': ui_components.get(f'guidance_{prefix}'),
             'task_type': gr.State(task_type)
         }
 
         if task_type not in ['img2img', 'inpaint']:
+            run_inputs_map.update({
+                'width': ui_components.get(f'width_{prefix}') or ui_components.get(f'{prefix}_width'),
+                'height': ui_components.get(f'height_{prefix}') or ui_components.get(f'{prefix}_height')
+            })
 
         task_specific_map = {
             'img2img': {'img2img_image': f'input_image_{prefix}', 'img2img_denoise': f'denoise_{prefix}'},
+            'inpaint': {'inpaint_image_dict': f'input_image_dict_{prefix}', 'grow_mask_by': f'grow_mask_by_{prefix}'},
+            'outpaint': {'outpaint_image': f'input_image_{prefix}', 'left': f'left_{prefix}', 'top': f'top_{prefix}', 'right': f'right_{prefix}', 'bottom': f'bottom_{prefix}', 'feathering': f'feathering_{prefix}'},
             'hires_fix': {'hires_image': f'input_image_{prefix}', 'hires_upscaler': f'hires_upscaler_{prefix}', 'hires_scale_by': f'hires_scale_by_{prefix}', 'hires_denoise': f'denoise_{prefix}'}
         }
         if task_type in task_specific_map:
             for key, comp_name in task_specific_map[task_type].items():
+                if comp_name in ui_components:
+                    run_inputs_map[key] = ui_components[comp_name]
 
         lora_data_components = ui_components.get(f'all_lora_components_flat_{prefix}', [])
         controlnet_data_components = ui_components.get(f'all_controlnet_components_flat_{prefix}', [])
+        diffsynth_controlnet_data_components = ui_components.get(f'all_diffsynth_controlnet_components_flat_{prefix}', [])
         ipadapter_data_components = ui_components.get(f'all_ipadapter_components_flat_{prefix}', [])
+        sd3_ipadapter_data_components = ui_components.get(f'all_sd3_ipadapter_components_flat_{prefix}', [])
+        flux1_ipadapter_data_components = ui_components.get(f'all_flux1_ipadapter_components_flat_{prefix}', [])
+        style_data_components = ui_components.get(f'all_style_components_flat_{prefix}', [])
         embedding_data_components = ui_components.get(f'all_embedding_components_flat_{prefix}', [])
         conditioning_data_components = ui_components.get(f'all_conditioning_components_flat_{prefix}', [])
+        reference_latent_data_components = ui_components.get(f'all_reference_latent_components_flat_{prefix}', [])
 
         run_inputs_map['vae_source'] = ui_components.get(f'vae_source_{prefix}')
         run_inputs_map['vae_id'] = ui_components.get(f'vae_id_{prefix}')
[unchanged line 725 omitted]
 
         input_keys = list(run_inputs_map.keys())
         input_list_flat = [v for v in run_inputs_map.values() if v is not None]
+        all_chains = [
+            lora_data_components, controlnet_data_components, diffsynth_controlnet_data_components, ipadapter_data_components,
+            sd3_ipadapter_data_components, flux1_ipadapter_data_components, style_data_components,
+            embedding_data_components, conditioning_data_components, reference_latent_data_components
+        ]
+        for chain in all_chains:
+            if chain:
+                input_list_flat.extend(chain)
 
         def create_ui_inputs_dict(*args):
             valid_keys = [k for k in input_keys if run_inputs_map[k] is not None]
             ui_dict = dict(zip(valid_keys, args[:len(valid_keys)]))
             arg_idx = len(valid_keys)
+
+            def assign_chain_data(chain_key, components_list):
+                nonlocal arg_idx
+                if components_list:
+                    ui_dict[chain_key] = list(args[arg_idx : arg_idx + len(components_list)])
+                    arg_idx += len(components_list)
+
+            assign_chain_data('lora_data', lora_data_components)
+            assign_chain_data('controlnet_data', controlnet_data_components)
+            assign_chain_data('diffsynth_controlnet_data', diffsynth_controlnet_data_components)
+            assign_chain_data('ipadapter_data', ipadapter_data_components)
+            assign_chain_data('sd3_ipadapter_chain', sd3_ipadapter_data_components)
+            assign_chain_data('flux1_ipadapter_data', flux1_ipadapter_data_components)
+            assign_chain_data('style_data', style_data_components)
+            assign_chain_data('embedding_data', embedding_data_components)
+            assign_chain_data('conditioning_data', conditioning_data_components)
+            assign_chain_data('reference_latent_data', reference_latent_data_components)
 
             return ui_dict
 
+        run_btn = ui_components.get(f'run_{prefix}') or ui_components.get(f'{prefix}_run_button')
+        res_gal = ui_components.get(f'result_{prefix}') or ui_components.get(f'{prefix}_output_gallery')
+        if run_btn and res_gal:
+            run_btn.click(
+                fn=lambda *args, progress=gr.Progress(track_tqdm=True): generate_image_wrapper(create_ui_inputs_dict(*args), progress),
+                inputs=input_list_flat,
+                outputs=[res_gal]
+            )
+
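create_ui_inputs_dict relies on the chains being appended to input_list_flat in a fixed order: the scalar inputs arrive first in *args, then each enabled chain claims its own contiguous slice. A pure-Python sketch of that packing, with invented keys and values:

def pack_inputs(scalar_keys, chains, args):
    # Scalars first, then each (key, components) pair consumes
    # len(components) positional args in registration order.
    ui_dict = dict(zip(scalar_keys, args[:len(scalar_keys)]))
    idx = len(scalar_keys)
    for key, components in chains:
        if components:
            ui_dict[key] = list(args[idx:idx + len(components)])
            idx += len(components)
    return ui_dict

scalars = ["seed", "steps"]
chains = [("lora_data", ["id0", "scale0"]), ("style_data", [])]
print(pack_inputs(scalars, chains, (42, 28, "detail-lora", 0.8)))
# {'seed': 42, 'steps': 28, 'lora_data': ['detail-lora', 0.8]}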
+    def make_update_fn(m_comp, cat_comp, cs_comp, ar_comp, width_comp, height_comp, cn_types, cn_series, cn_filepaths, diffsynth_cn_types, diffsynth_cn_series, diffsynth_cn_filepaths, ipa_preset, lora_acc, cn_acc, diffsynth_cn_acc, ipa_acc, sd3_ipa_acc, flux1_ipa_acc, style_acc, embed_acc, cond_acc, ref_latent_acc, guidance_comp, prompt_comp, neg_prompt_comp, steps_comp, cfg_comp, sampler_comp, scheduler_comp):
+        def update_fn(*args):
+            arch = args[0]
+            category = args[1]
+            current_ar = args[2] if len(args) > 2 else None
+            from core.settings import MODEL_TYPE_MAP, MODEL_MAP_CHECKPOINT, FEATURES_CONFIG, ARCHITECTURES_CONFIG, MODEL_DEFAULTS_CONFIG, ARCH_CATEGORIES_MAP
+            from utils.app_utils import get_model_generation_defaults
+
+            if arch == "ALL":
+                valid_cats = list(set(cat for cats in ARCH_CATEGORIES_MAP.values() for cat in cats))
+            else:
+                valid_cats = ARCH_CATEGORIES_MAP.get(arch, [])
+
+            cat_choices = ["ALL"] + sorted(valid_cats)
+            new_category = category if category in cat_choices else "ALL"
+
+            choices = []
+            for name, info in MODEL_MAP_CHECKPOINT.items():
+                m_arch = info[2]
+                m_cat = info[4] if len(info) > 4 else None
+                arch_match = (arch == "ALL" or m_arch == arch)
+                cat_match = (new_category == "ALL" or m_cat == new_category)
+                if arch_match and cat_match:
+                    choices.append(name)
+
+            val = choices[0] if choices else None
+
+            updates = {
+                m_comp: gr.update(choices=choices, value=val),
+                cat_comp: gr.update(choices=cat_choices, value=new_category)
+            }
+
+            m_type = MODEL_TYPE_MAP.get(val, "SDXL") if val else "SDXL"
+
+            architectures_dict = ARCHITECTURES_CONFIG.get('architectures', {})
+            arch_model_type = architectures_dict.get(m_type, {}).get("model_type", m_type.lower().replace(" ", "").replace(".", ""))
+
+            arch_features = FEATURES_CONFIG.get(arch_model_type, FEATURES_CONFIG.get('default', {}))
+            enabled_chains = arch_features.get('enabled_chains', [])
+
+            if lora_acc: updates[lora_acc] = gr.update(visible=('lora' in enabled_chains))
+            if cn_acc: updates[cn_acc] = gr.update(visible=('controlnet' in enabled_chains))
+            if diffsynth_cn_acc: updates[diffsynth_cn_acc] = gr.update(visible=('controlnet_model_patch' in enabled_chains))
+            if ipa_acc: updates[ipa_acc] = gr.update(visible=('ipadapter' in enabled_chains))
+            if flux1_ipa_acc: updates[flux1_ipa_acc] = gr.update(visible=('flux1_ipadapter' in enabled_chains))
+            if sd3_ipa_acc: updates[sd3_ipa_acc] = gr.update(visible=('sd3_ipadapter' in enabled_chains))
+            if style_acc: updates[style_acc] = gr.update(visible=('style' in enabled_chains))
+            if embed_acc: updates[embed_acc] = gr.update(visible=('embedding' in enabled_chains))
+            if cond_acc: updates[cond_acc] = gr.update(visible=('conditioning' in enabled_chains))
+            if ref_latent_acc: updates[ref_latent_acc] = gr.update(visible=('reference_latent' in enabled_chains))
+
+            if cs_comp:
+                updates[cs_comp] = gr.update(visible=(arch_model_type == "sd15"))
+            if guidance_comp:
+                updates[guidance_comp] = gr.update(visible=(arch_model_type == "flux1"))
+
+            if ar_comp:
+                res_key = arch_model_type
+                if res_key not in RESOLUTION_MAP:
+                    res_key = 'sdxl'
+                res_map = RESOLUTION_MAP.get(res_key, {})
+                target_ar = current_ar if current_ar in res_map else (list(res_map.keys())[0] if res_map else "1:1 (Square)")
+                updates[ar_comp] = gr.update(choices=list(res_map.keys()), value=target_ar)
+                if width_comp and height_comp and target_ar in res_map:
+                    updates[width_comp] = gr.update(value=res_map[target_ar][0])
+                    updates[height_comp] = gr.update(value=res_map[target_ar][1])
+
+            controlnet_key = architectures_dict.get(m_type, {}).get("controlnet_key", m_type)
+
+            all_types, default_type, series_choices, default_series, filepath = get_cn_defaults(controlnet_key)
+            for t_comp in cn_types:
+                updates[t_comp] = gr.update(choices=all_types, value=default_type)
+            for s_comp in cn_series:
+                updates[s_comp] = gr.update(choices=series_choices, value=default_series)
+            for f_comp in cn_filepaths:
+                updates[f_comp] = filepath
+
+            diffsynth_all_types, diffsynth_default_type, diffsynth_series_choices, diffsynth_default_series, diffsynth_filepath = get_diffsynth_cn_defaults(controlnet_key)
+            for t_comp in diffsynth_cn_types:
+                updates[t_comp] = gr.update(choices=diffsynth_all_types, value=diffsynth_default_type)
+            for s_comp in diffsynth_cn_series:
+                updates[s_comp] = gr.update(choices=diffsynth_series_choices, value=diffsynth_default_series)
+            for f_comp in diffsynth_cn_filepaths:
+                updates[f_comp] = diffsynth_filepath
+
+            if ipa_preset and (arch_model_type in ["sdxl", "sd15", "sd35"]):
+                config = load_ipadapter_config()
+                ipa_arch_key = "SDXL" if arch_model_type in ["sdxl", "sd35"] else "SD1.5"
+                std_presets = config.get("IPAdapter_presets", {}).get(ipa_arch_key, [])
+                face_presets = config.get("IPAdapter_FaceID_presets", {}).get(ipa_arch_key, [])
+                all_ipa_presets = std_presets + face_presets
+                default_ipa = all_ipa_presets[0] if all_ipa_presets else None
+                updates[ipa_preset] = gr.update(choices=all_ipa_presets, value=default_ipa)
+
+            defaults = get_model_generation_defaults(val, arch_model_type, MODEL_DEFAULTS_CONFIG)
+            if steps_comp: updates[steps_comp] = gr.update(value=defaults.get('steps'))
+            if cfg_comp: updates[cfg_comp] = gr.update(value=defaults.get('cfg'))
+            if sampler_comp: updates[sampler_comp] = gr.update(value=defaults.get('sampler_name'))
+            if scheduler_comp: updates[scheduler_comp] = gr.update(value=defaults.get('scheduler'))
+            if prompt_comp: updates[prompt_comp] = gr.update(value=defaults.get('positive_prompt'))
+            if neg_prompt_comp: updates[neg_prompt_comp] = gr.update(value=defaults.get('negative_prompt'))
+
+            return updates
+        return update_fn
+
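The chain-visibility block above is driven entirely by per-architecture feature flags: each architecture's entry lists which chains the UI should expose, with a default for unknown types. A compact sketch of that gating with an invented config:

FEATURES_CONFIG = {
    "sdxl": {"enabled_chains": ["lora", "controlnet", "ipadapter"]},
    "flux1": {"enabled_chains": ["lora", "flux1_ipadapter", "reference_latent"]},
    "default": {"enabled_chains": ["lora"]},
}

def chain_visible(arch_model_type, chain):
    # Unknown architectures fall back to the "default" feature set.
    features = FEATURES_CONFIG.get(arch_model_type, FEATURES_CONFIG["default"])
    return chain in features.get("enabled_chains", [])

print(chain_visible("flux1", "controlnet"))        # False
print(chain_visible("flux1", "reference_latent"))  # True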
+    def make_model_change_fn(cat_comp_ref, cs_comp, ar_comp, width_comp, height_comp, cn_types, cn_series, cn_filepaths, diffsynth_cn_types, diffsynth_cn_series, diffsynth_cn_filepaths, arch_comp_ref, ipa_preset, lora_acc, cn_acc, diffsynth_cn_acc, ipa_acc, sd3_ipa_acc, flux1_ipa_acc, style_acc, embed_acc, cond_acc, ref_latent_acc, guidance_comp, prompt_comp, neg_prompt_comp, steps_comp, cfg_comp, sampler_comp, scheduler_comp):
+        def change_fn(*args):
+            model_name = args[0]
+            idx = 1
+            current_arch = args[idx] if arch_comp_ref and idx < len(args) else None
+            if arch_comp_ref: idx += 1
+            current_cat = args[idx] if cat_comp_ref and idx < len(args) else None
+            if cat_comp_ref: idx += 1
+            current_ar = args[idx] if idx < len(args) else None
+            from core.settings import MODEL_TYPE_MAP, FEATURES_CONFIG, ARCHITECTURES_CONFIG, MODEL_DEFAULTS_CONFIG, ARCH_CATEGORIES_MAP, MODEL_MAP_CHECKPOINT
+            from utils.app_utils import get_model_generation_defaults
+            m_type = MODEL_TYPE_MAP.get(model_name, "SDXL")
+
+            m_info = MODEL_MAP_CHECKPOINT.get(model_name)
+            m_cat = m_info[4] if m_info and len(m_info) > 4 else None
+            if not m_cat: m_cat = "ALL"
+
+            updates = {}
+            target_arch = m_type
+            if arch_comp_ref:
+                if current_arch == "ALL":
+                    updates[arch_comp_ref] = gr.update()
+                    target_arch = "ALL"
+                else:
+                    updates[arch_comp_ref] = m_type
+
+            if cat_comp_ref:
+                if target_arch == "ALL":
+                    valid_cats = list(set(cat for cats in ARCH_CATEGORIES_MAP.values() for cat in cats))
+                else:
+                    valid_cats = ARCH_CATEGORIES_MAP.get(target_arch, [])
+                cat_choices = ["ALL"] + sorted(valid_cats)
+
+                if current_cat == "ALL":
+                    updates[cat_comp_ref] = gr.update(choices=cat_choices)
+                else:
+                    updates[cat_comp_ref] = gr.update(choices=cat_choices, value=m_cat)
+
+            architectures_dict = ARCHITECTURES_CONFIG.get('architectures', {})
+            arch_model_type = architectures_dict.get(m_type, {}).get("model_type", m_type.lower().replace(" ", "").replace(".", ""))
+
+            arch_features = FEATURES_CONFIG.get(arch_model_type, FEATURES_CONFIG.get('default', {}))
+            enabled_chains = arch_features.get('enabled_chains', [])
+
+            if lora_acc: updates[lora_acc] = gr.update(visible=('lora' in enabled_chains))
+            if cn_acc: updates[cn_acc] = gr.update(visible=('controlnet' in enabled_chains))
+            if diffsynth_cn_acc: updates[diffsynth_cn_acc] = gr.update(visible=('controlnet_model_patch' in enabled_chains))
+            if ipa_acc: updates[ipa_acc] = gr.update(visible=('ipadapter' in enabled_chains))
+            if flux1_ipa_acc: updates[flux1_ipa_acc] = gr.update(visible=('flux1_ipadapter' in enabled_chains))
+            if sd3_ipa_acc: updates[sd3_ipa_acc] = gr.update(visible=('sd3_ipadapter' in enabled_chains))
+            if style_acc: updates[style_acc] = gr.update(visible=('style' in enabled_chains))
+            if embed_acc: updates[embed_acc] = gr.update(visible=('embedding' in enabled_chains))
+            if cond_acc: updates[cond_acc] = gr.update(visible=('conditioning' in enabled_chains))
+            if ref_latent_acc: updates[ref_latent_acc] = gr.update(visible=('reference_latent' in enabled_chains))
+
+            if cs_comp:
+                updates[cs_comp] = gr.update(visible=(arch_model_type == "sd15"))
+            if guidance_comp:
+                updates[guidance_comp] = gr.update(visible=(arch_model_type == "flux1"))
+
+            if ar_comp:
+                res_key = arch_model_type
+                if res_key not in RESOLUTION_MAP:
+                    res_key = 'sdxl'
+                res_map = RESOLUTION_MAP.get(res_key, {})
+                target_ar = current_ar if current_ar in res_map else (list(res_map.keys())[0] if res_map else "1:1 (Square)")
+                updates[ar_comp] = gr.update(choices=list(res_map.keys()), value=target_ar)
+                if width_comp and height_comp and target_ar in res_map:
+                    updates[width_comp] = gr.update(value=res_map[target_ar][0])
+                    updates[height_comp] = gr.update(value=res_map[target_ar][1])
+
+            controlnet_key = architectures_dict.get(m_type, {}).get("controlnet_key", m_type)
+
+            all_types, default_type, series_choices, default_series, filepath = get_cn_defaults(controlnet_key)
+            for t_comp in cn_types:
+                updates[t_comp] = gr.update(choices=all_types, value=default_type)
+            for s_comp in cn_series:
+                updates[s_comp] = gr.update(choices=series_choices, value=default_series)
+            for f_comp in cn_filepaths:
+                updates[f_comp] = filepath
+
+            diffsynth_all_types, diffsynth_default_type, diffsynth_series_choices, diffsynth_default_series, diffsynth_filepath = get_diffsynth_cn_defaults(controlnet_key)
+            for t_comp in diffsynth_cn_types:
+                updates[t_comp] = gr.update(choices=diffsynth_all_types, value=diffsynth_default_type)
+            for s_comp in diffsynth_cn_series:
+                updates[s_comp] = gr.update(choices=diffsynth_series_choices, value=diffsynth_default_series)
+            for f_comp in diffsynth_cn_filepaths:
+                updates[f_comp] = diffsynth_filepath
+
+            if ipa_preset and (arch_model_type in ["sdxl", "sd15", "sd35"]):
+                config = load_ipadapter_config()
+                ipa_arch_key = "SDXL" if arch_model_type in ["sdxl", "sd35"] else "SD1.5"
+                std_presets = config.get("IPAdapter_presets", {}).get(ipa_arch_key, [])
+                face_presets = config.get("IPAdapter_FaceID_presets", {}).get(ipa_arch_key, [])
+                all_ipa_presets = std_presets + face_presets
+                default_ipa = all_ipa_presets[0] if all_ipa_presets else None
+                updates[ipa_preset] = gr.update(choices=all_ipa_presets, value=default_ipa)
+
+            defaults = get_model_generation_defaults(model_name, arch_model_type, MODEL_DEFAULTS_CONFIG)
+            if steps_comp: updates[steps_comp] = gr.update(value=defaults.get('steps'))
+            if cfg_comp: updates[cfg_comp] = gr.update(value=defaults.get('cfg'))
+            if sampler_comp: updates[sampler_comp] = gr.update(value=defaults.get('sampler_name'))
+            if scheduler_comp: updates[scheduler_comp] = gr.update(value=defaults.get('scheduler'))
+            if prompt_comp: updates[prompt_comp] = gr.update(value=defaults.get('positive_prompt'))
+            if neg_prompt_comp: updates[neg_prompt_comp] = gr.update(value=defaults.get('negative_prompt'))
+
+            return updates
+        return change_fn
 
 
for prefix, task_type in [
|
| 987 |
("txt2img", "txt2img"), ("img2img", "img2img"), ("inpaint", "inpaint"),
|
| 988 |
("outpaint", "outpaint"), ("hires_fix", "hires_fix"),
|
| 989 |
]:
|
| 990 |
+
|
| 991 |
+
arch_comp = ui_components.get(f'model_arch_{prefix}')
|
| 992 |
+
cat_comp = ui_components.get(f'model_cat_{prefix}')
|
| 993 |
+
model_comp = ui_components.get(f'base_model_{prefix}')
|
| 994 |
+
clip_skip_comp = ui_components.get(f'clip_skip_{prefix}') or ui_components.get(f'{prefix}_clip_skip')
|
| 995 |
+
guidance_comp = ui_components.get(f'guidance_{prefix}') or ui_components.get(f'{prefix}_guidance')
|
| 996 |
+
aspect_ratio_comp = ui_components.get(f'aspect_ratio_{prefix}') or ui_components.get(f'{prefix}_aspect_ratio_dropdown')
|
| 997 |
+
width_comp = ui_components.get(f'width_{prefix}') or ui_components.get(f'{prefix}_width')
|
| 998 |
+
height_comp = ui_components.get(f'height_{prefix}') or ui_components.get(f'{prefix}_height')
|
| 999 |
+
|
| 1000 |
+
cn_types_list = ui_components.get(f'controlnet_types_{prefix}', [])
|
| 1001 |
+
cn_series_list = ui_components.get(f'controlnet_series_{prefix}', [])
|
| 1002 |
+
cn_filepaths_list = ui_components.get(f'controlnet_filepaths_{prefix}', [])
|
| 1003 |
+
|
| 1004 |
+
diffsynth_cn_types_list = ui_components.get(f'diffsynth_controlnet_types_{prefix}', [])
|
| 1005 |
+
diffsynth_cn_series_list = ui_components.get(f'diffsynth_controlnet_series_{prefix}', [])
|
| 1006 |
+
diffsynth_cn_filepaths_list = ui_components.get(f'diffsynth_controlnet_filepaths_{prefix}', [])
|
| 1007 |
+
|
| 1008 |
+
lora_accordion = ui_components.get(f'lora_accordion_{prefix}')
|
| 1009 |
+
cn_accordion = ui_components.get(f'controlnet_accordion_{prefix}')
|
| 1010 |
+
diffsynth_cn_accordion = ui_components.get(f'diffsynth_controlnet_accordion_{prefix}')
|
| 1011 |
+
ipa_accordion = ui_components.get(f'ipadapter_accordion_{prefix}')
|
| 1012 |
+
sd3_ipa_accordion = ui_components.get(f'sd3_ipadapter_accordion_{prefix}')
|
| 1013 |
+
flux1_ipa_accordion = ui_components.get(f'flux1_ipadapter_accordion_{prefix}')
|
| 1014 |
+
style_accordion = ui_components.get(f'style_accordion_{prefix}')
|
| 1015 |
+
embedding_accordion = ui_components.get(f'embedding_accordion_{prefix}')
|
| 1016 |
+
conditioning_accordion = ui_components.get(f'conditioning_accordion_{prefix}')
|
| 1017 |
+
ref_latent_accordion = ui_components.get(f'reference_latent_accordion_{prefix}')
|
| 1018 |
+
|
| 1019 |
+
ipa_preset_list = ui_components.get(f'ipadapter_final_preset_{prefix}')
|
| 1020 |
+
|
| 1021 |
+
prompt_comp = ui_components.get(f'prompt_{prefix}') or ui_components.get(f'{prefix}_positive_prompt')
|
| 1022 |
+
neg_prompt_comp = ui_components.get(f'neg_prompt_{prefix}') or ui_components.get(f'{prefix}_negative_prompt')
|
| 1023 |
+
steps_comp = ui_components.get(f'steps_{prefix}') or ui_components.get(f'{prefix}_steps')
|
| 1024 |
+
cfg_comp = ui_components.get(f'cfg_{prefix}') or ui_components.get(f'{prefix}_cfg')
|
| 1025 |
+
sampler_comp = ui_components.get(f'sampler_{prefix}') or ui_components.get(f'{prefix}_sampler_name')
|
| 1026 |
+
scheduler_comp = ui_components.get(f'scheduler_{prefix}') or ui_components.get(f'{prefix}_scheduler')
|
| 1027 |
+
|
| 1028 |
+
extra_comps = [prompt_comp, neg_prompt_comp, steps_comp, cfg_comp, sampler_comp, scheduler_comp, width_comp, height_comp]
|
| 1029 |
+
valid_extra_comps = [c for c in extra_comps if c is not None]
|
| 1030 |
+
|
| 1031 |
+
if arch_comp and cat_comp and model_comp:
|
| 1032 |
+
outputs = [model_comp, cat_comp]
|
| 1033 |
+
if clip_skip_comp: outputs.append(clip_skip_comp)
|
| 1034 |
+
if guidance_comp: outputs.append(guidance_comp)
|
| 1035 |
+
if aspect_ratio_comp: outputs.append(aspect_ratio_comp)
|
| 1036 |
+
outputs.extend(cn_types_list + cn_series_list + cn_filepaths_list)
|
| 1037 |
+
outputs.extend(diffsynth_cn_types_list + diffsynth_cn_series_list + diffsynth_cn_filepaths_list)
|
| 1038 |
+
if lora_accordion: outputs.append(lora_accordion)
|
| 1039 |
+
if cn_accordion: outputs.append(cn_accordion)
|
| 1040 |
+
if diffsynth_cn_accordion: outputs.append(diffsynth_cn_accordion)
|
| 1041 |
+
if ipa_accordion: outputs.append(ipa_accordion)
|
| 1042 |
+
if sd3_ipa_accordion: outputs.append(sd3_ipa_accordion)
|
| 1043 |
+
if flux1_ipa_accordion: outputs.append(flux1_ipa_accordion)
|
| 1044 |
+
if style_accordion: outputs.append(style_accordion)
|
| 1045 |
+
if embedding_accordion: outputs.append(embedding_accordion)
|
| 1046 |
+
if conditioning_accordion: outputs.append(conditioning_accordion)
|
| 1047 |
+
if ref_latent_accordion: outputs.append(ref_latent_accordion)
|
| 1048 |
+
if ipa_preset_list: outputs.append(ipa_preset_list)
|
| 1049 |
+
|
| 1050 |
+
outputs.extend(valid_extra_comps)
|
| 1051 |
+
|
| 1052 |
+
update_fn = make_update_fn(
|
| 1053 |
+
model_comp, cat_comp, clip_skip_comp, aspect_ratio_comp, width_comp, height_comp,
|
| 1054 |
+
cn_types_list, cn_series_list, cn_filepaths_list,
|
| 1055 |
+
+                diffsynth_cn_types_list, diffsynth_cn_series_list, diffsynth_cn_filepaths_list,
+                ipa_preset_list, lora_accordion, cn_accordion, diffsynth_cn_accordion, ipa_accordion, sd3_ipa_accordion, flux1_ipa_accordion, style_accordion, embedding_accordion, conditioning_accordion,
+                ref_latent_accordion, guidance_comp, prompt_comp, neg_prompt_comp, steps_comp, cfg_comp, sampler_comp, scheduler_comp
+            )
+            inputs = [arch_comp, cat_comp]
+            if aspect_ratio_comp:
+                inputs.append(aspect_ratio_comp)
+            arch_comp.change(fn=update_fn, inputs=inputs, outputs=outputs)
+            cat_comp.change(fn=update_fn, inputs=inputs, outputs=outputs)
+
+        if model_comp:
+            outputs2 = []
+            if arch_comp: outputs2.append(arch_comp)
+            if cat_comp: outputs2.append(cat_comp)
+            if clip_skip_comp: outputs2.append(clip_skip_comp)
+            if guidance_comp: outputs2.append(guidance_comp)
+            if aspect_ratio_comp: outputs2.append(aspect_ratio_comp)
+            outputs2.extend(cn_types_list + cn_series_list + cn_filepaths_list)
+            outputs2.extend(diffsynth_cn_types_list + diffsynth_cn_series_list + diffsynth_cn_filepaths_list)
+            if lora_accordion: outputs2.append(lora_accordion)
+            if cn_accordion: outputs2.append(cn_accordion)
+            if diffsynth_cn_accordion: outputs2.append(diffsynth_cn_accordion)
+            if ipa_accordion: outputs2.append(ipa_accordion)
+            if sd3_ipa_accordion: outputs2.append(sd3_ipa_accordion)
+            if flux1_ipa_accordion: outputs2.append(flux1_ipa_accordion)
+            if style_accordion: outputs2.append(style_accordion)
+            if embedding_accordion: outputs2.append(embedding_accordion)
+            if conditioning_accordion: outputs2.append(conditioning_accordion)
+            if ref_latent_accordion: outputs2.append(ref_latent_accordion)
+            if ipa_preset_list: outputs2.append(ipa_preset_list)
+
+            outputs2.extend(valid_extra_comps)
+
+            if outputs2:
+                inputs2 = [model_comp]
+                if arch_comp: inputs2.append(arch_comp)
+                if cat_comp: inputs2.append(cat_comp)
+                if aspect_ratio_comp: inputs2.append(aspect_ratio_comp)
+                change_fn = make_model_change_fn(
+                    cat_comp, clip_skip_comp, aspect_ratio_comp, width_comp, height_comp,
+                    cn_types_list, cn_series_list, cn_filepaths_list,
+                    diffsynth_cn_types_list, diffsynth_cn_series_list, diffsynth_cn_filepaths_list,
+                    arch_comp, ipa_preset_list, lora_accordion, cn_accordion, diffsynth_cn_accordion, ipa_accordion, sd3_ipa_accordion, flux1_ipa_accordion, style_accordion, embedding_accordion, conditioning_accordion,
+                    ref_latent_accordion, guidance_comp, prompt_comp, neg_prompt_comp, steps_comp, cfg_comp, sampler_comp, scheduler_comp
+                )
+                model_comp.change(fn=change_fn, inputs=inputs2, outputs=outputs2)
 
+    create_lora_event_handlers(prefix)
+    create_controlnet_event_handlers(prefix)
+    create_diffsynth_controlnet_event_handlers(prefix)
+    create_ipadapter_event_handlers(prefix)
+    create_embedding_event_handlers(prefix)
+    create_conditioning_event_handlers(prefix)
+    create_flux1_ipadapter_event_handlers(prefix)
+    create_style_event_handlers(prefix)
+    create_reference_latent_event_handlers(prefix)
     create_run_event(prefix, task_type)
 
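A note on the wiring pattern above: both `inputs`/`outputs` lists are assembled conditionally, so the handler's return values must track however the outputs list was built. A minimal sketch of the same pattern, using hypothetical components (`arch`, `model`, `guidance`, `status`) rather than the app's own:

```python
# Minimal sketch of conditional Gradio event wiring; component names here are
# invented for illustration and are not the ones used in ui/events.py.
import gradio as gr

def on_arch_change(arch, model):
    # One return value per entry in `outputs`, in the order the list was built.
    return gr.update(visible=(arch == "FLUX")), f"{model} ({arch})"

with gr.Blocks() as demo:
    arch = gr.Dropdown(["SDXL", "FLUX"], value="SDXL", label="Architecture")
    model = gr.Dropdown(["model-a", "model-b"], value="model-a", label="Model")
    guidance = gr.Slider(1.0, 10.0, value=3.5, label="Guidance (FLUX)", visible=False)
    status = gr.Textbox(label="Status")

    inputs = [arch]
    if model is not None:  # mirrors the `if aspect_ratio_comp:` style guards
        inputs.append(model)
    outputs = [guidance, status]
    arch.change(fn=on_arch_change, inputs=inputs, outputs=outputs)
```

This is why the diff appends to `outputs2` and `inputs2` under the same guards: the callback's argument order and return order are both defined by how those lists were assembled.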
     if 'view_mode_inpaint' in ui_components:
         def toggle_inpaint_fullscreen_view(view_mode):
             is_fullscreen = (view_mode == "Fullscreen View")
             other_elements_visible = not is_fullscreen
             editor_height = 800 if is_fullscreen else 272
+
+            updates = {
                 ui_components['prompts_column_inpaint']: gr.update(visible=other_elements_visible),
                 ui_components['params_and_gallery_row_inpaint']: gr.update(visible=other_elements_visible),
                 ui_components['accordion_wrapper_inpaint']: gr.update(visible=other_elements_visible),
                 ui_components['input_image_dict_inpaint']: gr.update(height=editor_height),
             }
+
+            model_and_run_rows = ui_components.get('model_and_run_row_inpaint', [])
+            for row in model_and_run_rows:
+                updates[row] = gr.update(visible=other_elements_visible)
+
+            return updates
 
+        output_components = []
+        model_and_run_rows = ui_components.get('model_and_run_row_inpaint', [])
+        if isinstance(model_and_run_rows, list):
+            output_components.extend(model_and_run_rows)
+        else:
+            output_components.append(model_and_run_rows)
+
+        output_components.extend([
+            ui_components['prompts_column_inpaint'],
+            ui_components['params_and_gallery_row_inpaint'],
+            ui_components['accordion_wrapper_inpaint'],
             ui_components['input_image_dict_inpaint']
+        ])
+
+        ui_components['view_mode_inpaint'].change(
+            fn=toggle_inpaint_fullscreen_view,
+            inputs=[ui_components['view_mode_inpaint']],
+            outputs=output_components,
+            show_progress=False
+        )
 
     def initialize_all_cn_dropdowns():
+        from core.settings import MODEL_TYPE_MAP, MODEL_MAP_CHECKPOINT, ARCHITECTURES_CONFIG
+        default_model_name = list(MODEL_MAP_CHECKPOINT.keys())[0] if MODEL_MAP_CHECKPOINT else None
+        default_m_type = MODEL_TYPE_MAP.get(default_model_name, "SDXL") if default_model_name else "SDXL"
+        architectures_dict = ARCHITECTURES_CONFIG.get('architectures', {})
+        controlnet_key = architectures_dict.get(default_m_type, {}).get("controlnet_key", default_m_type)
 
+        all_types, default_type, series_choices, default_series, filepath = get_cn_defaults(controlnet_key)
+        diffsynth_all_types, diffsynth_default_type, diffsynth_series_choices, diffsynth_default_series, diffsynth_filepath = get_diffsynth_cn_defaults(controlnet_key)
 
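`initialize_all_cn_dropdowns` resolves its `controlnet_key` through two configuration layers. A small sketch of that lookup chain, with made-up map contents standing in for the real data in `core/settings.py` (which this diff does not show):

```python
# Hypothetical stand-ins for MODEL_TYPE_MAP / ARCHITECTURES_CONFIG; only the
# lookup chain below is taken from the diff, not these contents.
MODEL_TYPE_MAP = {"DreamShaper XL": "SDXL", "FLUX.1 dev": "FLUX"}
ARCHITECTURES_CONFIG = {
    "architectures": {
        "SDXL": {"controlnet_key": "SDXL"},
        "FLUX": {"controlnet_key": "FLUX.1"},
    }
}

def resolve_controlnet_key(model_display_name: str) -> str:
    # Display name -> architecture type, defaulting to SDXL as in the diff.
    m_type = MODEL_TYPE_MAP.get(model_display_name, "SDXL")
    # Architecture type -> controlnet key, falling back to the type itself.
    archs = ARCHITECTURES_CONFIG.get("architectures", {})
    return archs.get(m_type, {}).get("controlnet_key", m_type)

assert resolve_controlnet_key("FLUX.1 dev") == "FLUX.1"
assert resolve_controlnet_key("unknown model") == "SDXL"
```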
         updates = {}
         for prefix in ["txt2img", "img2img", "inpaint", "outpaint", "hires_fix"]:
             if f'controlnet_types_{prefix}' in ui_components:
@@
                     updates[series_dd] = gr.update(choices=series_choices, value=default_series)
                 for filepath_state in ui_components[f'controlnet_filepaths_{prefix}']:
                     updates[filepath_state] = filepath
+
+            if f'diffsynth_controlnet_types_{prefix}' in ui_components:
+                for type_dd in ui_components[f'diffsynth_controlnet_types_{prefix}']:
+                    updates[type_dd] = gr.update(choices=diffsynth_all_types, value=diffsynth_default_type)
+                for series_dd in ui_components[f'diffsynth_controlnet_series_{prefix}']:
+                    updates[series_dd] = gr.update(choices=diffsynth_series_choices, value=diffsynth_default_series)
+                for filepath_state in ui_components[f'diffsynth_controlnet_filepaths_{prefix}']:
+                    updates[filepath_state] = diffsynth_filepath
+
         return updates
 
     def initialize_all_ipa_dropdowns():
         config = load_ipadapter_config()
+        if not config: return {}
+
+        from core.settings import MODEL_TYPE_MAP, MODEL_MAP_CHECKPOINT, ARCHITECTURES_CONFIG
+        default_model_name = list(MODEL_MAP_CHECKPOINT.keys())[0] if MODEL_MAP_CHECKPOINT else None
+        default_m_type = MODEL_TYPE_MAP.get(default_model_name, "SDXL") if default_model_name else "SDXL"
+        architectures_dict = ARCHITECTURES_CONFIG.get('architectures', {})
+        arch_model_type = architectures_dict.get(default_m_type, {}).get("model_type", default_m_type.lower().replace(" ", "").replace(".", ""))
+        ipa_arch_key = "SDXL" if arch_model_type in ["sdxl", "sd35"] else "SD1.5"
+
+        unified_presets = config.get("IPAdapter_presets", {}).get(ipa_arch_key, [])
+        faceid_presets = config.get("IPAdapter_FaceID_presets", {}).get(ipa_arch_key, [])
 
         all_presets = unified_presets + faceid_presets
         default_preset = all_presets[0] if all_presets else None
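The initializers above return `{component: gr.update(...)}` dicts that are later merged via `{**cn_updates, **ipa_updates}`. Gradio accepts such a dict as an event return value as long as every key is declared in the event's outputs; a minimal sketch:

```python
# Sketch of the dict-return convention used by the initializers above.
# Gradio lets an event function return {component: gr.update(...)} instead of
# a positional tuple, provided each key appears in the event's outputs list.
import gradio as gr

with gr.Blocks() as demo:
    type_dd = gr.Dropdown([], label="ControlNet Type")
    series_dd = gr.Dropdown([], label="ControlNet Series")

    def run_on_load():
        cn_updates = {type_dd: gr.update(choices=["canny", "depth"], value="canny")}
        ipa_updates = {series_dd: gr.update(choices=["v1"], value="v1")}
        # Merging per-feature dicts mirrors `{**cn_updates, **ipa_updates}`.
        return {**cn_updates, **ipa_updates}

    demo.load(fn=run_on_load, outputs=[type_dd, series_dd])
```

The dict form is what makes the merge pattern practical here: each initializer can target only the components it knows about, and the caller combines them without worrying about positional order.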
@@
 
         all_updates = {**cn_updates, **ipa_updates}
 
         return all_updates
 
     all_load_outputs = []
@@
             all_load_outputs.extend(ui_components[f'controlnet_types_{prefix}'])
             all_load_outputs.extend(ui_components[f'controlnet_series_{prefix}'])
             all_load_outputs.extend(ui_components[f'controlnet_filepaths_{prefix}'])
+            if f'diffsynth_controlnet_types_{prefix}' in ui_components:
+                all_load_outputs.extend(ui_components[f'diffsynth_controlnet_types_{prefix}'])
+                all_load_outputs.extend(ui_components[f'diffsynth_controlnet_series_{prefix}'])
+                all_load_outputs.extend(ui_components[f'diffsynth_controlnet_filepaths_{prefix}'])
         if f'ipadapter_final_preset_{prefix}' in ui_components:
             all_load_outputs.extend(ui_components[f'ipadapter_lora_strengths_{prefix}'])
             all_load_outputs.append(ui_components[f'ipadapter_final_preset_{prefix}'])
             all_load_outputs.append(ui_components[f'ipadapter_final_lora_strength_{prefix}'])
 
     if all_load_outputs:
         demo.load(
             fn=run_on_load,
             outputs=all_load_outputs
+        )
+
+    def on_aspect_ratio_change(ratio_key, model_display_name):
+        from core.settings import MODEL_TYPE_MAP, ARCHITECTURES_CONFIG
+        m_type = MODEL_TYPE_MAP.get(model_display_name, 'SDXL')
+        architectures_dict = ARCHITECTURES_CONFIG.get('architectures', {})
+        arch_model_type = architectures_dict.get(m_type, {}).get("model_type", m_type.lower().replace(" ", "").replace(".", ""))
+
+        res_map = RESOLUTION_MAP.get(arch_model_type, RESOLUTION_MAP.get("sdxl", {}))
+        w, h = res_map.get(ratio_key, (1024, 1024))
+        return w, h
+
+    for prefix in ["txt2img", "img2img", "inpaint", "outpaint", "hires_fix"]:
+        aspect_ratio_dropdown = ui_components.get(f'aspect_ratio_{prefix}') or ui_components.get(f'{prefix}_aspect_ratio_dropdown')
+        width_component = ui_components.get(f'width_{prefix}') or ui_components.get(f'{prefix}_width')
+        height_component = ui_components.get(f'height_{prefix}') or ui_components.get(f'{prefix}_height')
+        model_dropdown = ui_components.get(f'base_model_{prefix}')
+        if aspect_ratio_dropdown and width_component and height_component and model_dropdown:
+            aspect_ratio_dropdown.change(fn=on_aspect_ratio_change, inputs=[aspect_ratio_dropdown, model_dropdown], outputs=[width_component, height_component], show_progress=False)
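`on_aspect_ratio_change` is a pure lookup with two fallbacks. A sketch of the resolution logic with illustrative `RESOLUTION_MAP` contents (the real table is defined elsewhere in the app and is not part of this diff):

```python
# Illustrative only: the diff references RESOLUTION_MAP but not its contents,
# so the entries below are assumptions made for the sketch.
RESOLUTION_MAP = {
    "sdxl": {"1:1": (1024, 1024), "16:9": (1344, 768)},
    "sd15": {"1:1": (512, 512), "16:9": (912, 512)},
}

def resolve_resolution(arch_model_type: str, ratio_key: str) -> tuple[int, int]:
    # Same two-level fallback as on_aspect_ratio_change: an unknown architecture
    # falls back to the SDXL table, an unknown ratio falls back to 1024x1024.
    res_map = RESOLUTION_MAP.get(arch_model_type, RESOLUTION_MAP.get("sdxl", {}))
    return res_map.get(ratio_key, (1024, 1024))

assert resolve_resolution("sd15", "16:9") == (912, 512)
assert resolve_resolution("unknown-arch", "1:1") == (1024, 1024)
```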
ui/layout.py
CHANGED

@@ -6,83 +6,40 @@ from .shared import txt2img_ui, img2img_ui, inpaint_ui, outpaint_ui, hires_fix_u
 
 MAX_DYNAMIC_CONTROLS = 10
 
-def get_preprocessor_choices():
-    from nodes import NODE_DISPLAY_NAME_MAPPINGS
-
-    preprocessor_names = [
-        display_name for class_name, display_name in NODE_DISPLAY_NAME_MAPPINGS.items()
-        if "Preprocessor" in class_name or "Segmentor" in class_name or
-        "Estimator" in class_name or "Detector" in class_name
-    ]
-    return sorted(list(set(preprocessor_names)))
-
-
 def build_ui(event_handler_function):
     ui_components = {}
 
     with gr.Blocks() as demo:
-        gr.Markdown("# ImageGen
+        gr.Markdown("# ImageGen")
         gr.Markdown(
-            "This demo is a streamlined version of the [Comfy web UI](https://github.com/RioShiina47/comfy-webui)'s
+            "This demo is a streamlined version of the [Comfy web UI](https://github.com/RioShiina47/comfy-webui)'s ImageGen functionality. "
             "Other versions are also available: "
             "[FLUX.2](https://huggingface.co/spaces/RioShiina/ImageGen-FLUX.2), "
             "[Z-Image](https://huggingface.co/spaces/RioShiina/ImageGen-Z-Image), "
             "[Qwen-Image](https://huggingface.co/spaces/RioShiina/ImageGen-Qwen-Image), "
-            "[
+            "[Anime](https://huggingface.co/spaces/RioShiina/ImageGen-Anime), "
             "[Illustrious](https://huggingface.co/spaces/RioShiina/ImageGen-Illustrious), "
            "[NoobAI](https://huggingface.co/spaces/RioShiina/ImageGen-NoobAI), "
             "[Pony](https://huggingface.co/spaces/RioShiina/ImageGen-Pony)"
         )
         with gr.Tabs(elem_id="tabs_container") as tabs:
-            with gr.TabItem("
-                with gr.TabItem("Img2Img", id=1):
-                    ui_components.update(img2img_ui.create_ui())
-            ui_components['image_gen_tabs'] = image_gen_tabs
-            with gr.TabItem("Controlnet Preprocessors", id=1):
-                gr.Markdown("## ControlNet Auxiliary Preprocessors")
-                gr.Markdown("Powered by [Fannovel16/comfyui_controlnet_aux](https://github.com/Fannovel16/comfyui_controlnet_aux).")
-                gr.Markdown("Upload an image or video to process it with a ControlNet preprocessor.")
-                with gr.Row():
-                    with gr.Column(scale=1):
-                        cn_input_type = gr.Radio(["Image", "Video"], label="Input Type", value="Image")
-                        cn_image_input = gr.Image(type="pil", label="Input Image", visible=True, height=384)
-                        cn_video_input = gr.Video(label="Input Video", visible=False)
-                        preprocessor_cn = gr.Dropdown(label="Preprocessor", choices=get_preprocessor_choices(), value="Canny Edge")
-                        preprocessor_model_cn = gr.Dropdown(label="Preprocessor Model", choices=[], value=None, visible=False)
-                        with gr.Column() as preprocessor_settings_ui:
-                            cn_sliders, cn_dropdowns, cn_checkboxes = [], [], []
-                            for i in range(MAX_DYNAMIC_CONTROLS):
-                                cn_sliders.append(gr.Slider(visible=False, label=f"dyn_slider_{i}"))
-                                cn_dropdowns.append(gr.Dropdown(visible=False, label=f"dyn_dropdown_{i}"))
-                                cn_checkboxes.append(gr.Checkbox(visible=False, label=f"dyn_checkbox_{i}"))
-                        run_cn = gr.Button("Run Preprocessor", variant="primary")
-                    with gr.Column(scale=1):
-                        output_gallery_cn = gr.Gallery(label="Output", show_label=False, object_fit="contain", height=512)
-                        zero_gpu_cn = gr.Number(label="ZeroGPU Duration (s)", value=None, placeholder="Default: 60s, Max: 120s", info="Optional")
-                ui_components.update({
-                    "cn_input_type": cn_input_type, "cn_image_input": cn_image_input, "cn_video_input": cn_video_input,
-                    "preprocessor_cn": preprocessor_cn, "preprocessor_model_cn": preprocessor_model_cn, "run_cn": run_cn,
-                    "zero_gpu_cn": zero_gpu_cn, "output_gallery_cn": output_gallery_cn,
-                    "preprocessor_settings_ui": preprocessor_settings_ui, "cn_sliders": cn_sliders,
-                    "cn_dropdowns": cn_dropdowns, "cn_checkboxes": cn_checkboxes
-                })
-
+            with gr.TabItem("Txt2Img", id=0):
+                ui_components.update(txt2img_ui.create_ui())
+
+            with gr.TabItem("Img2Img", id=1):
+                ui_components.update(img2img_ui.create_ui())
+
+            with gr.TabItem("Inpaint", id=2):
+                ui_components.update(inpaint_ui.create_ui())
+
+            with gr.TabItem("Outpaint", id=3):
+                ui_components.update(outpaint_ui.create_ui())
+
+            with gr.TabItem("Hires. Fix", id=4):
+                ui_components.update(hires_fix_ui.create_ui())
+
         ui_components["tabs"] = tabs
+        ui_components["image_gen_tabs"] = tabs
 
         gr.Markdown("<div style='text-align: center; margin-top: 20px;'>Made by RioShiina with ❤️<br><a href='https://github.com/RioShiina47' target='_blank'>GitHub</a> | <a href='https://huggingface.co/RioShiina' target='_blank'>Hugging Face</a> | <a href='https://civitai.com/user/RioShiina' target='_blank'>Civitai</a></div>")
 
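With the nested "Image Generation" wrapper gone, both `tabs` and `image_gen_tabs` now refer to the same `gr.Tabs` instance, so event code written against either key keeps resolving. A trivial sketch of the aliasing:

```python
# Why `ui_components["image_gen_tabs"] = tabs` is sufficient: both dict keys
# alias one gr.Tabs object, so legacy handlers need no changes.
import gradio as gr

ui_components = {}
with gr.Blocks() as demo:
    with gr.Tabs(elem_id="tabs_container") as tabs:
        with gr.TabItem("Txt2Img", id=0):
            gr.Markdown("txt2img controls go here")
    ui_components["tabs"] = tabs
    ui_components["image_gen_tabs"] = tabs

assert ui_components["tabs"] is ui_components["image_gen_tabs"]
```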
ui/shared/hires_fix_ui.py
CHANGED

@@ -4,7 +4,10 @@ from comfy_integration.nodes import SAMPLER_CHOICES, SCHEDULER_CHOICES
 from .ui_components import (
     create_lora_settings_ui,
     create_controlnet_ui, create_ipadapter_ui, create_embedding_ui,
-    create_conditioning_ui, create_vae_override_ui,
+    create_conditioning_ui, create_vae_override_ui,
+    create_model_architecture_filter_ui, create_category_filter_ui,
+    create_sd3_ipadapter_ui, create_flux1_ipadapter_ui, create_style_ui,
+    create_reference_latent_ui
 )
 
 def create_ui():
@@ -12,7 +15,10 @@ def create_ui():
     components = {}
 
     with gr.Column():
+        components.update(create_model_architecture_filter_ui(prefix))
+
         with gr.Row():
+            components.update(create_category_filter_ui(prefix))
             components[f'base_model_{prefix}'] = gr.Dropdown(
                 label="Base Model",
                 choices=list(MODEL_MAP_CHECKPOINT.keys()),
@@ -26,8 +32,8 @@ def create_ui():
         with gr.Column(scale=1):
             components[f'input_image_{prefix}'] = gr.Image(type="pil", label="Input Image", height=255)
         with gr.Column(scale=2):
-            components[f'prompt_{prefix}'] = gr.Text(label="Prompt", lines=3
-            components[f'neg_prompt_{prefix}'] = gr.Text(label="Negative prompt", lines=3
+            components[f'prompt_{prefix}'] = gr.Text(label="Prompt", lines=3)
+            components[f'neg_prompt_{prefix}'] = gr.Text(label="Negative prompt", lines=3)
 
         with gr.Row():
             with gr.Column(scale=1):
@@ -54,21 +60,26 @@ def create_ui():
             components[f'seed_{prefix}'] = gr.Number(label="Seed (-1 for random)", value=-1, precision=0)
             components[f'batch_size_{prefix}'] = gr.Slider(label="Batch Size", minimum=1, maximum=16, step=1, value=1)
             with gr.Row():
+                components[f'clip_skip_{prefix}'] = gr.Slider(label="Clip Skip", minimum=1, maximum=2, step=1, value=1, visible=False, interactive=True)
+                components[f'guidance_{prefix}'] = gr.Slider(label="Guidance (FLUX)", minimum=1.0, maximum=10.0, step=0.1, value=3.5, visible=False, interactive=True)
                 components[f'zero_gpu_{prefix}'] = gr.Number(label="ZeroGPU Duration (s)", value=None, placeholder="Default: 60s, Max: 120s", info="Optional: Set how long to reserve the GPU.")
 
-            components[f'clip_skip_{prefix}'] = gr.State(value=1)
             components[f'width_{prefix}'] = gr.State(value=512)
             components[f'height_{prefix}'] = gr.State(value=512)
 
         with gr.Column(scale=1):
             components[f'result_{prefix}'] = gr.Gallery(label="Result", show_label=False, columns=1, object_fit="contain", height=610)
 
     components.update(create_lora_settings_ui(prefix))
     components.update(create_controlnet_ui(prefix))
     components.update(create_ipadapter_ui(prefix))
+    components.update(create_flux1_ipadapter_ui(prefix))
+    components.update(create_sd3_ipadapter_ui(prefix))
+    components.update(create_style_ui(prefix))
     components.update(create_embedding_ui(prefix))
     components.update(create_conditioning_ui(prefix))
+    components.update(create_reference_latent_ui(prefix))
     components.update(create_vae_override_ui(prefix))
 
     return components
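Note that Clip Skip here changes from a hidden `gr.State` to an interactive `gr.Slider` created with `visible=False`, alongside a new FLUX guidance slider; presumably the events module toggles their visibility per architecture (that handler is not shown in this hunk). A sketch, under that assumption, of how such a toggle could look:

```python
# Assumed behavior, not taken verbatim from the diff: flip the hidden sliders'
# visibility depending on the selected architecture type.
import gradio as gr

def toggle_arch_params(m_type: str):
    # Clip Skip is an SD-family knob; Guidance applies to FLUX-style models.
    return (
        gr.update(visible=m_type in ("SD1.5", "SDXL")),
        gr.update(visible=m_type == "FLUX"),
    )

with gr.Blocks() as demo:
    arch = gr.Dropdown(["SD1.5", "SDXL", "FLUX"], value="SDXL", label="Architecture")
    clip_skip = gr.Slider(1, 2, step=1, value=1, label="Clip Skip", visible=False)
    guidance = gr.Slider(1.0, 10.0, step=0.1, value=3.5, label="Guidance (FLUX)", visible=False)
    arch.change(fn=toggle_arch_params, inputs=[arch], outputs=[clip_skip, guidance])
```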