diff --git "a/ComfyUI-Impact-Pack/modules/impact/impact_pack.py" "b/ComfyUI-Impact-Pack/modules/impact/impact_pack.py" new file mode 100644--- /dev/null +++ "b/ComfyUI-Impact-Pack/modules/impact/impact_pack.py" @@ -0,0 +1,2633 @@ +import os +import sys + +import torch + +import folder_paths +import comfy.samplers +import comfy.sd +import warnings +from segment_anything import sam_model_registry +from io import BytesIO +import piexif +import math +import zipfile +import re +from PIL import ImageDraw +from server import PromptServer + +from impact.utils import * +import impact.core as core +from impact.core import SEG, NO_BBOX_DETECTOR, NO_SEGM_DETECTOR +from impact.config import MAX_RESOLUTION, latent_letter_path +from PIL import Image +import numpy as np +import hashlib +import json +import safetensors.torch +from PIL.PngImagePlugin import PngInfo +import latent_preview +import comfy.model_management + +warnings.filterwarnings('ignore', category=UserWarning, message='TypedStorage is deprecated') + +model_path = folder_paths.models_dir + + +# Nodes +# folder_paths.supported_pt_extensions +folder_paths.folder_names_and_paths["mmdets_bbox"] = ([os.path.join(model_path, "mmdets", "bbox")], folder_paths.supported_pt_extensions) +folder_paths.folder_names_and_paths["mmdets_segm"] = ([os.path.join(model_path, "mmdets", "segm")], folder_paths.supported_pt_extensions) +folder_paths.folder_names_and_paths["mmdets"] = ([os.path.join(model_path, "mmdets")], folder_paths.supported_pt_extensions) +folder_paths.folder_names_and_paths["sams"] = ([os.path.join(model_path, "sams")], folder_paths.supported_pt_extensions) +folder_paths.folder_names_and_paths["onnx"] = ([os.path.join(model_path, "onnx")], {'.onnx'}) + + +class ONNXDetectorProvider: + @classmethod + def INPUT_TYPES(s): + return {"required": {"model_name": (folder_paths.get_filename_list("onnx"), )}} + + RETURN_TYPES = ("ONNX_DETECTOR", ) + FUNCTION = "load_onnx" + + CATEGORY = "ImpactPack" + + def load_onnx(self, model_name): + model = folder_paths.get_full_path("onnx", model_name) + return (core.ONNXDetector(model), ) + + +class CLIPSegDetectorProvider: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "text": ("STRING", {"multiline": False}), + "blur": ("FLOAT", {"min": 0, "max": 15, "step": 0.1, "default": 7}), + "threshold": ("FLOAT", {"min": 0, "max": 1, "step": 0.05, "default": 0.4}), + "dilation_factor": ("INT", {"min": 0, "max": 10, "step": 1, "default": 4}), + } + } + + RETURN_TYPES = ("BBOX_DETECTOR", ) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Util" + + def doit(self, text, blur, threshold, dilation_factor): + try: + import custom_nodes.clipseg + return (core.BBoxDetectorBasedOnCLIPSeg(text, blur, threshold, dilation_factor), ) + except Exception as e: + print("[ERROR] CLIPSegToBboxDetector: CLIPSeg custom node isn't installed. 
You must install biegert/ComfyUI-CLIPSeg extension to use this node.") + print(f"\t{e}") + pass + + +class SAMLoader: + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "model_name": (folder_paths.get_filename_list("sams"), ), + "device_mode": (["AUTO", "Prefer GPU", "CPU"],), + } + } + + RETURN_TYPES = ("SAM_MODEL", ) + FUNCTION = "load_model" + + CATEGORY = "ImpactPack" + + def load_model(self, model_name, device_mode="auto"): + modelname = folder_paths.get_full_path("sams", model_name) + + if 'vit_h' in model_name: + model_kind = 'vit_h' + elif 'vit_l' in model_name: + model_kind = 'vit_l' + else: + model_kind = 'vit_b' + + sam = sam_model_registry[model_kind](checkpoint=modelname) + # Unless user explicitly wants to use CPU, we use GPU + device = comfy.model_management.get_torch_device() if device_mode == "Prefer GPU" else "CPU" + + if device_mode == "Prefer GPU": + sam.to(device=device) + + sam.is_auto_mode = device_mode == "AUTO" + + print(f"Loads SAM model: {modelname} (device:{device_mode})") + return (sam, ) + + +class ONNXDetectorForEach: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "onnx_detector": ("ONNX_DETECTOR",), + "image": ("IMAGE",), + "threshold": ("FLOAT", {"default": 0.8, "min": 0.0, "max": 1.0, "step": 0.01}), + "dilation": ("INT", {"default": 10, "min": 0, "max": 255, "step": 1}), + "crop_factor": ("FLOAT", {"default": 1.0, "min": 0.5, "max": 10, "step": 0.1}), + "drop_size": ("INT", {"min": 1, "max": MAX_RESOLUTION, "step": 1, "default": 10}), + } + } + + RETURN_TYPES = ("SEGS", ) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Detector" + + OUTPUT_NODE = True + + def doit(self, onnx_detector, image, threshold, dilation, crop_factor, drop_size): + segs = onnx_detector.detect(image, threshold, dilation, crop_factor, drop_size) + return (segs, ) + + +class SEGSDetailer: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "image": ("IMAGE", ), + "segs": ("SEGS", ), + "guide_size": ("FLOAT", {"default": 256, "min": 64, "max": nodes.MAX_RESOLUTION, "step": 8}), + "guide_size_for": ("BOOLEAN", {"default": True, "label_on": "bbox", "label_off": "crop_region"}), + "max_size": ("FLOAT", {"default": 768, "min": 64, "max": nodes.MAX_RESOLUTION, "step": 8}), + "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + "steps": ("INT", {"default": 20, "min": 1, "max": 10000}), + "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}), + "sampler_name": (comfy.samplers.KSampler.SAMPLERS,), + "scheduler": (comfy.samplers.KSampler.SCHEDULERS,), + "denoise": ("FLOAT", {"default": 0.5, "min": 0.0001, "max": 1.0, "step": 0.01}), + "noise_mask": ("BOOLEAN", {"default": True, "label_on": "enabled", "label_off": "disabled"}), + "force_inpaint": ("BOOLEAN", {"default": False, "label_on": "enabled", "label_off": "disabled"}), + "basic_pipe": ("BASIC_PIPE",), + }, + } + + RETURN_TYPES = ("SEGS", ) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Detailer" + + @staticmethod + def do_detail(image, segs, guide_size, guide_size_for, max_size, seed, steps, cfg, sampler_name, scheduler, + denoise, noise_mask, force_inpaint, basic_pipe): + + model, clip, vae, positive, negative = basic_pipe + + new_segs = [] + + for seg in segs[1]: + cropped_image = seg.cropped_image if seg.cropped_image is not None \ + else crop_ndarray4(image.numpy(), seg.crop_region) + + is_mask_all_zeros = (seg.cropped_mask == 0).all().item() + if is_mask_all_zeros: + print(f"Detailer: segment skip [empty mask]") + new_segs.append(seg) + continue + + if noise_mask: + 
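+                # with noise_mask enabled, the segment's own mask is forwarded to
+                # core.enhance_detail as the sampling noise mask, so only the
+                # detected region is re-rendered; None regenerates the whole crop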
+                cropped_mask = seg.cropped_mask
+            else:
+                cropped_mask = None
+
+            enhanced_pil = core.enhance_detail(cropped_image, model, clip, vae, guide_size, guide_size_for, max_size,
+                                               seg.bbox, seed, steps, cfg, sampler_name, scheduler,
+                                               positive, negative, denoise, cropped_mask, force_inpaint)
+
+            new_seg = SEG(enhanced_pil, seg.cropped_mask, seg.confidence, seg.crop_region, seg.bbox, seg.label)
+            new_segs.append(new_seg)
+
+        return segs[0], new_segs
+
+    def doit(self, image, segs, guide_size, guide_size_for, max_size, seed, steps, cfg, sampler_name, scheduler,
+             denoise, noise_mask, force_inpaint, basic_pipe):
+
+        segs = SEGSDetailer.do_detail(image, segs, guide_size, guide_size_for, max_size, seed, steps, cfg, sampler_name, scheduler,
+                                      denoise, noise_mask, force_inpaint, basic_pipe)
+
+        return (segs, )
+
+
+class SEGSPaste:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": {
+                    "image": ("IMAGE", ),
+                    "segs": ("SEGS", ),
+                    "feather": ("INT", {"default": 5, "min": 0, "max": 100, "step": 1}),
+                },
+                "optional": {"ref_image_opt": ("IMAGE", ), }
+                }
+
+    RETURN_TYPES = ("IMAGE", )
+    FUNCTION = "doit"
+
+    CATEGORY = "ImpactPack/Detailer"
+
+    @staticmethod
+    def doit(image, segs, feather, ref_image_opt=None):
+        image_pil = tensor2pil(image).convert('RGBA')
+
+        for seg in segs[1]:
+            ref_image_pil = None
+            if ref_image_opt is None and seg.cropped_image is not None:
+                ref_image_pil = seg.cropped_image
+            elif ref_image_opt is not None:
+                cropped = crop_image(ref_image_opt, seg.crop_region)
+                cropped = np.clip(255. * cropped.squeeze(), 0, 255).astype(np.uint8)
+                ref_image_pil = Image.fromarray(cropped).convert('RGBA')
+
+            if ref_image_pil is not None:
+                mask_pil = feather_mask(seg.cropped_mask, feather)
+                image_pil.paste(ref_image_pil, (seg.crop_region[0], seg.crop_region[1]), mask_pil)
+
+        image_tensor = pil2tensor(image_pil.convert('RGB'))
+
+        return (image_tensor, )
+
+
+class SEGSPreview:
+    def __init__(self):
+        self.output_dir = folder_paths.get_temp_directory()
+        self.type = "temp"
+
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": {
+                    "segs": ("SEGS", ),
+                },
+                "optional": {
+                    "fallback_image_opt": ("IMAGE", ),
+                }
+                }
+
+    RETURN_TYPES = ()
+    FUNCTION = "doit"
+
+    CATEGORY = "ImpactPack/Util"
+
+    OUTPUT_NODE = True
+
+    def doit(self, segs, fallback_image_opt=None):
+        full_output_folder, filename, counter, subfolder, filename_prefix = \
+            folder_paths.get_save_image_path("impact_seg_preview", self.output_dir, segs[0][1], segs[0][0])
+
+        results = list()
+
+        for seg in segs[1]:
+            if seg.cropped_image is not None:
+                cropped_image = seg.cropped_image
+            elif fallback_image_opt is not None:
+                # take from original image
+                cropped_image = crop_image(fallback_image_opt, seg.crop_region)
+                cropped_image = Image.fromarray(np.clip(255.
* cropped_image.squeeze(), 0, 255).astype(np.uint8)) + + if cropped_image is not None: + file = f"{filename}_{counter:05}_.webp" + cropped_image.save(os.path.join(full_output_folder, file)) + results.append({ + "filename": file, + "subfolder": subfolder, + "type": self.type + }) + + counter += 1 + + return {"ui": {"images": results}} + + +class SEGSLabelFilter: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "segs": ("SEGS", ), + "preset": (['all', 'hand', 'face', 'short_sleeved_shirt', 'long_sleeved_shirt', 'short_sleeved_outwear', 'long_sleeved_outwear', 'vest', 'sling', 'shorts', 'trousers', 'skirt', 'short_sleeved_dress', 'long_sleeved_dress', 'vest_dress', 'sling_dress'], ), + "labels": ("STRING", {"multiline": True, "placeholder": "List the types of segments to be allowed, separated by commas"}), + }, + } + + RETURN_TYPES = ("SEGS",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Util" + + def doit(self, segs, preset, labels): + labels = labels.split(',') + labels = set([label.strip() for label in labels]) + + if 'all' in labels: + return (segs, ) + else: + res_segs = [] + for x in segs[1]: + if x.label in labels: + res_segs.append(x) + + return ((segs[0], res_segs), ) + + +class SEGSOrderedFilter: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "segs": ("SEGS", ), + "target": (["area(=w*h)", "width", "height", "x1", "y1", "x2", "y2"],), + "order": ("BOOLEAN", {"default": True, "label_on": "descending", "label_off": "ascending"}), + "take_start": ("INT", {"default": 0, "min": 0, "max": sys.maxsize, "step": 1}), + "take_count": ("INT", {"default": 1, "min": 0, "max": sys.maxsize, "step": 1}), + }, + } + + RETURN_TYPES = ("SEGS",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Util" + + def doit(self, segs, target, order, take_start, take_count): + segs_with_order = [] + + for seg in segs[1]: + x1 = seg.crop_region[0] + y1 = seg.crop_region[1] + x2 = seg.crop_region[2] + y2 = seg.crop_region[3] + + if target == "area(=w*h)": + value = (y2 - y1) * (x2 - x1) + elif target == "width": + value = x2 - x1 + elif target == "height": + value = y2 - y1 + elif target == "x1": + value = x1 + elif target == "x2": + value = x2 + elif target == "y1": + value = y1 + else: + value = y2 + + segs_with_order.append((value, seg)) + + if order: + sorted_list = sorted(segs_with_order, key=lambda x: x[0], reverse=True) + else: + sorted_list = sorted(segs_with_order, key=lambda x: x[0], reverse=False) + + result_list = [item[1] for item in sorted_list[take_start:take_start + take_count]] + return ((segs[0], result_list), ) + + +class SEGSRangeFilter: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "segs": ("SEGS", ), + "target": (["area(=w*h)", "width", "height", "x1", "y1", "x2", "y2"],), + "mode": ("BOOLEAN", {"default": True, "label_on": "inside", "label_off": "outside"}), + "min_value": ("INT", {"default": 0, "min": 0, "max": sys.maxsize, "step": 1}), + "max_value": ("INT", {"default": 67108864, "min": 0, "max": sys.maxsize, "step": 1}), + }, + } + + RETURN_TYPES = ("SEGS",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Util" + + def doit(self, segs, target, mode, min_value, max_value): + new_segs = [] + + for seg in segs[1]: + x1 = seg.crop_region[0] + y1 = seg.crop_region[1] + x2 = seg.crop_region[2] + y2 = seg.crop_region[3] + + if target == "area(=w*h)": + value = (y2 - y1) * (x2 - x1) + elif target == "width": + value = x2 - x1 + elif target == "height": + value = y2 - y1 + elif target == "x1": + value = x1 + elif target == "x2": + value = x2 + elif 
target == "y1": + value = y1 + else: + value = y2 + + if mode and min_value <= value <= max_value: + print(f"[in] value={value} / {mode}, {min_value}, {max_value}") + new_segs.append(seg) + elif not mode and (value < min_value or value > max_value): + print(f"[out] value={value} / {mode}, {min_value}, {max_value}") + new_segs.append(seg) + else: + print(f"[filter] value={value} / {mode}, {min_value}, {max_value}") + + return ((segs[0], new_segs), ) + + +class SEGSToImageList: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "segs": ("SEGS", ), + }, + "optional": { + "fallback_image_opt": ("IMAGE", ), + } + } + + RETURN_TYPES = ("IMAGE",) + OUTPUT_IS_LIST = (True,) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Util" + + def doit(self, segs, fallback_image_opt=None): + results = list() + + for seg in segs[1]: + if seg.cropped_image is not None: + cropped_image = torch.from_numpy(seg.cropped_image) + elif fallback_image_opt is not None: + # take from original image + cropped_image = torch.from_numpy(crop_image(fallback_image_opt, seg.crop_region)) + else: + cropped_image = empty_pil_tensor() + + results.append(cropped_image) + + if len(results) == 0: + results.append(empty_pil_tensor()) + + return (results,) + + +class SEGSToMaskList: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "segs": ("SEGS", ), + }, + } + + RETURN_TYPES = ("MASK",) + OUTPUT_IS_LIST = (True,) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Util" + + def doit(self, segs): + masks = core.segs_to_masklist(segs) + return (masks,) + + +class SEGSConcat: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "segs1": ("SEGS", ), + "segs2": ("SEGS", ), + }, + } + + RETURN_TYPES = ("SEGS",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Util" + + def doit(self, segs1, segs2): + if segs1[0] == segs2[0]: + return ((segs1[0], segs1[1] + segs2[1]), ) + else: + print(f"ERROR: source shape of 'segs1' and 'segs2' are different. 
'segs2' will be ignored") + return (segs1, ) + + +class DetailerForEach: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "image": ("IMAGE", ), + "segs": ("SEGS", ), + "model": ("MODEL",), + "clip": ("CLIP",), + "vae": ("VAE",), + "guide_size": ("FLOAT", {"default": 256, "min": 64, "max": nodes.MAX_RESOLUTION, "step": 8}), + "guide_size_for": ("BOOLEAN", {"default": True, "label_on": "bbox", "label_off": "crop_region"}), + "max_size": ("FLOAT", {"default": 768, "min": 64, "max": nodes.MAX_RESOLUTION, "step": 8}), + "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + "steps": ("INT", {"default": 20, "min": 1, "max": 10000}), + "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}), + "sampler_name": (comfy.samplers.KSampler.SAMPLERS,), + "scheduler": (comfy.samplers.KSampler.SCHEDULERS,), + "positive": ("CONDITIONING",), + "negative": ("CONDITIONING",), + "denoise": ("FLOAT", {"default": 0.5, "min": 0.0001, "max": 1.0, "step": 0.01}), + "feather": ("INT", {"default": 5, "min": 0, "max": 100, "step": 1}), + "noise_mask": ("BOOLEAN", {"default": True, "label_on": "enabled", "label_off": "disabled"}), + "force_inpaint": ("BOOLEAN", {"default": True, "label_on": "enabled", "label_off": "disabled"}), + }, + } + + RETURN_TYPES = ("IMAGE", ) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Detailer" + + @staticmethod + def do_detail(image, segs, model, clip, vae, guide_size, guide_size_for_bbox, max_size, seed, steps, cfg, sampler_name, scheduler, + positive, negative, denoise, feather, noise_mask, force_inpaint, wildcard_opt=None): + + image_pil = tensor2pil(image).convert('RGBA') + + enhanced_alpha_list = [] + enhanced_list = [] + cropped_list = [] + + for seg in segs[1]: + cropped_image = seg.cropped_image if seg.cropped_image is not None \ + else crop_ndarray4(image.numpy(), seg.crop_region) + + mask_pil = feather_mask(seg.cropped_mask, feather) + + is_mask_all_zeros = (seg.cropped_mask == 0).all().item() + if is_mask_all_zeros: + print(f"Detailer: segment skip [empty mask]") + continue + + if noise_mask: + cropped_mask = seg.cropped_mask + else: + cropped_mask = None + + enhanced_pil = core.enhance_detail(cropped_image, model, clip, vae, guide_size, guide_size_for_bbox, max_size, + seg.bbox, seed, steps, cfg, sampler_name, scheduler, + positive, negative, denoise, cropped_mask, force_inpaint, wildcard_opt) + + if not (enhanced_pil is None): + # don't latent composite-> converting to latent caused poor quality + # use image paste + image_pil.paste(enhanced_pil, (seg.crop_region[0], seg.crop_region[1]), mask_pil) + enhanced_list.append(pil2tensor(enhanced_pil)) + + if not (enhanced_pil is None): + # Convert enhanced_pil_alpha to RGBA mode + enhanced_pil_alpha = enhanced_pil.copy().convert('RGBA') + + # Apply the mask + mask_array = seg.cropped_mask.astype(np.uint8) * 255 + mask_image = Image.fromarray(mask_array, mode='L').resize(enhanced_pil_alpha.size) + enhanced_pil_alpha.putalpha(mask_image) + enhanced_alpha_list.append(pil2tensor(enhanced_pil_alpha)) + + cropped_list.append(torch.from_numpy(cropped_image)) + + image_tensor = pil2tensor(image_pil.convert('RGB')) + + cropped_list.sort(key=lambda x: x.shape, reverse=True) + enhanced_list.sort(key=lambda x: x.shape, reverse=True) + enhanced_alpha_list.sort(key=lambda x: x.shape, reverse=True) + + return image_tensor, cropped_list, enhanced_list, enhanced_alpha_list + + def doit(self, image, segs, model, clip, vae, guide_size, guide_size_for, max_size, seed, steps, cfg, sampler_name, + scheduler, positive, 
negative, denoise, feather, noise_mask, force_inpaint): + + enhanced_img, cropped, cropped_enhanced, cropped_enhanced_alpha = \ + DetailerForEach.do_detail(image, segs, model, clip, vae, guide_size, guide_size_for, max_size, seed, steps, + cfg, sampler_name, scheduler, positive, negative, denoise, feather, noise_mask, + force_inpaint) + + return (enhanced_img, ) + + +class DetailerForEachPipe: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "image": ("IMAGE", ), + "segs": ("SEGS", ), + "guide_size": ("FLOAT", {"default": 256, "min": 64, "max": nodes.MAX_RESOLUTION, "step": 8}), + "guide_size_for": ("BOOLEAN", {"default": True, "label_on": "bbox", "label_off": "crop_region"}), + "max_size": ("FLOAT", {"default": 768, "min": 64, "max": nodes.MAX_RESOLUTION, "step": 8}), + "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + "steps": ("INT", {"default": 20, "min": 1, "max": 10000}), + "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}), + "sampler_name": (comfy.samplers.KSampler.SAMPLERS,), + "scheduler": (comfy.samplers.KSampler.SCHEDULERS,), + "denoise": ("FLOAT", {"default": 0.5, "min": 0.0001, "max": 1.0, "step": 0.01}), + "feather": ("INT", {"default": 5, "min": 0, "max": 100, "step": 1}), + "noise_mask": ("BOOLEAN", {"default": True, "label_on": "enabled", "label_off": "disabled"}), + "force_inpaint": ("BOOLEAN", {"default": True, "label_on": "enabled", "label_off": "disabled"}), + "basic_pipe": ("BASIC_PIPE", ) + }, + } + + RETURN_TYPES = ("IMAGE", ) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Detailer" + + def doit(self, image, segs, guide_size, guide_size_for, max_size, seed, steps, cfg, sampler_name, scheduler, + denoise, feather, noise_mask, force_inpaint, basic_pipe): + + model, clip, vae, positive, negative = basic_pipe + enhanced_img, cropped, cropped_enhanced, cropped_enhanced_alpha = \ + DetailerForEach.do_detail(image, segs, model, clip, vae, guide_size, guide_size_for, max_size, seed, steps, cfg, + sampler_name, scheduler, positive, negative, denoise, feather, noise_mask, + force_inpaint) + + return (enhanced_img, ) + + +class KSamplerProvider: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + "steps": ("INT", {"default": 20, "min": 1, "max": 10000}), + "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}), + "sampler_name": (comfy.samplers.KSampler.SAMPLERS, ), + "scheduler": (comfy.samplers.KSampler.SCHEDULERS, ), + "denoise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}), + "basic_pipe": ("BASIC_PIPE", ) + }, + } + + RETURN_TYPES = ("KSAMPLER",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Sampler" + + def doit(self, seed, steps, cfg, sampler_name, scheduler, denoise, basic_pipe): + model, _, _, positive, negative = basic_pipe + sampler = core.KSamplerWrapper(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, denoise) + return (sampler, ) + + +class KSamplerAdvancedProvider: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}), + "sampler_name": (comfy.samplers.KSampler.SAMPLERS, ), + "scheduler": (comfy.samplers.KSampler.SCHEDULERS, ), + "basic_pipe": ("BASIC_PIPE", ) + }, + } + + RETURN_TYPES = ("KSAMPLER_ADVANCED",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Sampler" + + def doit(self, cfg, sampler_name, scheduler, basic_pipe): + model, _, _, positive, negative = basic_pipe + sampler = 
core.KSamplerAdvancedWrapper(model, cfg, sampler_name, scheduler, positive, negative) + return (sampler, ) + + +class TwoSamplersForMask: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "latent_image": ("LATENT", ), + "base_sampler": ("KSAMPLER", ), + "mask_sampler": ("KSAMPLER", ), + "mask": ("MASK", ) + }, + } + + RETURN_TYPES = ("LATENT", ) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Sampler" + + def doit(self, latent_image, base_sampler, mask_sampler, mask): + inv_mask = torch.where(mask != 1.0, torch.tensor(1.0), torch.tensor(0.0)) + + latent_image['noise_mask'] = inv_mask + new_latent_image = base_sampler.sample(latent_image) + + new_latent_image['noise_mask'] = mask + new_latent_image = mask_sampler.sample(new_latent_image) + + del new_latent_image['noise_mask'] + + return (new_latent_image, ) + + +class TwoAdvancedSamplersForMask: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + "steps": ("INT", {"default": 20, "min": 1, "max": 10000}), + "denoise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}), + "samples": ("LATENT", ), + "base_sampler": ("KSAMPLER_ADVANCED", ), + "mask_sampler": ("KSAMPLER_ADVANCED", ), + "mask": ("MASK", ), + "overlap_factor": ("INT", {"default": 10, "min": 0, "max": 10000}) + }, + } + + RETURN_TYPES = ("LATENT", ) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Sampler" + + @staticmethod + def mask_erosion(samples, mask, grow_mask_by): + mask = mask.clone() + + w = samples['samples'].shape[3] + h = samples['samples'].shape[2] + + mask2 = torch.nn.functional.interpolate(mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1])), size=(w, h), mode="bilinear") + if grow_mask_by == 0: + mask_erosion = mask2 + else: + kernel_tensor = torch.ones((1, 1, grow_mask_by, grow_mask_by)) + padding = math.ceil((grow_mask_by - 1) / 2) + + mask_erosion = torch.clamp(torch.nn.functional.conv2d(mask2.round(), kernel_tensor, padding=padding), 0, 1) + + return mask_erosion[:, :, :w, :h].round() + + def doit(self, seed, steps, denoise, samples, base_sampler, mask_sampler, mask, overlap_factor): + + inv_mask = torch.where(mask != 1.0, torch.tensor(1.0), torch.tensor(0.0)) + + adv_steps = int(steps / denoise) + start_at_step = adv_steps - steps + + new_latent_image = samples.copy() + + mask_erosion = TwoAdvancedSamplersForMask.mask_erosion(samples, mask, overlap_factor) + + for i in range(start_at_step, adv_steps): + add_noise = "enable" if i == start_at_step else "disable" + return_with_leftover_noise = "enable" if i+1 != adv_steps else "disable" + + new_latent_image['noise_mask'] = inv_mask + new_latent_image = base_sampler.sample_advanced(add_noise, seed, adv_steps, new_latent_image, i, i + 1, "enable") + + new_latent_image['noise_mask'] = mask_erosion + new_latent_image = mask_sampler.sample_advanced("disable", seed, adv_steps, new_latent_image, i, i + 1, return_with_leftover_noise) + + del new_latent_image['noise_mask'] + + return (new_latent_image, ) + + +class RegionalPrompt: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "mask": ("MASK", ), + "advanced_sampler": ("KSAMPLER_ADVANCED", ), + }, + } + + RETURN_TYPES = ("REGIONAL_PROMPTS", ) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/experimental" + + def doit(self, mask, advanced_sampler): + regional_prompt = core.REGIONAL_PROMPT(mask, advanced_sampler) + return ([regional_prompt], ) + + +class CombineRegionalPrompts: + @classmethod + def INPUT_TYPES(s): + return {"required": { + 
"regional_prompts1": ("REGIONAL_PROMPTS", ), + "regional_prompts2": ("REGIONAL_PROMPTS", ), + }, + } + + RETURN_TYPES = ("REGIONAL_PROMPTS", ) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/experimental" + + def doit(self, regional_prompts1, regional_prompts2): + return (regional_prompts1 + regional_prompts2, ) + + +class RegionalSampler: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + "steps": ("INT", {"default": 20, "min": 1, "max": 10000}), + "denoise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}), + "samples": ("LATENT", ), + "base_sampler": ("KSAMPLER_ADVANCED", ), + "regional_prompts": ("REGIONAL_PROMPTS", ), + "overlap_factor": ("INT", {"default": 10, "min": 0, "max": 10000}) + }, + } + + RETURN_TYPES = ("LATENT", ) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/experimental" + + @staticmethod + def mask_erosion(samples, mask, grow_mask_by): + mask = mask.clone() + + w = samples['samples'].shape[3] + h = samples['samples'].shape[2] + + mask2 = torch.nn.functional.interpolate(mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1])), size=(w, h), mode="bilinear") + if grow_mask_by == 0: + mask_erosion = mask2 + else: + kernel_tensor = torch.ones((1, 1, grow_mask_by, grow_mask_by)) + padding = math.ceil((grow_mask_by - 1) / 2) + + mask_erosion = torch.clamp(torch.nn.functional.conv2d(mask2.round(), kernel_tensor, padding=padding), 0, 1) + + return mask_erosion[:, :, :w, :h].round() + + def doit(self, seed, steps, denoise, samples, base_sampler, regional_prompts, overlap_factor): + + masks = [regional_prompt.mask.numpy() for regional_prompt in regional_prompts] + masks = [np.ceil(mask).astype(np.int32) for mask in masks] + combined_mask = torch.from_numpy(np.bitwise_or.reduce(masks)) + + inv_mask = torch.where(combined_mask == 0, torch.tensor(1.0), torch.tensor(0.0)) + + adv_steps = int(steps / denoise) + start_at_step = adv_steps - steps + + new_latent_image = samples.copy() + + for i in range(start_at_step, adv_steps): + add_noise = "enable" if i == start_at_step else "disable" + return_with_leftover_noise = "enable" if i+1 != adv_steps else "disable" + + new_latent_image['noise_mask'] = inv_mask + new_latent_image = base_sampler.sample_advanced(add_noise, seed, adv_steps, new_latent_image, i, i + 1, "enable") + + for regional_prompt in regional_prompts: + new_latent_image['noise_mask'] = regional_prompt.get_mask_erosion(overlap_factor) + new_latent_image = regional_prompt.sampler.sample_advanced("disable", seed, adv_steps, new_latent_image, + i, i + 1, return_with_leftover_noise) + + del new_latent_image['noise_mask'] + + return (new_latent_image, ) + + +class FaceDetailer: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "image": ("IMAGE", ), + "model": ("MODEL",), + "clip": ("CLIP",), + "vae": ("VAE",), + "guide_size": ("FLOAT", {"default": 256, "min": 64, "max": nodes.MAX_RESOLUTION, "step": 8}), + "guide_size_for": ("BOOLEAN", {"default": True, "label_on": "bbox", "label_off": "crop_region"}), + "max_size": ("FLOAT", {"default": 768, "min": 64, "max": nodes.MAX_RESOLUTION, "step": 8}), + "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + "steps": ("INT", {"default": 20, "min": 1, "max": 10000}), + "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}), + "sampler_name": (comfy.samplers.KSampler.SAMPLERS,), + "scheduler": (comfy.samplers.KSampler.SCHEDULERS,), + "positive": ("CONDITIONING",), + "negative": ("CONDITIONING",), + "denoise": 
("FLOAT", {"default": 0.5, "min": 0.0001, "max": 1.0, "step": 0.01}), + "feather": ("INT", {"default": 5, "min": 0, "max": 100, "step": 1}), + "noise_mask": ("BOOLEAN", {"default": True, "label_on": "enabled", "label_off": "disabled"}), + "force_inpaint": ("BOOLEAN", {"default": True, "label_on": "enabled", "label_off": "disabled"}), + + "bbox_threshold": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}), + "bbox_dilation": ("INT", {"default": 10, "min": 0, "max": 255, "step": 1}), + "bbox_crop_factor": ("FLOAT", {"default": 3.0, "min": 1.0, "max": 10, "step": 0.1}), + + "sam_detection_hint": (["center-1", "horizontal-2", "vertical-2", "rect-4", "diamond-4", "mask-area", "mask-points", "mask-point-bbox", "none"],), + "sam_dilation": ("INT", {"default": 0, "min": 0, "max": 255, "step": 1}), + "sam_threshold": ("FLOAT", {"default": 0.93, "min": 0.0, "max": 1.0, "step": 0.01}), + "sam_bbox_expansion": ("INT", {"default": 0, "min": 0, "max": 1000, "step": 1}), + "sam_mask_hint_threshold": ("FLOAT", {"default": 0.7, "min": 0.0, "max": 1.0, "step": 0.01}), + "sam_mask_hint_use_negative": (["False", "Small", "Outter"],), + + "drop_size": ("INT", {"min": 1, "max": MAX_RESOLUTION, "step": 1, "default": 10}), + + "bbox_detector": ("BBOX_DETECTOR", ), + "wildcard": ("STRING", {"multiline": True}), + }, + "optional": { + "sam_model_opt": ("SAM_MODEL", ), + "segm_detector_opt": ("SEGM_DETECTOR", ), + }} + + RETURN_TYPES = ("IMAGE", "IMAGE", "IMAGE", "MASK", "DETAILER_PIPE", ) + RETURN_NAMES = ("image", "cropped_refined", "cropped_enhanced_alpha", "mask", "detailer_pipe") + OUTPUT_IS_LIST = (False, True, True, False, False) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Simple" + + @staticmethod + def enhance_face(image, model, clip, vae, guide_size, guide_size_for_bbox, max_size, seed, steps, cfg, sampler_name, scheduler, + positive, negative, denoise, feather, noise_mask, force_inpaint, + bbox_threshold, bbox_dilation, bbox_crop_factor, + sam_detection_hint, sam_dilation, sam_threshold, sam_bbox_expansion, sam_mask_hint_threshold, + sam_mask_hint_use_negative, drop_size, + bbox_detector, segm_detector=None, sam_model_opt=None, wildcard_opt=None): + # make default prompt as 'face' if empty prompt for CLIPSeg + bbox_detector.setAux('face') + segs = bbox_detector.detect(image, bbox_threshold, bbox_dilation, bbox_crop_factor, drop_size) + bbox_detector.setAux(None) + + # bbox + sam combination + if sam_model_opt is not None: + sam_mask = core.make_sam_mask(sam_model_opt, segs, image, sam_detection_hint, sam_dilation, + sam_threshold, sam_bbox_expansion, sam_mask_hint_threshold, + sam_mask_hint_use_negative, ) + segs = core.segs_bitwise_and_mask(segs, sam_mask) + + elif segm_detector is not None: + segm_segs = segm_detector.detect(image, bbox_threshold, bbox_dilation, bbox_crop_factor, drop_size) + segm_mask = core.segs_to_combined_mask(segm_segs) + segs = core.segs_bitwise_and_mask(segs, segm_mask) + + enhanced_img, _, cropped_enhanced, cropped_enhanced_alpha = \ + DetailerForEach.do_detail(image, segs, model, clip, vae, guide_size, guide_size_for_bbox, max_size, seed, steps, cfg, + sampler_name, scheduler, positive, negative, denoise, feather, noise_mask, + force_inpaint, wildcard_opt) + + # Mask Generator + mask = core.segs_to_combined_mask(segs) + + if len(cropped_enhanced) == 0: + cropped_enhanced = [empty_pil_tensor()] + + if len(cropped_enhanced_alpha) == 0: + cropped_enhanced_alpha = [empty_pil_tensor()] + + return enhanced_img, cropped_enhanced, cropped_enhanced_alpha, mask, + + 
def doit(self, image, model, clip, vae, guide_size, guide_size_for, max_size, seed, steps, cfg, sampler_name, scheduler, + positive, negative, denoise, feather, noise_mask, force_inpaint, + bbox_threshold, bbox_dilation, bbox_crop_factor, + sam_detection_hint, sam_dilation, sam_threshold, sam_bbox_expansion, sam_mask_hint_threshold, + sam_mask_hint_use_negative, drop_size, bbox_detector, wildcard, sam_model_opt=None, segm_detector_opt=None): + + enhanced_img, cropped_enhanced, cropped_enhanced_alpha, mask = FaceDetailer.enhance_face( + image, model, clip, vae, guide_size, guide_size_for, max_size, seed, steps, cfg, sampler_name, scheduler, + positive, negative, denoise, feather, noise_mask, force_inpaint, + bbox_threshold, bbox_dilation, bbox_crop_factor, + sam_detection_hint, sam_dilation, sam_threshold, sam_bbox_expansion, sam_mask_hint_threshold, + sam_mask_hint_use_negative, drop_size, bbox_detector, segm_detector_opt, sam_model_opt, wildcard) + + pipe = (model, clip, vae, positive, negative, wildcard, bbox_detector, segm_detector_opt, sam_model_opt) + return enhanced_img, cropped_enhanced, cropped_enhanced_alpha, mask, pipe + + +class LatentPixelScale: + upscale_methods = ["nearest-exact", "bilinear", "area"] + + @classmethod + def INPUT_TYPES(s): + return {"required": { + "samples": ("LATENT", ), + "scale_method": (s.upscale_methods,), + "scale_factor": ("FLOAT", {"default": 1.5, "min": 0.1, "max": 10000, "step": 0.1}), + "vae": ("VAE", ), + "use_tiled_vae": ("BOOLEAN", {"default": False, "label_on": "enabled", "label_off": "disabled"}), + }, + "optional": { + "upscale_model_opt": ("UPSCALE_MODEL", ), + } + } + + RETURN_TYPES = ("LATENT",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Upscale" + + def doit(self, samples, scale_method, scale_factor, vae, use_tiled_vae, upscale_model_opt=None): + if upscale_model_opt is None: + latent = core.latent_upscale_on_pixel_space(samples, scale_method, scale_factor, vae, use_tile=use_tiled_vae) + else: + latent = core.latent_upscale_on_pixel_space_with_model(samples, scale_method, upscale_model_opt, scale_factor, vae, use_tile=use_tiled_vae) + return (latent,) + + +class CfgScheduleHookProvider: + schedules = ["simple"] + + @classmethod + def INPUT_TYPES(s): + return {"required": { + "schedule_for_iteration": (s.schedules,), + "target_cfg": ("FLOAT", {"default": 3.0, "min": 0.0, "max": 100.0}), + }, + } + + RETURN_TYPES = ("PK_HOOK",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Upscale" + + def doit(self, schedule_for_iteration, target_cfg): + hook = None + if schedule_for_iteration == "simple": + hook = core.SimpleCfgScheduleHook(target_cfg) + + return (hook, ) + + +class DenoiseScheduleHookProvider: + schedules = ["simple"] + + @classmethod + def INPUT_TYPES(s): + return {"required": { + "schedule_for_iteration": (s.schedules,), + "target_denoise": ("FLOAT", {"default": 0.2, "min": 0.0, "max": 100.0}), + }, + } + + RETURN_TYPES = ("PK_HOOK",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Upscale" + + def doit(self, schedule_for_iteration, target_denoise): + hook = None + if schedule_for_iteration == "simple": + hook = core.SimpleDenoiseScheduleHook(target_denoise) + + return (hook, ) + + +class PixelKSampleHookCombine: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "hook1": ("PK_HOOK",), + "hook2": ("PK_HOOK",), + }, + } + + RETURN_TYPES = ("PK_HOOK",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Upscale" + + def doit(self, hook1, hook2): + hook = core.PixelKSampleHookCombine(hook1, hook2) + return (hook, ) + + +class 
TiledKSamplerProvider:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": {
+                    "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
+                    "steps": ("INT", {"default": 20, "min": 1, "max": 10000}),
+                    "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}),
+                    "sampler_name": (comfy.samplers.KSampler.SAMPLERS, ),
+                    "scheduler": (comfy.samplers.KSampler.SCHEDULERS, ),
+                    "denoise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}),
+                    "tile_width": ("INT", {"default": 512, "min": 256, "max": MAX_RESOLUTION, "step": 64}),
+                    "tile_height": ("INT", {"default": 512, "min": 256, "max": MAX_RESOLUTION, "step": 64}),
+                    "tiling_strategy": (["random", "padded", 'simple'], ),
+                    "basic_pipe": ("BASIC_PIPE", )
+                }}
+
+    RETURN_TYPES = ("KSAMPLER",)
+    FUNCTION = "doit"
+
+    CATEGORY = "ImpactPack/Sampler"
+
+    def doit(self, seed, steps, cfg, sampler_name, scheduler, denoise,
+             tile_width, tile_height, tiling_strategy, basic_pipe):
+        model, _, _, positive, negative = basic_pipe
+        sampler = core.TiledKSamplerWrapper(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, denoise,
+                                            tile_width, tile_height, tiling_strategy)
+        return (sampler, )
+
+
+class PixelTiledKSampleUpscalerProvider:
+    upscale_methods = ["nearest-exact", "bilinear", "area"]
+
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": {
+                    "scale_method": (s.upscale_methods,),
+                    "model": ("MODEL",),
+                    "vae": ("VAE",),
+                    "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
+                    "steps": ("INT", {"default": 20, "min": 1, "max": 10000}),
+                    "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}),
+                    "sampler_name": (comfy.samplers.KSampler.SAMPLERS, ),
+                    "scheduler": (comfy.samplers.KSampler.SCHEDULERS, ),
+                    "positive": ("CONDITIONING", ),
+                    "negative": ("CONDITIONING", ),
+                    "denoise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}),
+                    "tile_width": ("INT", {"default": 512, "min": 256, "max": MAX_RESOLUTION, "step": 64}),
+                    "tile_height": ("INT", {"default": 512, "min": 256, "max": MAX_RESOLUTION, "step": 64}),
+                    "tiling_strategy": (["random", "padded", 'simple'], ),
+                },
+                "optional": {
+                    "upscale_model_opt": ("UPSCALE_MODEL", ),
+                    "pk_hook_opt": ("PK_HOOK", ),
+                }
+                }
+
+    RETURN_TYPES = ("UPSCALER",)
+    FUNCTION = "doit"
+
+    CATEGORY = "ImpactPack/Upscale"
+
+    def doit(self, scale_method, model, vae, seed, steps, cfg, sampler_name, scheduler, positive, negative, denoise, tile_width, tile_height, tiling_strategy, upscale_model_opt=None, pk_hook_opt=None):
+        try:
+            import custom_nodes.ComfyUI_TiledKSampler.nodes
+            upscaler = core.PixelTiledKSampleUpscaler(scale_method, model, vae, seed, steps, cfg, sampler_name, scheduler, positive, negative, denoise, tile_width, tile_height, tiling_strategy, upscale_model_opt, pk_hook_opt)
+            return (upscaler, )
+        except Exception as e:
+            print("[ERROR] PixelTiledKSampleUpscalerProvider: ComfyUI_TiledKSampler custom node isn't installed. You must install BlenderNeko/ComfyUI_TiledKSampler extension to use this node.")
+            print(f"\t{e}")
+            pass
+
+
+class PixelTiledKSampleUpscalerProviderPipe:
+    upscale_methods = ["nearest-exact", "bilinear", "area"]
+
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": {
+                    "scale_method": (s.upscale_methods,),
+                    "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
+                    "steps": ("INT", {"default": 20, "min": 1, "max": 10000}),
+                    "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}),
+                    "sampler_name": (comfy.samplers.KSampler.SAMPLERS, ),
+                    "scheduler": (comfy.samplers.KSampler.SCHEDULERS, ),
+                    "denoise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}),
+                    "tile_width": ("INT", {"default": 512, "min": 256, "max": MAX_RESOLUTION, "step": 64}),
+                    "tile_height": ("INT", {"default": 512, "min": 256, "max": MAX_RESOLUTION, "step": 64}),
+                    "tiling_strategy": (["random", "padded", 'simple'], ),
+                    "basic_pipe": ("BASIC_PIPE",)
+                },
+                "optional": {
+                    "upscale_model_opt": ("UPSCALE_MODEL", ),
+                    "pk_hook_opt": ("PK_HOOK", ),
+                }
+                }
+
+    RETURN_TYPES = ("UPSCALER",)
+    FUNCTION = "doit"
+
+    CATEGORY = "ImpactPack/Upscale"
+
+    def doit(self, scale_method, seed, steps, cfg, sampler_name, scheduler, denoise, tile_width, tile_height, tiling_strategy, basic_pipe, upscale_model_opt=None, pk_hook_opt=None):
+        try:
+            import custom_nodes.ComfyUI_TiledKSampler.nodes
+            model, _, vae, positive, negative = basic_pipe
+            upscaler = core.PixelTiledKSampleUpscaler(scale_method, model, vae, seed, steps, cfg, sampler_name, scheduler, positive, negative, denoise, tile_width, tile_height, tiling_strategy, upscale_model_opt, pk_hook_opt)
+            return (upscaler, )
+        except Exception as e:
+            print("[ERROR] PixelTiledKSampleUpscalerProviderPipe: ComfyUI_TiledKSampler custom node isn't installed. You must install BlenderNeko/ComfyUI_TiledKSampler extension to use this node.")
+            print(f"\t{e}")
+            pass
+
+
+class PixelKSampleUpscalerProvider:
+    upscale_methods = ["nearest-exact", "bilinear", "area"]
+
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": {
+                    "scale_method": (s.upscale_methods,),
+                    "model": ("MODEL",),
+                    "vae": ("VAE",),
+                    "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
+                    "steps": ("INT", {"default": 20, "min": 1, "max": 10000}),
+                    "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}),
+                    "sampler_name": (comfy.samplers.KSampler.SAMPLERS, ),
+                    "scheduler": (comfy.samplers.KSampler.SCHEDULERS, ),
+                    "positive": ("CONDITIONING", ),
+                    "negative": ("CONDITIONING", ),
+                    "denoise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}),
+                    "use_tiled_vae": ("BOOLEAN", {"default": False, "label_on": "enabled", "label_off": "disabled"}),
+                },
+                "optional": {
+                    "upscale_model_opt": ("UPSCALE_MODEL", ),
+                    "pk_hook_opt": ("PK_HOOK", ),
+                }
+                }
+
+    RETURN_TYPES = ("UPSCALER",)
+    FUNCTION = "doit"
+
+    CATEGORY = "ImpactPack/Upscale"
+
+    def doit(self, scale_method, model, vae, seed, steps, cfg, sampler_name, scheduler, positive, negative, denoise,
+             use_tiled_vae, upscale_model_opt=None, pk_hook_opt=None):
+        upscaler = core.PixelKSampleUpscaler(scale_method, model, vae, seed, steps, cfg, sampler_name, scheduler,
+                                             positive, negative, denoise, use_tiled_vae, upscale_model_opt, pk_hook_opt)
+        return (upscaler, )
+
+
+class PixelKSampleUpscalerProviderPipe(PixelKSampleUpscalerProvider):
+    upscale_methods = ["nearest-exact", "bilinear", "area"]
+
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": {
+                    "scale_method": (s.upscale_methods,),
+                    "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
+                    "steps": ("INT", {"default": 20, "min": 1, "max": 10000}),
+                    "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}),
+                    "sampler_name": (comfy.samplers.KSampler.SAMPLERS, ),
+                    "scheduler": (comfy.samplers.KSampler.SCHEDULERS, ),
+                    "denoise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}),
+                    "use_tiled_vae": ("BOOLEAN", {"default": False, "label_on": "enabled", "label_off": "disabled"}),
+                    "basic_pipe": ("BASIC_PIPE",)
+                },
+                "optional": {
+                    "upscale_model_opt": ("UPSCALE_MODEL", ),
+                    "pk_hook_opt": ("PK_HOOK", ),
+                }
+                }
+
+    RETURN_TYPES = ("UPSCALER",)
+    FUNCTION = "doit_pipe"
+
+    CATEGORY = "ImpactPack/Upscale"
+
+    def doit_pipe(self, scale_method, seed, steps, cfg, sampler_name, scheduler, denoise,
+                  use_tiled_vae, basic_pipe, upscale_model_opt=None, pk_hook_opt=None):
+        model, _, vae, positive, negative = basic_pipe
+        upscaler = core.PixelKSampleUpscaler(scale_method, model, vae, seed, steps, cfg, sampler_name, scheduler,
+                                             positive, negative, denoise, use_tiled_vae, upscale_model_opt, pk_hook_opt)
+        return (upscaler, )
+
+
+class TwoSamplersForMaskUpscalerProvider:
+    upscale_methods = ["nearest-exact", "bilinear", "area"]
+
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": {
+                    "scale_method": (s.upscale_methods,),
+                    "full_sample_schedule": (
+                        ["none", "interleave1", "interleave2", "interleave3",
+                         "last1", "last2",
+                         "interleave1+last1", "interleave2+last1", "interleave3+last1",
+                         ],),
+                    "use_tiled_vae": ("BOOLEAN", {"default": False, "label_on": "enabled", "label_off": "disabled"}),
+                    "base_sampler": ("KSAMPLER", ),
+                    "mask_sampler": ("KSAMPLER", ),
+                    "mask": ("MASK", ),
+                    "vae": ("VAE",),
+                },
+                "optional": {
+                    "full_sampler_opt": ("KSAMPLER",),
+                    "upscale_model_opt": ("UPSCALE_MODEL", ),
+ "pk_hook_base_opt": ("PK_HOOK", ), + "pk_hook_mask_opt": ("PK_HOOK", ), + "pk_hook_full_opt": ("PK_HOOK", ), + } + } + + RETURN_TYPES = ("UPSCALER", ) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Upscale" + + def doit(self, scale_method, full_sample_schedule, use_tiled_vae, base_sampler, mask_sampler, mask, vae, + full_sampler_opt=None, upscale_model_opt=None, + pk_hook_base_opt=None, pk_hook_mask_opt=None, pk_hook_full_opt=None): + upscaler = core.TwoSamplersForMaskUpscaler(scale_method, full_sample_schedule, use_tiled_vae, + base_sampler, mask_sampler, mask, vae, full_sampler_opt, upscale_model_opt, + pk_hook_base_opt, pk_hook_mask_opt, pk_hook_full_opt) + return (upscaler, ) + + +class TwoSamplersForMaskUpscalerProviderPipe: + upscale_methods = ["nearest-exact", "bilinear", "area"] + + @classmethod + def INPUT_TYPES(s): + return {"required": { + "scale_method": (s.upscale_methods,), + "full_sample_schedule": ( + ["none", "interleave1", "interleave2", "interleave3", + "last1", "last2", + "interleave1+last1", "interleave2+last1", "interleave3+last1", + ],), + "use_tiled_vae": ("BOOLEAN", {"default": False, "label_on": "enabled", "label_off": "disabled"}), + "base_sampler": ("KSAMPLER", ), + "mask_sampler": ("KSAMPLER", ), + "mask": ("MASK", ), + "basic_pipe": ("BASIC_PIPE",), + }, + "optional": { + "full_sampler_opt": ("KSAMPLER",), + "upscale_model_opt": ("UPSCALE_MODEL", ), + "pk_hook_base_opt": ("PK_HOOK", ), + "pk_hook_mask_opt": ("PK_HOOK", ), + "pk_hook_full_opt": ("PK_HOOK", ), + } + } + + RETURN_TYPES = ("UPSCALER", ) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Upscale" + + def doit(self, scale_method, full_sample_schedule, use_tiled_vae, base_sampler, mask_sampler, mask, basic_pipe, + full_sampler_opt=None, upscale_model_opt=None, + pk_hook_base_opt=None, pk_hook_mask_opt=None, pk_hook_full_opt=None): + _, _, vae, _, _ = basic_pipe + upscaler = core.TwoSamplersForMaskUpscaler(scale_method, full_sample_schedule, use_tiled_vae, + base_sampler, mask_sampler, mask, vae, full_sampler_opt, upscale_model_opt, + pk_hook_base_opt, pk_hook_mask_opt, pk_hook_full_opt) + return (upscaler, ) + + +class IterativeLatentUpscale: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "samples": ("LATENT", ), + "upscale_factor": ("FLOAT", {"default": 1.5, "min": 1, "max": 10000, "step": 0.1}), + "steps": ("INT", {"default": 3, "min": 1, "max": 10000, "step": 1}), + "temp_prefix": ("STRING", {"default": ""}), + "upscaler": ("UPSCALER",) + }, + "hidden": {"unique_id": "UNIQUE_ID"}, + } + + RETURN_TYPES = ("LATENT",) + RETURN_NAMES = ("latent",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Upscale" + + def doit(self, samples, upscale_factor, steps, temp_prefix, upscaler, unique_id): + w = samples['samples'].shape[3]*8 # image width + h = samples['samples'].shape[2]*8 # image height + + if temp_prefix == "": + temp_prefix = None + + upscale_factor_unit = max(0, (upscale_factor-1.0)/steps) + current_latent = samples + scale = 1 + + for i in range(steps-1): + scale += upscale_factor_unit + new_w = w*scale + new_h = h*scale + core.update_node_status(unique_id, f"{i+1}/{steps} steps | x{scale:.2f}", (i+1)/steps) + print(f"IterativeLatentUpscale[{i+1}/{steps}]: {new_w:.1f}x{new_h:.1f} (scale:{scale:.2f}) ") + step_info = i, steps + current_latent = upscaler.upscale_shape(step_info, current_latent, new_w, new_h, temp_prefix) + + if scale < upscale_factor: + new_w = w*upscale_factor + new_h = h*upscale_factor + core.update_node_status(unique_id, f"Final step | x{upscale_factor:.2f}", 1.0) + 
print(f"IterativeLatentUpscale[Final]: {new_w:.1f}x{new_h:.1f} (scale:{upscale_factor:.2f}) ") + step_info = steps, steps + current_latent = upscaler.upscale_shape(step_info, current_latent, new_w, new_h, temp_prefix) + + core.update_node_status(unique_id, "", None) + + return (current_latent, ) + + +class IterativeImageUpscale: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "pixels": ("IMAGE", ), + "upscale_factor": ("FLOAT", {"default": 1.5, "min": 1, "max": 10000, "step": 0.1}), + "steps": ("INT", {"default": 3, "min": 1, "max": 10000, "step": 1}), + "temp_prefix": ("STRING", {"default": ""}), + "upscaler": ("UPSCALER",), + "vae": ("VAE",), + }, + "hidden": {"unique_id": "UNIQUE_ID"} + } + + RETURN_TYPES = ("IMAGE",) + RETURN_NAMES = ("image",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Upscale" + + def doit(self, pixels, upscale_factor, steps, temp_prefix, upscaler, vae, unique_id): + if temp_prefix == "": + temp_prefix = None + + core.update_node_status(unique_id, "VAEEncode (first)", 0) + if upscaler.is_tiled: + latent = nodes.VAEEncodeTiled().encode(vae, pixels)[0] + else: + latent = nodes.VAEEncode().encode(vae, pixels)[0] + + refined_latent = IterativeLatentUpscale().doit(latent, upscale_factor, steps, temp_prefix, upscaler, unique_id) + + core.update_node_status(unique_id, "VAEDecode (final)", 1.0) + if upscaler.is_tiled: + pixels = nodes.VAEDecodeTiled().decode(vae, refined_latent[0])[0] + else: + pixels = nodes.VAEDecode().decode(vae, refined_latent[0])[0] + + core.update_node_status(unique_id, "", None) + + return (pixels, ) + + +class FaceDetailerPipe: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "image": ("IMAGE", ), + "detailer_pipe": ("DETAILER_PIPE",), + "guide_size": ("FLOAT", {"default": 256, "min": 64, "max": nodes.MAX_RESOLUTION, "step": 8}), + "guide_size_for": ("BOOLEAN", {"default": True, "label_on": "bbox", "label_off": "crop_region"}), + "max_size": ("FLOAT", {"default": 768, "min": 64, "max": nodes.MAX_RESOLUTION, "step": 8}), + "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + "steps": ("INT", {"default": 20, "min": 1, "max": 10000}), + "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}), + "sampler_name": (comfy.samplers.KSampler.SAMPLERS,), + "scheduler": (comfy.samplers.KSampler.SCHEDULERS,), + "denoise": ("FLOAT", {"default": 0.5, "min": 0.0001, "max": 1.0, "step": 0.01}), + "feather": ("INT", {"default": 5, "min": 0, "max": 100, "step": 1}), + "noise_mask": ("BOOLEAN", {"default": True, "label_on": "enabled", "label_off": "disabled"}), + "force_inpaint": ("BOOLEAN", {"default": False, "label_on": "enabled", "label_off": "disabled"}), + + "bbox_threshold": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}), + "bbox_dilation": ("INT", {"default": 10, "min": 0, "max": 255, "step": 1}), + "bbox_crop_factor": ("FLOAT", {"default": 3.0, "min": 1.0, "max": 10, "step": 0.1}), + + "sam_detection_hint": (["center-1", "horizontal-2", "vertical-2", "rect-4", "diamond-4", "mask-area", "mask-points", "mask-point-bbox", "none"],), + "sam_dilation": ("INT", {"default": 0, "min": 0, "max": 255, "step": 1}), + "sam_threshold": ("FLOAT", {"default": 0.93, "min": 0.0, "max": 1.0, "step": 0.01}), + "sam_bbox_expansion": ("INT", {"default": 0, "min": 0, "max": 1000, "step": 1}), + "sam_mask_hint_threshold": ("FLOAT", {"default": 0.7, "min": 0.0, "max": 1.0, "step": 0.01}), + "sam_mask_hint_use_negative": (["False", "Small", "Outter"],), + + "drop_size": ("INT", {"min": 1, "max": MAX_RESOLUTION, 
"step": 1, "default": 10}), + }, + } + + RETURN_TYPES = ("IMAGE", "IMAGE", "IMAGE", "MASK", "DETAILER_PIPE", ) + RETURN_NAMES = ("image", "cropped_refined", "cropped_enhanced_alpha", "mask", "detailer_pipe") + OUTPUT_IS_LIST = (False, True, True, False, False) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Simple" + + def doit(self, image, detailer_pipe, guide_size, guide_size_for, max_size, seed, steps, cfg, sampler_name, scheduler, + denoise, feather, noise_mask, force_inpaint, bbox_threshold, bbox_dilation, bbox_crop_factor, + sam_detection_hint, sam_dilation, sam_threshold, sam_bbox_expansion, + sam_mask_hint_threshold, sam_mask_hint_use_negative, drop_size): + + model, clip, vae, positive, negative, wildcard, bbox_detector, segm_detector, sam_model_opt = detailer_pipe + + enhanced_img, cropped_enhanced, cropped_enhanced_alpha, mask = FaceDetailer.enhance_face( + image, model, clip, vae, guide_size, guide_size_for, max_size, seed, steps, cfg, sampler_name, scheduler, + positive, negative, denoise, feather, noise_mask, force_inpaint, + bbox_threshold, bbox_dilation, bbox_crop_factor, + sam_detection_hint, sam_dilation, sam_threshold, sam_bbox_expansion, sam_mask_hint_threshold, + sam_mask_hint_use_negative, drop_size, bbox_detector, segm_detector, sam_model_opt, wildcard) + + if len(cropped_enhanced) == 0: + cropped_enhanced = [empty_pil_tensor()] + + if len(cropped_enhanced_alpha) == 0: + cropped_enhanced_alpha = [empty_pil_tensor()] + + return enhanced_img, cropped_enhanced, cropped_enhanced_alpha, mask, detailer_pipe + + +class DetailerForEachTest(DetailerForEach): + RETURN_TYPES = ("IMAGE", "IMAGE", "IMAGE", "IMAGE", ) + RETURN_NAMES = ("image", "cropped", "cropped_refined", "cropped_refined_alpha", ) + OUTPUT_IS_LIST = (False, True, True, True, ) + + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Detailer" + + def doit(self, image, segs, model, clip, vae, guide_size, guide_size_for, max_size, seed, steps, cfg, sampler_name, + scheduler, positive, negative, denoise, feather, noise_mask, force_inpaint): + + enhanced_img, cropped, cropped_enhanced, cropped_enhanced_alpha = \ + DetailerForEach.do_detail(image, segs, model, clip, vae, guide_size, guide_size_for, max_size, seed, steps, + cfg, sampler_name, scheduler, positive, negative, denoise, feather, noise_mask, + force_inpaint) + + # set fallback image + if len(cropped) == 0: + cropped = [empty_pil_tensor()] + + if len(cropped_enhanced) == 0: + cropped_enhanced = [empty_pil_tensor()] + + if len(cropped_enhanced_alpha) == 0: + cropped_enhanced_alpha = [empty_pil_tensor()] + + return enhanced_img, cropped, cropped_enhanced, cropped_enhanced_alpha + + +class DetailerForEachTestPipe(DetailerForEachPipe): + RETURN_TYPES = ("IMAGE", "IMAGE", "IMAGE", "IMAGE", ) + RETURN_NAMES = ("image", "cropped", "cropped_refined", "cropped_refined_alpha", ) + OUTPUT_IS_LIST = (False, True, True, True) + + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Detailer" + + def doit(self, image, segs, guide_size, guide_size_for, max_size, seed, steps, cfg, sampler_name, scheduler, + denoise, feather, noise_mask, force_inpaint, basic_pipe): + + model, clip, vae, positive, negative = basic_pipe + enhanced_img, cropped, cropped_enhanced, cropped_enhanced_alpha = \ + DetailerForEach.do_detail(image, segs, model, clip, vae, guide_size, guide_size_for, max_size, seed, steps, cfg, + sampler_name, scheduler, positive, negative, denoise, feather, noise_mask, + force_inpaint) + + # set fallback image + if len(cropped) == 0: + cropped = [empty_pil_tensor()] + + if 
len(cropped_enhanced) == 0: + cropped_enhanced = [empty_pil_tensor()] + + if len(cropped_enhanced_alpha) == 0: + cropped_enhanced_alpha = [empty_pil_tensor()] + + return enhanced_img, cropped, cropped_enhanced, cropped_enhanced_alpha + + +class EmptySEGS: + @classmethod + def INPUT_TYPES(s): + return {"required": {}, } + + RETURN_TYPES = ("SEGS",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Util" + + def doit(self): + shape = 0, 0 + return ((shape, []),) + + +class SegsToCombinedMask: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "segs": ("SEGS", ), + } + } + + RETURN_TYPES = ("MASK",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Operation" + + def doit(self, segs): + return (core.segs_to_combined_mask(segs), ) + + +class SegsBitwiseAndMask: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "segs": ("SEGS",), + "mask": ("MASK",), + } + } + + RETURN_TYPES = ("SEGS",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Operation" + + def doit(self, segs, mask): + return (core.segs_bitwise_and_mask(segs, mask), ) + + +class SegsBitwiseAndMaskForEach: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "segs": ("SEGS",), + "masks": ("MASKS",), + } + } + + RETURN_TYPES = ("SEGS",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Operation" + + def doit(self, segs, masks): + return (core.apply_mask_to_each_seg(segs, masks), ) + + +class BitwiseAndMaskForEach: + @classmethod + def INPUT_TYPES(s): + return {"required": + { + "base_segs": ("SEGS",), + "mask_segs": ("SEGS",), + } + } + + RETURN_TYPES = ("SEGS",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Operation" + + def doit(self, base_segs, mask_segs): + + result = [] + + for bseg in base_segs[1]: + cropped_mask1 = bseg.cropped_mask.copy() + crop_region1 = bseg.crop_region + + for mseg in mask_segs[1]: + cropped_mask2 = mseg.cropped_mask + crop_region2 = mseg.crop_region + + # compute the intersection of the two crop regions + intersect_region = (max(crop_region1[0], crop_region2[0]), + max(crop_region1[1], crop_region2[1]), + min(crop_region1[2], crop_region2[2]), + min(crop_region1[3], crop_region2[3])) + + overlapped = False + + # set all pixels in cropped_mask1 to 0 except for those that overlap with cropped_mask2 + for i in range(intersect_region[0], intersect_region[2]): + for j in range(intersect_region[1], intersect_region[3]): + if cropped_mask1[j - crop_region1[1], i - crop_region1[0]] == 1 and \ + cropped_mask2[j - crop_region2[1], i - crop_region2[0]] == 1: + # pixel overlaps with both masks, keep it as 1 + overlapped = True + pass + else: + # pixel does not overlap with both masks, set it to 0 + cropped_mask1[j - crop_region1[1], i - crop_region1[0]] = 0 + + if overlapped: + item = SEG(bseg.cropped_image, cropped_mask1, bseg.confidence, bseg.crop_region, bseg.bbox, bseg.label) + result.append(item) + + return ((base_segs[0], result),) + + +class SubtractMaskForEach: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "base_segs": ("SEGS",), + "mask_segs": ("SEGS",), + } + } + + RETURN_TYPES = ("SEGS",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Operation" + + def doit(self, base_segs, mask_segs): + + result = [] + + for bseg in base_segs[1]: + cropped_mask1 = bseg.cropped_mask.copy() + crop_region1 = bseg.crop_region + + for mseg in mask_segs[1]: + cropped_mask2 = mseg.cropped_mask + crop_region2 = mseg.crop_region + + # compute the intersection of the two crop regions + intersect_region = (max(crop_region1[0], crop_region2[0]), + max(crop_region1[1], crop_region2[1]), + 
+
+
+class MaskToSEGS:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": {
+                    "mask": ("MASK",),
+                    "combined": ("BOOLEAN", {"default": False, "label_on": "True", "label_off": "False"}),
+                    "crop_factor": ("FLOAT", {"default": 3.0, "min": 1.0, "max": 10, "step": 0.1}),
+                    "bbox_fill": ("BOOLEAN", {"default": False, "label_on": "enabled", "label_off": "disabled"}),
+                    "drop_size": ("INT", {"min": 1, "max": MAX_RESOLUTION, "step": 1, "default": 10}),
+                    }
+                }
+
+    RETURN_TYPES = ("SEGS",)
+    FUNCTION = "doit"
+
+    CATEGORY = "ImpactPack/Operation"
+
+    def doit(self, mask, combined, crop_factor, bbox_fill, drop_size):
+        result = core.mask_to_segs(mask, combined, crop_factor, bbox_fill, drop_size)
+        return (result, )
+
+
+class MasksToMaskList:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": {
+                    "masks": ("MASKS", ),
+                    }
+                }
+
+    RETURN_TYPES = ("MASK", )
+    OUTPUT_IS_LIST = (True, )
+    FUNCTION = "doit"
+
+    CATEGORY = "ImpactPack/Operation"
+
+    def doit(self, masks):
+        if masks is None:
+            empty_mask = torch.zeros((64, 64), dtype=torch.float32, device="cpu")
+            return ([empty_mask], )
+
+        res = []
+
+        for mask in masks:
+            res.append(mask)
+
+        print(f"mask len: {len(res)}")
+
+        return (res, )
+
+
+class ToBinaryMask:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": {
+                    "mask": ("MASK",),
+                    "threshold": ("INT", {"default": 20, "min": 1, "max": 255}),
+                    }
+                }
+
+    RETURN_TYPES = ("MASK",)
+    FUNCTION = "doit"
+
+    CATEGORY = "ImpactPack/Operation"
+
+    def doit(self, mask, threshold):
+        # the 0-255 widget value is normalized to the 0.0-1.0 mask domain
+        mask = to_binary_mask(mask, threshold/255.0)
+        return (mask,)
+
+
+class BitwiseAndMask:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": {
+                    "mask1": ("MASK",),
+                    "mask2": ("MASK",),
+                    }
+                }
+
+    RETURN_TYPES = ("MASK",)
+    FUNCTION = "doit"
+
+    CATEGORY = "ImpactPack/Operation"
+
+    def doit(self, mask1, mask2):
+        mask = bitwise_and_masks(mask1, mask2)
+        return (mask,)
+
+
+class SubtractMask:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": {
+                    "mask1": ("MASK", ),
+                    "mask2": ("MASK", ),
+                    }
+                }
+
+    RETURN_TYPES = ("MASK",)
+    FUNCTION = "doit"
+
+    CATEGORY = "ImpactPack/Operation"
+
+    def doit(self, mask1, mask2):
+        mask = subtract_masks(mask1, mask2)
+        return (mask,)
+
+
+class AddMask:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": {
+                    "mask1": ("MASK",),
+                    "mask2": ("MASK",),
+                    }
+                }
+
+    RETURN_TYPES = ("MASK",)
+    FUNCTION = "doit"
+
+    CATEGORY = "ImpactPack/Operation"
+
+    def doit(self, mask1, mask2):
+        mask = add_masks(mask1, mask2)
+        return (mask,)
+
+
+# ComfyUI's built-in node implementations; imported mid-file, as in the
+# original source, since they are only needed by the classes below.
+import nodes
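+
+
+# Illustrative sketch: add_masks, subtract_masks, and bitwise_and_masks above
+# come from impact.utils (pulled in via the star import). Assuming masks are
+# float tensors in [0, 1], a plausible minimal equivalent of the arithmetic is:
+def _example_mask_arithmetic(mask1, mask2):
+    added = torch.clamp(mask1 + mask2, 0.0, 1.0)       # union-like add
+    subtracted = torch.clamp(mask1 - mask2, 0.0, 1.0)  # remove mask2 from mask1
+    intersected = torch.min(mask1, mask2)              # bitwise-and analogue
+    return added, subtracted, intersected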
+
+
+def get_image_hash(arr):
+    split_index1 = arr.shape[0] // 2
+    split_index2 = arr.shape[1] // 2
+    part1 = arr[:split_index1, :split_index2]
+    part2 = arr[:split_index1, split_index2:]
+    part3 = arr[split_index1:, :split_index2]
+    part4 = arr[split_index1:, split_index2:]
+
+    # sum each of the four quadrants
+    sum1 = np.sum(part1)
+    sum2 = np.sum(part2)
+    sum3 = np.sum(part3)
+    sum4 = np.sum(part4)
+
+    return hash((sum1, sum2, sum3, sum4))
+
+
+class PreviewBridge(nodes.PreviewImage):
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": {"images": ("IMAGE",), },
+                "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO", "unique_id": "UNIQUE_ID"},
+                "optional": {"image": (["#placeholder"], )},
+                }
+
+    RETURN_TYPES = ("IMAGE", "MASK", )
+
+    FUNCTION = "doit"
+
+    CATEGORY = "ImpactPack/Util"
+
+    def __init__(self):
+        super().__init__()
+        self.prev_hash = None
+
+    def doit(self, images, image, filename_prefix="ComfyUI", prompt=None, extra_pnginfo=None, unique_id=None):
+        if image != "#placeholder" and isinstance(image, str):
+            image_path = folder_paths.get_annotated_filepath(image)
+            img = Image.open(image_path).convert("RGB")
+            data = np.array(img)
+            image_hash = get_image_hash(data)
+        else:
+            data = (255. * images[0].cpu().numpy()).astype(int)
+            image_hash = get_image_hash(data)
+
+        is_changed = False
+        if self.prev_hash is None or self.prev_hash != image_hash:
+            self.prev_hash = image_hash
+            is_changed = True
+
+        if is_changed or image == "#placeholder":
+            # new input image
+            res = self.save_images(images, filename_prefix, prompt, extra_pnginfo)
+
+            item = res['ui']['images'][0]
+
+            if not item['filename'].endswith(']'):
+                filepath = f"{item['filename']} [{item['type']}]"
+            else:
+                filepath = item['filename']
+
+            image, mask = nodes.LoadImage().load_image(filepath)
+
+            res['ui']['aux'] = [image_hash, res['ui']['images']]
+            res['result'] = (image, mask, )
+
+            return res
+
+        else:
+            # new mask
+            if '0' in image:  # fallback
+                image = image['0']
+
+            forward = {'filename': image['forward_filename'],
+                       'subfolder': image['forward_subfolder'],
+                       'type': image['forward_type'], }
+
+            res = {'ui': {'images': [forward]}}
+
+            imgpath = ""
+            if 'subfolder' in image and image['subfolder'] != "":
+                imgpath = image['subfolder'] + "/"
+
+            imgpath += f"{image['filename']}"
+
+            if 'type' in image and image['type'] != "":
+                imgpath += f" [{image['type']}]"
+
+            res['ui']['aux'] = [image_hash, [forward]]
+            res['result'] = nodes.LoadImage().load_image(imgpath)
+
+            return res
+
+
+class ImageReceiver(nodes.LoadImage):
+    @classmethod
+    def INPUT_TYPES(s):
+        input_dir = folder_paths.get_input_directory()
+        files = [f for f in os.listdir(input_dir) if os.path.isfile(os.path.join(input_dir, f))]
+        return {"required": {
+                    "image": (sorted(files), ),
+                    "link_id": ("INT", {"default": 0, "min": 0, "max": sys.maxsize, "step": 1}), },
+                }
+
+    FUNCTION = "doit"
+
+    CATEGORY = "ImpactPack/Util"
+
+    def doit(self, image, link_id):
+        return nodes.LoadImage().load_image(image)
+
+    @classmethod
+    def VALIDATE_INPUTS(s, image, link_id):
+        if not folder_paths.exists_annotated_filepath(image) or image.startswith("/") or ".." in image:
+            return "Invalid image file: {}".format(image)
+
+        return True
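+
+
+# Illustrative sketch: VALIDATE_INPUTS above rejects absolute paths and ".."
+# segments so an annotated filename cannot escape ComfyUI's input directory.
+# The helper below restates just that check (name is illustrative only):
+def _example_is_safe_relative_name(name):
+    return not name.startswith("/") and ".." not in name
+
+# e.g. _example_is_safe_relative_name("../secrets.png") evaluates to False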
+
+
+class ImageSender(nodes.PreviewImage):
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": {
+                    "images": ("IMAGE", ),
+                    "filename_prefix": ("STRING", {"default": "ImgSender"}),
+                    "link_id": ("INT", {"default": 0, "min": 0, "max": sys.maxsize, "step": 1}), },
+                "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"},
+                }
+
+    OUTPUT_NODE = True
+
+    FUNCTION = "doit"
+
+    CATEGORY = "ImpactPack/Util"
+
+    def doit(self, images, filename_prefix="ImgSender", link_id=0, prompt=None, extra_pnginfo=None):
+        result = nodes.PreviewImage().save_images(images, filename_prefix, prompt, extra_pnginfo)
+        PromptServer.instance.send_sync("img-send", {"link_id": link_id, "images": result['ui']['images']})
+        return result
+
+
+class LatentReceiver:
+    def __init__(self):
+        self.input_dir = folder_paths.get_input_directory()
+        self.type = "input"
+
+    @classmethod
+    def INPUT_TYPES(s):
+        def check_file_extension(x):
+            return x.endswith(".latent") or x.endswith(".latent.png")
+
+        input_dir = folder_paths.get_input_directory()
+        files = [f for f in os.listdir(input_dir) if os.path.isfile(os.path.join(input_dir, f)) and check_file_extension(f)]
+        return {"required": {
+                    "latent": (sorted(files), ),
+                    "link_id": ("INT", {"default": 0, "min": 0, "max": sys.maxsize, "step": 1}),
+                    },
+                }
+
+    FUNCTION = "doit"
+
+    CATEGORY = "ImpactPack/Util"
+
+    RETURN_TYPES = ("LATENT",)
+
+    @staticmethod
+    def load_preview_latent(image_path):
+        image = Image.open(image_path)
+        exif_data = piexif.load(image.info["exif"])
+
+        if piexif.ExifIFD.UserComment in exif_data["Exif"]:
+            compressed_data = exif_data["Exif"][piexif.ExifIFD.UserComment]
+            compressed_data_io = BytesIO(compressed_data)
+            with zipfile.ZipFile(compressed_data_io, mode='r') as archive:
+                tensor_bytes = archive.read("latent")
+            tensor = safetensors.torch.load(tensor_bytes)
+            return {"samples": tensor['latent_tensor']}
+        return None
+
+    def parse_filename(self, filename):
+        pattern = r"^(.*)/(.*?)\[(.*)\]\s*$"
+        match = re.match(pattern, filename)
+        if match:
+            subfolder = match.group(1)
+            filename = match.group(2).rstrip()
+            file_type = match.group(3)
+        else:
+            subfolder = ''
+            file_type = self.type
+
+        return {'filename': filename, 'subfolder': subfolder, 'type': file_type}
+
+    def doit(self, latent, link_id):
+        latent_name = latent
+        latent_path = folder_paths.get_annotated_filepath(latent_name)
+
+        if latent.endswith(".latent"):
+            latent = safetensors.torch.load_file(latent_path, device="cpu")
+            multiplier = 1.0
+            if "latent_format_version_0" not in latent:
+                multiplier = 1.0 / 0.18215
+            samples = {"samples": latent["latent_tensor"].float() * multiplier}
+        else:
+            samples = LatentReceiver.load_preview_latent(latent_path)
+
+        preview = self.parse_filename(latent_name)
+
+        return {
+            'ui': {"images": [preview]},
+            'result': (samples, )
+        }
+
+    @classmethod
+    def IS_CHANGED(s, latent, link_id):
+        file_path = folder_paths.get_annotated_filepath(latent)
+        m = hashlib.sha256()
+        with open(file_path, 'rb') as f:
+            m.update(f.read())
+        return m.digest().hex()
+
+    @classmethod
+    def VALIDATE_INPUTS(s, latent, link_id):
+        if not folder_paths.exists_annotated_filepath(latent) or latent.startswith("/") or ".." in latent:
+            return "Invalid latent file: {}".format(latent)
+        return True
+
+
+class LatentSender(nodes.SaveLatent):
+    def __init__(self):
+        self.output_dir = folder_paths.get_temp_directory()
+        self.type = "temp"
+
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": {
+                    "samples": ("LATENT", ),
+                    "filename_prefix": ("STRING", {"default": "latents/LatentSender"}),
+                    "link_id": ("INT", {"default": 0, "min": 0, "max": sys.maxsize, "step": 1}), },
+                "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"},
+                }
+
+    OUTPUT_NODE = True
+
+    RETURN_TYPES = ()
+
+    FUNCTION = "doit"
+
+    CATEGORY = "ImpactPack/Util"
+
+    @staticmethod
+    def save_to_file(tensor_bytes, prompt, extra_pnginfo, image, image_path):
+        compressed_data = BytesIO()
+        with zipfile.ZipFile(compressed_data, mode='w') as archive:
+            archive.writestr("latent", tensor_bytes)
+        image = image.copy()
+        exif_data = {"Exif": {piexif.ExifIFD.UserComment: compressed_data.getvalue()}}
+
+        metadata = PngInfo()
+        if prompt is not None:
+            metadata.add_text("prompt", json.dumps(prompt))
+        if extra_pnginfo is not None:
+            for x in extra_pnginfo:
+                metadata.add_text(x, json.dumps(extra_pnginfo[x]))
+
+        exif_bytes = piexif.dump(exif_data)
+        image.save(image_path, format='png', exif=exif_bytes, pnginfo=metadata, optimize=True)
+
+    @staticmethod
+    def prepare_preview(latent_tensor):
+        lower_bound = 128
+        upper_bound = 256
+
+        previewer = core.get_previewer("cpu", force=True)
+        image = previewer.decode_latent_to_preview(latent_tensor)
+        min_size = min(image.size[0], image.size[1])
+        max_size = max(image.size[0], image.size[1])
+
+        scale_factor = 1
+        if max_size > upper_bound:
+            scale_factor = upper_bound/max_size
+
+        # prevent too small preview
+        if min_size*scale_factor < lower_bound:
+            scale_factor = lower_bound/min_size
+
+        w = int(image.size[0] * scale_factor)
+        h = int(image.size[1] * scale_factor)
+
+        image = image.resize((w, h), resample=Image.NEAREST)
+
+        return LatentSender.attach_format_text(image)
+
+    @staticmethod
+    def attach_format_text(image):
+        width_a, height_a = image.size
+
+        letter_image = Image.open(latent_letter_path)
+        width_b, height_b = letter_image.size
+
+        new_width = max(width_a, width_b)
+        new_height = height_a + height_b
+
+        new_image = Image.new('RGB', (new_width, new_height), (0, 0, 0))
+
+        offset_x = (new_width - width_b) // 2
+        offset_y = (height_a + (new_height - height_a - height_b) // 2)
+        new_image.paste(letter_image, (offset_x, offset_y))
+
+        new_image.paste(image, (0, 0))
+
+        return new_image
+
+    def doit(self, samples, filename_prefix="latents/LatentSender", link_id=0, prompt=None, extra_pnginfo=None):
+        full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path(filename_prefix, self.output_dir)
+
+        # load preview
+        preview = LatentSender.prepare_preview(samples['samples'])
+
+        # save the latent with metadata so it can be shared
+        file = f"{filename}_{counter:05}_.latent.png"
+        fullpath = os.path.join(full_output_folder, file)
+
+        output = {"latent_tensor": samples["samples"]}
+
+        tensor_bytes = safetensors.torch.save(output)
+        LatentSender.save_to_file(tensor_bytes, prompt, extra_pnginfo, preview, fullpath)
+
+        latent_path = {
+            'filename': file,
+            'subfolder': subfolder,
+            'type': self.type
+        }
+
+        PromptServer.instance.send_sync("latent-send", {"link_id": link_id, "images": [latent_path]})
+
+        return {'ui': {'images': [latent_path]}}
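+
+
+# Illustrative sketch: LatentSender/LatentReceiver above share latents as a
+# PNG preview whose EXIF UserComment holds a zip archive containing the
+# safetensors-serialized latent. A standalone round-trip of just that
+# container format, mirroring save_to_file and load_preview_latent
+# (function name and arguments are illustrative only):
+def _example_latent_png_roundtrip(samples, preview_image, path):
+    tensor_bytes = safetensors.torch.save({"latent_tensor": samples["samples"]})
+    buf = BytesIO()
+    with zipfile.ZipFile(buf, mode='w') as archive:
+        archive.writestr("latent", tensor_bytes)
+    exif_bytes = piexif.dump({"Exif": {piexif.ExifIFD.UserComment: buf.getvalue()}})
+    preview_image.save(path, format='png', exif=exif_bytes)
+
+    # read it back out of the PNG's EXIF block
+    img = Image.open(path)
+    exif = piexif.load(img.info["exif"])
+    with zipfile.ZipFile(BytesIO(exif["Exif"][piexif.ExifIFD.UserComment]), mode='r') as archive:
+        restored = safetensors.torch.load(archive.read("latent"))
+    return {"samples": restored["latent_tensor"]}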
1, "max": 4, "step": 1}), + "images1": ("IMAGE", ), + }, + + "optional": { + "mask1_opt": ("MASK",), + "images2_opt": ("IMAGE",), + "mask2_opt": ("MASK",), + "images3_opt": ("IMAGE",), + "mask3_opt": ("MASK",), + "images4_opt": ("IMAGE",), + "mask4_opt": ("MASK",), + }, + } + + RETURN_TYPES = ("IMAGE", "MASK", ) + + OUTPUT_NODE = True + + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Util" + + def doit(self, select, images1, mask1_opt=None, images2_opt=None, mask2_opt=None, images3_opt=None, mask3_opt=None, images4_opt=None, mask4_opt=None): + if select == 1: + return images1, mask1_opt, + elif select == 2: + return images2_opt, mask2_opt, + elif select == 3: + return images3_opt, mask3_opt, + else: + return images4_opt, mask4_opt, + + +class LatentSwitch: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "select": ("INT", {"default": 1, "min": 1, "max": 4, "step": 1}), + "latent1": ("LATENT",), + }, + + "optional": { + "latent2_opt": ("LATENT",), + "latent3_opt": ("LATENT",), + "latent4_opt": ("LATENT",), + }, + } + + RETURN_TYPES = ("LATENT", ) + + OUTPUT_NODE = True + + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Util" + + def doit(self, select, latent1, latent2_opt=None, latent3_opt=None, latent4_opt=None): + if select == 1: + return (latent1,) + elif select == 2: + return (latent2_opt,) + elif select == 3: + return (latent3_opt,) + else: + return (latent4_opt,) + + +class SEGSSwitch: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "select": ("INT", {"default": 1, "min": 1, "max": 4, "step": 1}), + "segs": ("SEGS",), + }, + + "optional": { + "segs2_opt": ("SEGS",), + "segs3_opt": ("SEGS",), + "segs4_opt": ("SEGS",), + }, + } + + RETURN_TYPES = ("SEGS", ) + + OUTPUT_NODE = True + + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Util" + + def doit(self, select, segs, segs2_opt=None, segs3_opt=None, segs4_opt=None): + if select == 1: + return (segs,) + elif select == 2: + return (segs2_opt,) + elif select == 3: + return (segs3_opt,) + else: + return (segs4_opt,) + + +# class SEGPick: +# @classmethod +# def INPUT_TYPES(s): +# return {"required": { +# "select": ("INT", {"default": 1, "min": 1, "max": 99999, "step": 1}), +# "segs": ("SEGS",), +# }, +# } +# +# RETURN_TYPES = ("SEGS", ) +# +# OUTPUT_NODE = True +# +# FUNCTION = "doit" +# +# CATEGORY = "ImpactPack/Util" +# +# def doit(self, select, segs): +# if select == 1: +# return (segs,) +# elif select == 2: +# return (segs2_opt,) +# elif select == 3: +# return (segs3_opt,) +# else: +# return (segs4_opt,) + + +class SaveConditioning: + def __init__(self): + self.output_dir = folder_paths.get_output_directory() + + @classmethod + def INPUT_TYPES(s): + return {"required": {"conditioning": ("CONDITIONING", ), + "filename_prefix": ("STRING", {"default": "conditioning/ComfyUI"}), + }, + "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"}, + } + + RETURN_TYPES = () + FUNCTION = "doit" + + OUTPUT_NODE = True + + CATEGORY = "_for_testing" + + def doit(self, conditioning, filename_prefix, prompt=None, extra_pnginfo=None): + # support save metadata for latent sharing + prompt_info = "" + if prompt is not None: + prompt_info = json.dumps(prompt) + + for tensor_data, meta_data in conditioning: + full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path(filename_prefix, self.output_dir) + + metadata = {"prompt": prompt_info} + if extra_pnginfo is not None: + for x in extra_pnginfo: + metadata[x] = json.dumps(extra_pnginfo[x]) + + file = 
f"{filename}_{counter:05}_.conditioning" + file = os.path.join(full_output_folder, file) + + print(f"meta_data:{meta_data}") + print(f"tensor_data:{tensor_data}") + + output = {"conditioning": tensor_data} + metadata['conditioning_aux'] = json.dumps(meta_data) + + safetensors.torch.save_file(output, file, metadata=metadata) + + return {} + + +class LoadConditioning: + @classmethod + def INPUT_TYPES(s): + input_dir = folder_paths.get_input_directory() + files = [f for f in os.listdir(input_dir) if os.path.isfile(os.path.join(input_dir, f)) and f.endswith(".conditioning")] + return {"required": {"conditioning": [sorted(files), ]}, } + + CATEGORY = "_for_testing" + + RETURN_TYPES = ("CONDITIONING", ) + FUNCTION = "load" + + def load(self, conditioning): + conditioning_path = folder_paths.get_annotated_filepath(conditioning) + data = safetensors.torch.load_file(conditioning_path, device="cpu") + return ([[data['conditioning'], {}]], ) + + @classmethod + def IS_CHANGED(s, conditioning): + image_path = folder_paths.get_annotated_filepath(conditioning) + m = hashlib.sha256() + with open(image_path, 'rb') as f: + m.update(f.read()) + return m.digest().hex() + + @classmethod + def VALIDATE_INPUTS(s, conditioning): + if not folder_paths.exists_annotated_filepath(conditioning): + return "Invalid conditioning file: {}".format(conditioning) + return True + + +class ImpactWildcardProcessor: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "wildcard_text": ("STRING", {"multiline": True}), + "populated_text": ("STRING", {"multiline": True}), + "mode": ("BOOLEAN", {"default": True, "label_on": "Populate", "label_off": "Fixed"}), + }, + } + + CATEGORY = "ImpactPack/Prompt" + + RETURN_TYPES = ("STRING", ) + FUNCTION = "doit" + + def doit(self, wildcard_text, populated_text, mode): + return (populated_text, ) + + +class ReencodeLatent: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "samples": ("LATENT", ), + "tile_mode": (["None", "Both", "Decode(input) only", "Encode(output) only"],), + "input_vae": ("VAE", ), + "output_vae": ("VAE", ), + }, + } + + CATEGORY = "ImpactPack/Util" + + RETURN_TYPES = ("LATENT", ) + FUNCTION = "doit" + + def doit(self, samples, tile_mode, input_vae, output_vae): + if tile_mode in ["Both", "Decode(input) only"]: + pixels = nodes.VAEDecodeTiled().decode(input_vae, samples)[0] + else: + pixels = nodes.VAEDecode().decode(input_vae, samples)[0] + + if tile_mode in ["Both", "Encode(output) only"]: + return nodes.VAEEncodeTiled().encode(output_vae, pixels) + else: + return nodes.VAEEncode().encode(output_vae, pixels) + + +class ReencodeLatentPipe: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "samples": ("LATENT", ), + "tile_mode": (["None", "Both", "Decode(input) only", "Encode(output) only"],), + "input_basic_pipe": ("BASIC_PIPE", ), + "output_basic_pipe": ("BASIC_PIPE", ), + }, + } + + CATEGORY = "ImpactPack/Util" + + RETURN_TYPES = ("LATENT", ) + FUNCTION = "doit" + + def doit(self, samples, tile_mode, input_basic_pipe, output_basic_pipe): + _, _, input_vae, _, _ = input_basic_pipe + _, _, output_vae, _, _ = output_basic_pipe + return ReencodeLatent().doit(samples, tile_mode, input_vae, output_vae) + + +class KSamplerBasicPipe: + @classmethod + def INPUT_TYPES(s): + return {"required": + {"basic_pipe": ("BASIC_PIPE",), + "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + "steps": ("INT", {"default": 20, "min": 1, "max": 10000}), + "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}), + "sampler_name": 
(comfy.samplers.KSampler.SAMPLERS, ), + "scheduler": (comfy.samplers.KSampler.SCHEDULERS, ), + "latent_image": ("LATENT", ), + "denoise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}), + } + } + + RETURN_TYPES = ("BASIC_PIPE", "LATENT", "VAE") + FUNCTION = "sample" + + CATEGORY = "sampling" + + def sample(self, basic_pipe, seed, steps, cfg, sampler_name, scheduler, latent_image, denoise=1.0): + model, clip, vae, positive, negative = basic_pipe + latent = nodes.KSampler().sample(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise)[0] + return (basic_pipe, latent, vae) + + +class KSamplerAdvancedBasicPipe: + @classmethod + def INPUT_TYPES(s): + return {"required": + {"basic_pipe": ("BASIC_PIPE",), + "add_noise": ("BOOLEAN", {"default": True, "label_on": "enabled", "label_off": "disabled"}), + "noise_seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + "steps": ("INT", {"default": 20, "min": 1, "max": 10000}), + "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}), + "sampler_name": (comfy.samplers.KSampler.SAMPLERS, ), + "scheduler": (comfy.samplers.KSampler.SCHEDULERS, ), + "latent_image": ("LATENT", ), + "start_at_step": ("INT", {"default": 0, "min": 0, "max": 10000}), + "end_at_step": ("INT", {"default": 10000, "min": 0, "max": 10000}), + "return_with_leftover_noise": ("BOOLEAN", {"default": False, "label_on": "enabled", "label_off": "disabled"}), + } + } + + RETURN_TYPES = ("BASIC_PIPE", "LATENT", "VAE") + FUNCTION = "sample" + + CATEGORY = "sampling" + + def sample(self, basic_pipe, add_noise, noise_seed, steps, cfg, sampler_name, scheduler, latent_image, start_at_step, end_at_step, return_with_leftover_noise, denoise=1.0): + model, clip, vae, positive, negative = basic_pipe + + if add_noise: + add_noise = "enabled" + else: + add_noise = "disabled" + + if return_with_leftover_noise: + return_with_leftover_noise = "enabled" + else: + return_with_leftover_noise = "disabled" + + latent = nodes.KSamplerAdvanced().sample(model, add_noise, noise_seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, start_at_step, end_at_step, return_with_leftover_noise, denoise)[0] + return (basic_pipe, latent, vae) + + +class ImpactLogger: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "text": ("STRING", {"default": "", "forceInput": True}), + }, + "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"}, + } + + CATEGORY = "ImpactPack/Debug" + + OUTPUT_NODE = True + + RETURN_TYPES = () + FUNCTION = "doit" + + def doit(self, text, prompt, extra_pnginfo): + print(f"[IMPACT LOGGER]: {text}") + + print(f" PROMPT: {prompt}") + + # for x in prompt: + # if 'inputs' in x and 'populated_text' in x['inputs']: + # print(f"PROMP: {x['10']['inputs']['populated_text']}") + # + # for x in extra_pnginfo['workflow']['nodes']: + # if x['type'] == 'ImpactWildcardProcessor': + # print(f" WV : {x['widgets_values'][1]}\n") + + return {} + + +class ImageBatchToImageList: + @classmethod + def INPUT_TYPES(s): + return {"required": { "image": ("IMAGE",), } } + + RETURN_TYPES = ("IMAGE",) + OUTPUT_IS_LIST = (True,) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Util" + + def doit(self, image): + images = [image[i:i + 1, ...] for i in range(image.shape[0])] + return (images, )
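+
+
+# Illustrative sketch: ImageBatchToImageList slices a [B, H, W, C] batch into
+# B single-image tensors, keeping the batch dimension via the i:i+1 slice.
+# The inverse direction (list -> batch) is a concat along dim 0, provided all
+# images share the same H/W/C; the name is illustrative, not a node shipped
+# by this file:
+def _example_image_list_to_batch(images):
+    return (torch.cat(images, dim=0), )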