# https://github.com/comfyanonymous/ComfyUI/blob/master/nodes.py
import os

from ldm_patched.pfn import model_loading
from ldm_patched.modules import model_management
import torch
import ldm_patched.modules.utils
import ldm_patched.utils.path_utils

class UpscaleModelLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"model_name": (ldm_patched.utils.path_utils.get_filename_list("upscale_models"), ),
                             }}
    RETURN_TYPES = ("UPSCALE_MODEL",)
    FUNCTION = "load_model"

    CATEGORY = "loaders"

    def load_model(self, model_name):
        model_path = ldm_patched.utils.path_utils.get_full_path("upscale_models", model_name)
        sd = ldm_patched.modules.utils.load_torch_file(model_path, safe_load=True)
        # Checkpoints saved from DataParallel wrappers carry a "module." prefix; strip it.
        if "module.layers.0.residual_group.blocks.0.norm1.weight" in sd:
            sd = ldm_patched.modules.utils.state_dict_prefix_replace(sd, {"module.": ""})
        out = model_loading.load_state_dict(sd).eval()
        return (out, )

class ImageUpscaleWithModel:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"upscale_model": ("UPSCALE_MODEL",),
                             "image": ("IMAGE",),
                             }}
    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "upscale"

    CATEGORY = "image/upscaling"

    def upscale(self, upscale_model, image):
        device = model_management.get_torch_device()
        upscale_model.to(device)
        # IMAGE tensors are [batch, height, width, channels]; the model expects channels-first.
        in_img = image.movedim(-1, -3).to(device)
        free_memory = model_management.get_free_memory(device)

        tile = 512
        overlap = 32

        # Upscale tile by tile, halving the tile size on out-of-memory and giving up below 128.
        oom = True
        while oom:
            try:
                steps = in_img.shape[0] * ldm_patched.modules.utils.get_tiled_scale_steps(in_img.shape[3], in_img.shape[2], tile_x=tile, tile_y=tile, overlap=overlap)
                pbar = ldm_patched.modules.utils.ProgressBar(steps)
                s = ldm_patched.modules.utils.tiled_scale(in_img, lambda a: upscale_model(a), tile_x=tile, tile_y=tile, overlap=overlap, upscale_amount=upscale_model.scale, pbar=pbar)
                oom = False
            except model_management.OOM_EXCEPTION as e:
                tile //= 2
                if tile < 128:
                    raise e

        upscale_model.cpu()
        # Back to channels-last and clamp to the valid [0, 1] image range.
        s = torch.clamp(s.movedim(-3, -1), min=0, max=1.0)
        return (s,)

NODE_CLASS_MAPPINGS = {
    "UpscaleModelLoader": UpscaleModelLoader,
    "ImageUpscaleWithModel": ImageUpscaleWithModel
}
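
# Minimal usage sketch (an assumption for illustration, not part of the upstream file):
# shows how the two nodes chain together when called directly rather than through the
# node graph. The model filename below is hypothetical and must match a file that
# actually exists in the configured "upscale_models" folder.
if __name__ == "__main__":
    loader = UpscaleModelLoader()
    (upscale_model,) = loader.load_model("RealESRGAN_x4plus.pth")  # hypothetical filename

    # IMAGE tensors are [batch, height, width, channels] floats in [0, 1].
    image = torch.rand(1, 256, 256, 3)

    (upscaled,) = ImageUpscaleWithModel().upscale(upscale_model, image)
    print(upscaled.shape)  # e.g. [1, 1024, 1024, 3] for a 4x model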