"""Image/tensor conversion helpers and model-download utilities for a ComfyUI
LayerDiffusion pipeline."""

import os
from pathlib import Path
from typing import List

import numpy as np
import torch
import torch.nn.functional as F
from huggingface_hub import hf_hub_download, snapshot_download
from PIL import Image, ImageOps, ImageSequence
from torchvision.transforms.functional import normalize


def tensor_to_pil(images: torch.Tensor | List[torch.Tensor]) -> List[Image.Image]:
    """Convert one or more float image tensors with values in [0, 1] to PIL images."""
    if not isinstance(images, list):
        images = [images]
    imgs = []
    for image in images:
        arr = 255.0 * image.cpu().numpy()
        img = Image.fromarray(np.clip(np.squeeze(arr), 0, 255).astype(np.uint8))
        imgs.append(img)
    return imgs


def pad_image(input_image: Image.Image) -> Image.Image:
    """Edge-pad an image so both sides are multiples of 64 (at least 128 px),
    then letterbox it onto a square black canvas."""
    pad_w, pad_h = (
        np.max(((2, 2), np.ceil(np.array(input_image.size) / 64).astype(int)), axis=0)
        * 64
        - input_image.size
    )
    im_padded = Image.fromarray(
        np.pad(np.array(input_image), ((0, pad_h), (0, pad_w), (0, 0)), mode="edge")
    )
    w, h = im_padded.size
    if w == h:
        return im_padded
    elif w > h:
        # Center vertically on a w x w black square.
        new_image = Image.new(im_padded.mode, (w, w), (0, 0, 0))
        new_image.paste(im_padded, (0, (w - h) // 2))
        return new_image
    else:
        # Center horizontally on an h x h black square.
        new_image = Image.new(im_padded.mode, (h, h), (0, 0, 0))
        new_image.paste(im_padded, ((h - w) // 2, 0))
        return new_image


def pil_to_tensor(image: Image.Image) -> tuple[torch.Tensor, torch.Tensor]:
    """Convert a (possibly multi-frame) PIL image into a batched float tensor in
    [0, 1] plus a mask tensor derived from the alpha channel."""
    output_images = []
    output_masks = []
    for frame in ImageSequence.Iterator(image):
        frame = ImageOps.exif_transpose(frame)  # honor EXIF orientation
        if frame.mode == "I":
            # 32-bit integer frames: rescale into the 0-255 range before converting.
            frame = frame.point(lambda p: p * (1 / 255))
        rgb = np.array(frame.convert("RGB")).astype(np.float32) / 255.0
        rgb = torch.from_numpy(rgb)[None,]  # add batch dimension: (1, H, W, 3)
        if "A" in frame.getbands():
            # Invert alpha so opaque pixels map to 0 and transparent pixels to 1.
            mask = np.array(frame.getchannel("A")).astype(np.float32) / 255.0
            mask = 1.0 - torch.from_numpy(mask)
        else:
            mask = torch.zeros((64, 64), dtype=torch.float32, device="cpu")
        output_images.append(rgb)
        output_masks.append(mask.unsqueeze(0))

    if len(output_images) > 1:
        output_image = torch.cat(output_images, dim=0)
        output_mask = torch.cat(output_masks, dim=0)
    else:
        output_image = output_images[0]
        output_mask = output_masks[0]
    return (output_image, output_mask)


def preprocess_image(im: np.ndarray, model_input_size: list) -> torch.Tensor:
    """Resize an HWC uint8 image to the model input size and normalize it with
    mean 0.5 / std 1.0 per channel."""
    if len(im.shape) < 3:
        im = im[:, :, np.newaxis]  # promote grayscale to HWC
    im_tensor = torch.tensor(im, dtype=torch.float32).permute(2, 0, 1)
    # Bilinear resize, then cast back to uint8 (quantizing the interpolated
    # values before rescaling to [0, 1]).
    im_tensor = F.interpolate(
        torch.unsqueeze(im_tensor, 0), size=model_input_size, mode="bilinear"
    ).type(torch.uint8)
    image = torch.divide(im_tensor, 255.0)
    image = normalize(image, [0.5, 0.5, 0.5], [1.0, 1.0, 1.0])
    return image


def postprocess_image(result: torch.Tensor, im_size: list) -> np.ndarray:
    """Resize a model output back to the original (H, W) size and min-max
    normalize it into a uint8 array."""
    result = torch.squeeze(F.interpolate(result, size=im_size, mode="bilinear"), 0)
    ma = torch.max(result)
    mi = torch.min(result)
    result = (result - mi) / (ma - mi)  # min-max normalize to [0, 1]
    im_array = (result * 255).permute(1, 2, 0).detach().cpu().numpy().astype(np.uint8)
    im_array = np.squeeze(im_array)
    return im_array


def downloadModels():
    """Fetch the base checkpoint and the LayerDiffusion weights from the
    Hugging Face Hub and symlink them into the ComfyUI model folders."""
    MODEL_PATH = hf_hub_download(
        repo_id="lllyasviel/fav_models",
        subfolder="fav",
        filename="juggernautXL_v8Rundiffusion.safetensors",
    )
    LAYERS_PATH = snapshot_download(
        repo_id="LayerDiffusion/layerdiffusion-v1", allow_patterns="*.safetensors"
    )
    layer_model_dir = Path("./ComfyUI/models/layer_model")
    layer_model_dir.mkdir(parents=True, exist_ok=True)  # os.symlink needs the parent dir
    for file in Path(LAYERS_PATH).glob("*.safetensors"):
        target_path = layer_model_dir / file.name
        if not target_path.exists():
            os.symlink(file, target_path)

    model_target_path = Path(
        "./ComfyUI/models/checkpoints/juggernautXL_v8Rundiffusion.safetensors"
    )
    model_target_path.parent.mkdir(parents=True, exist_ok=True)
    if not model_target_path.exists():
        os.symlink(MODEL_PATH, model_target_path)
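

# --- Usage sketch (illustrative, not part of the original module) -------------
# A minimal example of how these helpers chain together for a single image.
# "input.png", "output.png", and the 1024x1024 model input size are hypothetical
# placeholders, not values taken from the module above.
if __name__ == "__main__":
    downloadModels()  # link checkpoints into ./ComfyUI/models/

    img = Image.open("input.png")       # hypothetical input file
    img = pad_image(img)                # square, /64-aligned canvas
    tensor, mask = pil_to_tensor(img)   # (1, H, W, 3) float tensor plus alpha mask

    # Prepare the same image for a fixed-size model; a model output could then be
    # mapped back with postprocess_image(out, img.size[::-1]) (size is (H, W)).
    inp = preprocess_image(np.array(img), [1024, 1024])

    # Round-trip the tensor back to PIL and save it.
    tensor_to_pil(tensor)[0].save("output.png")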