# from https://github.com/lllyasviel/ControlNet/blob/main/gradio_canny2image.py
#
# Generates reference inpainting images with the original ControlNet v1.1
# codebase (control_v11p_sd15_inpaint) for a fixed set of control images,
# prompts, and seeds.
import os
import sys

import einops
import numpy as np
import torch
import yaml
from PIL import Image

CONTROL_NET_PATH = '/home/takuma/Documents/co/ControlNet-v1-1-nightly/'
CONTROL_NET_MODEL_PATH = '../../ControlNet-v1-1'
sys.path.append(CONTROL_NET_PATH)

from share import *
from pytorch_lightning import seed_everything
from cldm.model import create_model, load_state_dict
from cldm.ddim_hacked import DDIMSampler
from diffusers.utils import load_image

test_prompt = "best quality, extremely detailed"
test_negative_prompt = "lowres, bad anatomy, worst quality, low quality"


@torch.no_grad()
def generate(prompt, n_prompt, seed, control,
             ddim_steps=20, eta=0.0, scale=9.0, H=512, W=512,
             strength=1.0, guess_mode=False):
    """Run one ControlNet-conditioned DDIM sampling pass and return a PIL image.

    Uses the module-level `model`, `ddim_sampler`, and `num_samples` globals
    set up in the __main__ block.
    """
    seed_everything(seed)

    cond = {"c_concat": [control],
            "c_crossattn": [model.get_learned_conditioning([prompt] * num_samples)]}
    un_cond = {"c_concat": None if guess_mode else [control],
               "c_crossattn": [model.get_learned_conditioning([n_prompt] * num_samples)]}
    shape = (4, H // 8, W // 8)

    # Guess mode scales the 13 ControlNet outputs with an exponential decay
    # (0.825 ** (12 - i)), following the upstream ControlNet demo scripts;
    # otherwise every injection point uses the same strength.
    model.control_scales = (
        [strength * (0.825 ** float(12 - i)) for i in range(13)]
        if guess_mode else ([strength] * 13)
    )

    # Draw the initial latent on CPU with a seeded generator for reproducibility.
    latent = torch.randn(
        (1,) + shape, device="cpu",
        generator=torch.Generator(device="cpu").manual_seed(seed),
    ).cuda()

    samples, intermediates = ddim_sampler.sample(
        ddim_steps, num_samples, shape, cond,
        x_T=latent, verbose=False, eta=eta,
        unconditional_guidance_scale=scale,
        unconditional_conditioning=un_cond,
    )

    # Decode latents to pixel space and convert from [-1, 1] to uint8 RGB.
    x_samples = model.decode_first_stage(samples)
    x_samples = (
        (einops.rearrange(x_samples, 'b c h w -> b h w c') * 127.5 + 127.5)
        .cpu().numpy().clip(0, 255).astype(np.uint8)
    )
    return Image.fromarray(x_samples[0])


def control_images(control_image_folder, model_name):
    """Load the control images listed for `model_name` in control_images.yaml."""
    with open('./control_images.yaml', 'r') as f:
        d = yaml.safe_load(f)
    filenames = d[model_name]
    return [Image.open(f'{control_image_folder}/{fn}').convert("RGB") for fn in filenames]


def make_image_condition(image, image_mask=None):
    """Build the inpaint conditioning tensor: masked pixels are set to -1."""
    image = np.array(image.convert("RGB")).astype(np.float32) / 255.0
    if image_mask is not None:
        image_mask = np.array(image_mask.convert("L"))
        assert (
            image.shape[0:2] == image_mask.shape[0:2]
        ), "image and image_mask must have the same image size"
        image[image_mask < 128] = -1.0  # set as masked pixel
    image = np.expand_dims(image, 0).transpose(0, 3, 1, 2)  # HWC -> NCHW
    image = torch.from_numpy(image)
    return image


if __name__ == '__main__':
    model_name = "p_sd15_inpaint"
    original_image_folder = "./control_images/"
    control_image_folder = './control_images/converted/'
    output_image_folder = './output_images/ref/'
    os.makedirs(output_image_folder, exist_ok=True)

    # The lineart_anime ControlNet is paired with Anything v3; all other
    # models use the Stable Diffusion 1.5 base checkpoint.
    if model_name == 'p_sd15s2_lineart_anime':
        base_model_file = 'anything-v3-full.safetensors'
    else:
        base_model_file = 'v1-5-pruned.ckpt'

    num_samples = 1

    # Load the base SD weights first, then the ControlNet weights on top.
    model = create_model(f'{CONTROL_NET_MODEL_PATH}/control_v11{model_name}.yaml').cpu()
    model.load_state_dict(load_state_dict(f'{CONTROL_NET_PATH}/models/{base_model_file}', location='cuda'), strict=False)
    model.load_state_dict(load_state_dict(f'{CONTROL_NET_MODEL_PATH}/control_v11{model_name}.pth', location='cuda'), strict=False)
    model = model.cuda()
    ddim_sampler = DDIMSampler(model)

    original_image_filenames = [
        "pexels-sound-on-3760767_512x512.png",
        "vermeer_512x512.png",
        "bird_512x512.png",
    ]
    # All images share the same 512x512 mask.
    inpaint_image_conditions = [
        make_image_condition(
            Image.open(f"{original_image_folder}{fn}"),
            Image.open(f"{original_image_folder}mask_512x512.png"),
        )
        for fn in original_image_filenames
    ]

    for i, control in enumerate(inpaint_image_conditions):
        # Control preprocessing used by the canny example; not needed here
        # because make_image_condition already returns an NCHW float tensor.
        # control = np.array(control)[:,:,::-1].copy()
        # control = torch.from_numpy(control).float().cuda() / 255.0
        # control = torch.stack([control for _ in range(num_samples)], dim=0)
        # control = einops.rearrange(control, 'b h w c -> b c h w').clone()
        control = control.cuda()

        for seed in range(4):
            image = generate(test_prompt, test_negative_prompt, seed=seed, control=control)
            image.save(f'{output_image_folder}output_{model_name}_{i}_{seed}.png')