#!/usr/bin/env python
import os
import random

import cv2
import gradio as gr
import numpy as np
import spaces
import torch
from controlnet_aux import HEDdetector
from diffusers import (
    AutoencoderKL,
    ControlNetModel,
    EulerAncestralDiscreteScheduler,
    StableDiffusionXLControlNetPipeline,
)
from gradio_imageslider import ImageSlider
from PIL import Image

DESCRIPTION = '''# Scribble SDXL 🖋️🌄
sketch to image with SDXL, using [@xinsir](https://huggingface.co/xinsir) [scribble sdxl controlnet](https://huggingface.co/xinsir/controlnet-scribble-sdxl-1.0)
'''

if not torch.cuda.is_available():
    DESCRIPTION += "\n<p>Running on CPU 🥶 This demo does not work on CPU.</p>"


def nms(x, t, s):
    """Directional non-maximum suppression: blur the edge map with sigma s,
    keep pixels that are local maxima along one of four directions, then
    binarize at threshold t."""
    x = cv2.GaussianBlur(x.astype(np.float32), (0, 0), s)

    f1 = np.array([[0, 0, 0], [1, 1, 1], [0, 0, 0]], dtype=np.uint8)  # horizontal
    f2 = np.array([[0, 1, 0], [0, 1, 0], [0, 1, 0]], dtype=np.uint8)  # vertical
    f3 = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]], dtype=np.uint8)  # diagonal
    f4 = np.array([[0, 0, 1], [0, 1, 0], [1, 0, 0]], dtype=np.uint8)  # anti-diagonal

    y = np.zeros_like(x)
    for f in [f1, f2, f3, f4]:
        np.putmask(y, cv2.dilate(x, kernel=f) == x, x)

    z = np.zeros_like(y, dtype=np.uint8)
    z[y > t] = 255
    return z
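# A minimal usage sketch for `nms` (hypothetical variable names; in this demo
# `run` below applies it to the output of the HED `processor` defined further
# down, with t=127 and s=3):
#
#     edges = np.array(processor(pil_image))   # uint8 soft-edge map from HED
#     ridges = nms(edges, 127, 3)              # thin, binarized ridge lines
#
# Dilating with each line-shaped kernel takes the max over one direction, so
# `cv2.dilate(x, kernel=f) == x` marks pixels that are local maxima along that
# direction; the union over the four kernels thins the blurred edges down to
# ridge lines before thresholding.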
" style_list = [ { "name": "(No style)", "prompt": "{prompt}", "negative_prompt": "", }, { "name": "Cinematic", "prompt": "cinematic still {prompt} . emotional, harmonious, vignette, highly detailed, high budget, bokeh, cinemascope, moody, epic, gorgeous, film grain, grainy", "negative_prompt": "anime, cartoon, graphic, text, painting, crayon, graphite, abstract, glitch, deformed, mutated, ugly, disfigured", }, { "name": "3D Model", "prompt": "professional 3d model {prompt} . octane render, highly detailed, volumetric, dramatic lighting", "negative_prompt": "ugly, deformed, noisy, low poly, blurry, painting", }, { "name": "Anime", "prompt": "anime artwork {prompt} . anime style, key visual, vibrant, studio anime, highly detailed", "negative_prompt": "photo, deformed, black and white, realism, disfigured, low contrast", }, { "name": "Digital Art", "prompt": "concept art {prompt} . digital artwork, illustrative, painterly, matte painting, highly detailed", "negative_prompt": "photo, photorealistic, realism, ugly", }, { "name": "Photographic", "prompt": "cinematic photo {prompt} . 35mm photograph, film, bokeh, professional, 4k, highly detailed", "negative_prompt": "drawing, painting, crayon, sketch, graphite, impressionist, noisy, blurry, soft, deformed, ugly", }, { "name": "Pixel art", "prompt": "pixel-art {prompt} . low-res, blocky, pixel art style, 8-bit graphics", "negative_prompt": "sloppy, messy, blurry, noisy, highly detailed, ultra textured, photo, realistic", }, { "name": "Fantasy art", "prompt": "ethereal fantasy concept art of {prompt} . magnificent, celestial, ethereal, painterly, epic, majestic, magical, fantasy art, cover art, dreamy", "negative_prompt": "photographic, realistic, realism, 35mm film, dslr, cropped, frame, text, deformed, glitch, noise, noisy, off-center, deformed, cross-eyed, closed eyes, bad anatomy, ugly, disfigured, sloppy, duplicate, mutated, black and white", }, { "name": "Neonpunk", "prompt": "neonpunk style {prompt} . cyberpunk, vaporwave, neon, vibes, vibrant, stunningly beautiful, crisp, detailed, sleek, ultramodern, magenta highlights, dark purple shadows, high contrast, cinematic, ultra detailed, intricate, professional", "negative_prompt": "painting, drawing, illustration, glitch, deformed, mutated, cross-eyed, ugly, disfigured", }, { "name": "Manga", "prompt": "manga style {prompt} . vibrant, high-energy, detailed, iconic, Japanese comic style", "negative_prompt": "ugly, deformed, noisy, blurry, low contrast, realism, photorealistic, Western comic style", }, ] styles = {k["name"]: (k["prompt"], k["negative_prompt"]) for k in style_list} STYLE_NAMES = list(styles.keys()) DEFAULT_STYLE_NAME = "(No style)" def apply_style(style_name: str, positive: str, negative: str = "") -> tuple[str, str]: p, n = styles.get(style_name, styles[DEFAULT_STYLE_NAME]) return p.replace("{prompt}", positive), n + negative device = torch.device("cuda" if torch.cuda.is_available() else "cpu") eulera_scheduler = EulerAncestralDiscreteScheduler.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0", subfolder="scheduler") controlnet = ControlNetModel.from_pretrained( "xinsir/controlnet-scribble-sdxl-1.0", torch_dtype=torch.float16 ) # when test with other base model, you need to change the vae also. 
# When testing with another base model, you need to change the VAE as well.
vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)

pipe = StableDiffusionXLControlNetPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    controlnet=controlnet,
    vae=vae,
    torch_dtype=torch.float16,
    scheduler=eulera_scheduler,
)
pipe.to(device)

MAX_SEED = np.iinfo(np.int32).max
processor = HEDdetector.from_pretrained("lllyasviel/Annotators")


def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    return seed


@spaces.GPU
def run(
    image: dict,
    prompt: str,
    negative_prompt: str,
    style_name: str = DEFAULT_STYLE_NAME,
    num_steps: int = 25,
    guidance_scale: float = 5,
    controlnet_conditioning_scale: float = 1.0,
    seed: int = 0,
    use_hed: bool = False,
    progress=gr.Progress(track_tqdm=True),
) -> tuple:
    # The ImageEditor component passes a dict; the flattened drawing is under
    # the "composite" key.
    composite = image["composite"]

    # Rescale to roughly 1024x1024 pixels of area while keeping the aspect
    # ratio, snapping to multiples of 8 as the SDXL pipeline requires.
    width, height = composite.size
    ratio = np.sqrt(1024.0 * 1024.0 / (width * height))
    new_width = int(width * ratio) // 8 * 8
    new_height = int(height * ratio) // 8 * 8
    image = composite.resize((new_width, new_height))

    if not use_hed:
        controlnet_img = image
    else:
        controlnet_img = processor(image, scribble=False)
        # The following processing simulates a human sketch: different
        # thresholds generate different line widths.
        controlnet_img = np.array(controlnet_img)
        controlnet_img = nms(controlnet_img, 127, 3)
        controlnet_img = cv2.GaussianBlur(controlnet_img, (0, 0), 3)

        # A higher threshold gives thinner lines.
        random_val = int(round(random.uniform(0.01, 0.10), 2) * 255)
        controlnet_img[controlnet_img > random_val] = 255
        controlnet_img[controlnet_img < 255] = 0
        image = Image.fromarray(controlnet_img)

    prompt, negative_prompt = apply_style(style_name, prompt, negative_prompt)

    generator = torch.Generator(device=device).manual_seed(seed)
    out = pipe(
        prompt=prompt,
        negative_prompt=negative_prompt,
        image=image,
        num_inference_steps=num_steps,
        generator=generator,
        controlnet_conditioning_scale=controlnet_conditioning_scale,
        guidance_scale=guidance_scale,
        width=new_width,
        height=new_height,
    ).images[0]

    # Return the (sketch, result) pair for the image slider plus the two
    # individual images, matching the three output components wired up below.
    return (image, out), out, controlnet_img
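# A minimal headless usage sketch (the file name is hypothetical; `run`
# expects the ImageEditor payload, i.e. a dict with the drawing under
# "composite", and the `spaces.GPU` decorator is a no-op outside a Space):
#
#     sketch = {"composite": Image.open("my_sketch.png").convert("L")}
#     slider_pair, result, control = run(sketch, "a lighthouse on a cliff", "", "Cinematic", seed=42)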
with gr.Blocks(css="style.css") as demo:
    gr.Markdown(DESCRIPTION, elem_id="description")
    gr.DuplicateButton(
        value="Duplicate Space for private use",
        elem_id="duplicate-button",
        visible=os.getenv("SHOW_DUPLICATE_BUTTON") == "1",
    )

    with gr.Row():
        with gr.Column():
            with gr.Group():
                image = gr.ImageEditor(
                    type="pil",
                    image_mode="L",
                    crop_size=(512, 512),
                    brush=gr.Brush(color_mode="fixed", colors=["#000000"]),
                )
                prompt = gr.Textbox(label="Prompt")
                style = gr.Dropdown(label="Style", choices=STYLE_NAMES, value=DEFAULT_STYLE_NAME)
                use_hed = gr.Checkbox(
                    label="use HED detector",
                    value=False,
                    info="check this box if you upload an image instead of sketching",
                )
                run_button = gr.Button("Run")
            with gr.Accordion("Advanced options", open=False):
                negative_prompt = gr.Textbox(
                    label="Negative prompt",
                    value="extra digit, fewer digits, cropped, worst quality, low quality, glitch, deformed, mutated, ugly, disfigured",
                )
                num_steps = gr.Slider(
                    label="Number of steps",
                    minimum=1,
                    maximum=50,
                    step=1,
                    value=25,
                )
                guidance_scale = gr.Slider(
                    label="Guidance scale",
                    minimum=0.1,
                    maximum=10.0,
                    step=0.1,
                    value=5,
                )
                controlnet_conditioning_scale = gr.Slider(
                    label="ControlNet conditioning scale",
                    minimum=0.5,
                    maximum=5.0,
                    step=0.1,
                    value=0.9,
                )
                seed = gr.Slider(
                    label="Seed",
                    minimum=0,
                    maximum=MAX_SEED,
                    step=1,
                    value=0,
                )
                randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
        with gr.Column():
            with gr.Group():
                image_slider = ImageSlider(position=0.5)
            with gr.Row():
                result = gr.Image(label="result", height=400)
                sketch_image = gr.Image(label="sketch")

    inputs = [
        image,
        prompt,
        negative_prompt,
        style,
        num_steps,
        guidance_scale,
        controlnet_conditioning_scale,
        seed,
        use_hed,
    ]
    outputs = [image_slider, result, sketch_image]
    run_button.click(
        lambda: None, inputs=None, outputs=image_slider
    ).then(
        fn=run, inputs=inputs, outputs=outputs
    )

    prompt.submit(
        fn=randomize_seed_fn,
        inputs=[seed, randomize_seed],
        outputs=seed,
        queue=False,
        api_name=False,
    ).then(
        lambda: None, inputs=None, outputs=image_slider
    ).then(
        fn=run,
        inputs=inputs,
        outputs=outputs,
        api_name=False,
    )
    negative_prompt.submit(
        fn=randomize_seed_fn,
        inputs=[seed, randomize_seed],
        outputs=seed,
        queue=False,
        api_name=False,
    ).then(
        lambda: None, inputs=None, outputs=image_slider
    ).then(
        fn=run,
        inputs=inputs,
        outputs=outputs,
        api_name=False,
    )

demo.queue().launch()
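# A hedged variant for local testing with a public link (standard Gradio
# options, not used by this demo as written):
#
#     demo.queue(max_size=20).launch(share=True)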