import gradio as gr
import torch
from diffusers import SemanticStableDiffusionPipeline

device = "cuda" if torch.cuda.is_available() else "cpu"

pipe = SemanticStableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
pipe = pipe.to(device)
gen = torch.Generator(device=device)

# Sometimes the NSFW checker is confused by the Pokémon images; you can disable
# it at your own risk here.
disable_safety = False

if disable_safety:
    def null_safety(images, **kwargs):
        # Bypass the checker: return the images unchanged and report no NSFW content.
        return images, False

    pipe.safety_checker = null_safety


def infer(prompt, steps, scale, seed,
          editing_prompt_1=None, reverse_editing_direction_1=False,
          edit_warmup_steps_1=10, edit_guidance_scale_1=5, edit_threshold_1=0.95,
          editing_prompt_2=None, reverse_editing_direction_2=False,
          edit_warmup_steps_2=10, edit_guidance_scale_2=5, edit_threshold_2=0.95,
          edit_momentum_scale=0.5, edit_mom_beta=0.6):
    # First pass: plain generation without semantic guidance, for comparison.
    gen.manual_seed(seed)
    images = pipe(prompt, guidance_scale=scale, num_inference_steps=steps,
                  generator=gen).images

    editing_prompt = [editing_prompt_1, editing_prompt_2]
    reverse_editing_direction = [reverse_editing_direction_1, reverse_editing_direction_2]
    edit_warmup_steps = [edit_warmup_steps_1, edit_warmup_steps_2]
    edit_guidance_scale = [edit_guidance_scale_1, edit_guidance_scale_2]
    edit_threshold = [edit_threshold_1, edit_threshold_2]

    # Drop empty or missing edit prompts together with their parameters.
    indices = [ind for ind, val in enumerate(editing_prompt) if val is None or len(val) <= 1]
    for index in sorted(indices, reverse=True):
        del editing_prompt[index]
        del reverse_editing_direction[index]
        del edit_warmup_steps[index]
        del edit_guidance_scale[index]
        del edit_threshold[index]

    # Second pass: same seed, with semantic guidance applied.
    gen.manual_seed(seed)
    images.extend(pipe(prompt, guidance_scale=scale, num_inference_steps=steps,
                       generator=gen,
                       editing_prompt=editing_prompt,
                       reverse_editing_direction=reverse_editing_direction,
                       edit_warmup_steps=edit_warmup_steps,
                       edit_guidance_scale=edit_guidance_scale,
                       edit_threshold=edit_threshold,
                       edit_momentum_scale=edit_momentum_scale,
                       edit_mom_beta=edit_mom_beta).images)

    return images
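# Usage note (a sketch; seed and parameter values mirror the first example
# below). `infer` returns two images: the plain generation followed by the
# semantically guided one from the same seed:
#
#   images = infer('a photo of a cat', steps=50, scale=7, seed=3,
#                  editing_prompt_1='sunglasses', edit_guidance_scale_1=6)
#   images[0].save('cat.png')             # unedited generation
#   images[1].save('cat_sunglasses.png')  # guided towards 'sunglasses'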
css = """
a { color: inherit; text-decoration: underline; }
.gradio-container { font-family: 'IBM Plex Sans', sans-serif; }
.gr-button { color: white; border-color: #9d66e5; background: #9d66e5; }
input[type='range'] { accent-color: #9d66e5; }
.dark input[type='range'] { accent-color: #dfdfdf; }
.container { max-width: 730px; margin: auto; padding-top: 1.5rem; }
#gallery {
    min-height: 22rem;
    margin-bottom: 15px;
    margin-left: auto;
    margin-right: auto;
    border-bottom-right-radius: .5rem !important;
    border-bottom-left-radius: .5rem !important;
}
#gallery>div>.h-full { min-height: 20rem; }
.details:hover { text-decoration: underline; }
.gr-button { white-space: nowrap; }
.gr-button:focus {
    border-color: rgb(147 197 253 / var(--tw-border-opacity));
    outline: none;
    box-shadow: var(--tw-ring-offset-shadow), var(--tw-ring-shadow), var(--tw-shadow, 0 0 #0000);
    --tw-border-opacity: 1;
    --tw-ring-offset-shadow: var(--tw-ring-inset) 0 0 0 var(--tw-ring-offset-width) var(--tw-ring-offset-color);
    --tw-ring-shadow: var(--tw-ring-inset) 0 0 0 calc(3px + var(--tw-ring-offset-width)) var(--tw-ring-color);
    --tw-ring-color: rgb(191 219 254 / var(--tw-ring-opacity));
    --tw-ring-opacity: .5;
}
#advanced-options { margin-bottom: 20px; }
.footer { margin-bottom: 45px; margin-top: 35px; text-align: center; border-bottom: 1px solid #e5e5e5; }
.footer>p { font-size: .8rem; display: inline-block; padding: 0 10px; transform: translateY(10px); background: white; }
.dark .footer { border-color: #303030; }
.dark .footer>p { background: #0b0f19; }
.acknowledgments h4 { margin: 1.25em 0 .25em 0; font-weight: bold; font-size: 115%; }
"""

block = gr.Blocks(css=css)

# Each row: prompt, steps, guidance scale, seed, then (edit prompt, reverse
# direction, warmup steps, edit guidance scale, threshold) for each edit slot.
examples = [
    ['a photo of a cat', 50, 7, 3,
     'sunglasses', False, 10, 6, 0.95,
     '', False, 10, 5, 0.95],
    ['an image of a crowded boulevard, realistic, 4k', 50, 7, 9,
     'crowd, crowded, people', True, 10, 8.3, 0.9,
     '', False, 10, 5, 0.95],
    ['a castle next to a river', 50, 7, 48,
     'boat on a river', False, 15, 6, 0.9,
     'monet, impression, sunrise', False, 18, 6, 0.8],
    ['a portrait of a king, full body shot, 8k', 50, 7, 33,
     'male', True, 5, 5, 0.9,
     'female', False, 5, 5, 0.9],
    ['a photo of a flowerpot', 50, 7, 2,
     'glasses', False, 12, 5, 0.975,
     '', False, 10, 5, 0.95],
    ['a photo of the face of a woman', 50, 7, 21,
     'smiling, smile', False, 15, 3, 0.99,
     'curls, wavy hair, curly hair', False, 13, 3, 0.925],
]

with block:
    gr.HTML(
        """
        <div class="container">
            <p>
                Interact with semantic concepts during the diffusion process. Details can be
                found in the paper <i>SEGA: Instructing Diffusion using Semantic Dimensions</i>.
            </p>
            <p>Simply use the edit prompts to make arbitrary changes to the generation.</p>
            <p>
                For faster inference without waiting in the queue, you may duplicate the Space
                and upgrade to a GPU in the settings.
            </p>
            <p>Created by Manuel Brack and Patrick Schramowski at AIML Lab.</p>
        </div>
        """
    )
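    # Minimal UI wiring so the script runs end-to-end. This is a sketch, not the
    # Space's original layout: the component arrangement and labels below are
    # assumptions, but the input order matches `infer` and the `examples` rows.
    text = gr.Textbox(label="Prompt", placeholder="Enter your prompt")
    steps = gr.Slider(1, 100, value=50, step=1, label="Steps")
    scale = gr.Slider(1, 20, value=7, step=0.5, label="Guidance scale")
    seed = gr.Slider(0, 2**16, value=3, step=1, label="Seed")

    edit_inputs = []
    for i in (1, 2):
        edit_inputs += [
            gr.Textbox(label=f"Edit prompt {i}"),
            gr.Checkbox(label=f"Reverse edit direction {i}"),
            gr.Slider(0, 50, value=10, step=1, label=f"Edit warmup steps {i}"),
            gr.Slider(1, 20, value=5, step=0.5, label=f"Edit guidance scale {i}"),
            gr.Slider(0, 1, value=0.95, step=0.005, label=f"Edit threshold {i}"),
        ]

    gallery = gr.Gallery(label="Generated images", elem_id="gallery")
    btn = gr.Button("Generate images")

    inputs = [text, steps, scale, seed] + edit_inputs
    btn.click(infer, inputs=inputs, outputs=gallery)
    gr.Examples(examples=examples, inputs=inputs, outputs=gallery, fn=infer)

block.launch()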