# HackHPC / app.py
# Author: Seanyoon — commit 80b8ba6 ("Create app.py"), 2.23 kB
# (Hugging Face Space file; scraped page chrome converted to comments.)
import gradio as gr
import torch
from semdiffusers import SemanticEditPipeline

# Fall back to CPU when no GPU is available so the app can still start
# (the original hard-coded 'cuda' and crashed on CPU-only hosts).
device = 'cuda' if torch.cuda.is_available() else 'cpu'

# Semantic-guidance pipeline built on Stable Diffusion v1.5; weights are
# downloaded from the Hugging Face Hub on first run.
pipe = SemanticEditPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
).to(device)
def infer(prompt, seed):
    """Generate a baseline and a fairness-edited image for *prompt*.

    Parameters
    ----------
    prompt : str
        Text prompt passed to the diffusion pipeline.
    seed : int | float
        RNG seed. Gradio's Number component delivers a float, so it is
        coerced to int before seeding.

    Returns
    -------
    list[tuple]
        Two (image, caption) pairs: the plain Stable Diffusion output and
        the semantically edited ("Fair Diffusion") output.
    """
    seed = int(seed)  # torch.Generator.manual_seed rejects floats
    gen = torch.Generator(device=device)
    gen.manual_seed(seed)
    # Baseline generation (no semantic guidance).
    out = pipe(
        prompt=prompt,
        generator=gen,
        num_images_per_prompt=1,
        guidance_scale=7
    )
    images = out.images[0]
    # Re-seed so the edited run starts from the *same* initial latents as
    # the baseline; without this the two images are not comparable.
    gen.manual_seed(seed)
    out_edit = pipe(
        prompt=prompt,
        generator=gen,
        num_images_per_prompt=1,
        guidance_scale=7,
        editing_prompt=['male person', 'female person'],  # Concepts to apply
        reverse_editing_direction=[True, False],  # Direction of guidance i.e. decrease the first and increase the second concept
        edit_warmup_steps=[10, 10],  # Warmup period for each concept
        edit_guidance_scale=[4, 4],  # Guidance scale for each concept
        edit_threshold=[0.95, 0.95],  # Threshold for each concept. Threshold equals the percentile of the latent space that will be discarded. I.e. threshold=0.99 uses 1% of the latent dimensions
        edit_momentum_scale=0.3,  # Momentum scale that will be added to the latent guidance
        edit_mom_beta=0.6,  # Momentum beta
        edit_weights=[1, 1]  # Weights of the individual concepts against each other
    )
    images_edited = out_edit.images[0]
    return [
        (images, 'Stable Diffusion'),
        (images_edited, 'Fair Diffusion')
    ]
# UI wiring.  The original used the removed gr.inputs/gr.outputs namespaces
# (and gr.inputs.Number has no `step` kwarg), and declared a single Image
# output, which cannot render the list of (image, caption) pairs that
# infer() returns — gr.Gallery is the matching component.
inputs = [
    gr.Textbox(label='Prompt'),
    gr.Number(label='Seed', value=0, precision=0)  # precision=0 -> integer seed
]
outputs = gr.Gallery(label='Images')
title = 'Semantic Edit Pipeline'
description = 'Semantic Edit Pipeline implementation using SemDiffusers.'
article = "<h3 style='text-align: center'><a href='https://github.com/crowsonkb/semdiffusers'>SemDiffusers</a></h3>"
gr.Interface(
    infer,
    inputs,
    outputs,
    title=title,
    description=description,
    article=article,
    theme='compact'
).launch()