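# Gradio Space for the Imagic Stable Diffusion community pipeline:
# text-based editing of a real input image toward a target prompt.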
import gradio as gr
from PIL import Image
import torch
import os
from diffusers import DiffusionPipeline, DDIMScheduler
MY_SECRET_TOKEN = os.environ.get('HF_TOKEN_SD')

has_cuda = torch.cuda.is_available()
device = torch.device('cuda' if has_cuda else 'cpu')
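# Load Stable Diffusion v1.4 through the "imagic_stable_diffusion" community
# pipeline, with a DDIM scheduler and the safety checker disabled.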
pipe = DiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4",
    safety_checker=None,
    use_auth_token=MY_SECRET_TOKEN,
    custom_pipeline="imagic_stable_diffusion",
    scheduler=DDIMScheduler(
        beta_start=0.00085,
        beta_end=0.012,
        beta_schedule="scaled_linear",
        clip_sample=False,
        set_alpha_to_one=False,
    ),
).to(device)
#generator = torch.Generator("cuda").manual_seed(0)
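# Imagic edits a real image in two stages: pipe.train() optimizes a text
# embedding for the target prompt and fine-tunes the diffusion model so it
# reconstructs the input image; pipe(alpha=...) then samples from an
# interpolation between the optimized and target embeddings (in this
# community pipeline, a larger alpha weights the target text more heavily).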
def infer(prompt, init_image):
    # gr.Image is configured with type="filepath", so init_image is a path string
    init_image = Image.open(init_image).convert("RGB")
    init_image = init_image.resize((512, 512))
    pipe.train(
        prompt,
        init_image,
        guidance_scale=7.5,
        num_inference_steps=50)
    res = pipe(alpha=1)
    return res.images[0]
title = """
<div style="text-align: center; max-width: 650px; margin: 0 auto;">
<div
style="
display: inline-flex;
align-items: center;
gap: 0.8rem;
font-size: 1.75rem;
"
>
<h1 style="font-weight: 900; margin-bottom: 7px;">
Imagic Stable Diffusion • Community Pipeline
</h1>
</div>
<p style="margin-bottom: 10px; font-size: 94%">
Text-Based Real Image Editing with Diffusion Models
<br />This pipeline applies the method from <a href="https://arxiv.org/abs/2210.09276" target="_blank">this paper</a> to Stable Diffusion, allowing real images to be edited from a text prompt.
</p>
<br /><img src="https://user-images.githubusercontent.com/788417/196388568-4ee45edd-e990-452c-899f-c25af32939be.png" style="margin:20px 0;"/>
<p style="font-size: 94%">
You can skip the queue by duplicating this space:
<a style="display: flex;align-items: center;justify-content: center;height: 30px;" href="https://huggingface.co/spaces/fffiloni/imagic-stable-diffusion?duplicate=true"><img src="https://img.shields.io/badge/-Duplicate%20Space-blue?labelColor=white&style=flat&logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAAXNSR0IArs4c6QAAAP5JREFUOE+lk7FqAkEURY+ltunEgFXS2sZGIbXfEPdLlnxJyDdYB62sbbUKpLbVNhyYFzbrrA74YJlh9r079973psed0cvUD4A+4HoCjsA85X0Dfn/RBLBgBDxnQPfAEJgBY+A9gALA4tcbamSzS4xq4FOQAJgCDwV2CPKV8tZAJcAjMMkUe1vX+U+SMhfAJEHasQIWmXNN3abzDwHUrgcRGmYcgKe0bxrblHEB4E/pndMazNpSZGcsZdBlYJcEL9Afo75molJyM2FxmPgmgPqlWNLGfwZGG6UiyEvLzHYDmoPkDDiNm9JR9uboiONcBXrpY1qmgs21x1QwyZcpvxt9NS09PlsPAAAAAElFTkSuQmCC&logoWidth=14" alt="Duplicate Space"></a>
</p>
</div>
"""
article = """
<div class="footer">
<p><a href="https://github.com/huggingface/diffusers/tree/main/examples/community#imagic-stable-diffusion" target="_blank">Community pipeline</a>
contributed by <a href="https://github.com/MarkRich" style="text-decoration: underline;" target="_blank">Mark Rich</a> -
Gradio Demo by 🤗 <a href="https://twitter.com/fffiloni" target="_blank">Sylvain Filoni</a>
</p>
</div>
"""
css = '''
#col-container {max-width: 700px; margin-left: auto; margin-right: auto;}
a {text-decoration-line: underline; font-weight: 600;}
.footer {
margin-bottom: 45px;
margin-top: 35px;
text-align: center;
border-bottom: 1px solid #e5e5e5;
}
.footer>p {
font-size: .8rem;
display: inline-block;
padding: 0 10px;
transform: translateY(10px);
background: white;
}
.dark .footer {
border-color: #303030;
}
.dark .footer>p {
background: #0b0f19;
}
'''
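# Gradio UI: target text and input image in, edited image out.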
with gr.Blocks(css=css) as block:
    with gr.Column(elem_id="col-container"):
        gr.HTML(title)

        prompt_input = gr.Textbox(label="Target text", placeholder="Describe the image with what you want to change about the subject")
        image_init = gr.Image(source="upload", type="filepath", label="Input Image")
        submit_btn = gr.Button("Submit")
        image_output = gr.Image(label="Edited image")

        gr.HTML(article)

        submit_btn.click(fn=infer, inputs=[prompt_input, image_init], outputs=[image_output])

block.queue(max_size=32, concurrency_count=20).launch(show_api=False)