import random
import gradio as gr
from datasets import load_dataset
from PIL import Image
from model import get_sd_small, get_sd_tiny
from trans_google import google_translator
from i18n import i18nTranslator
from diffusers import StableDiffusionPipeline, EulerDiscreteScheduler, DDIMScheduler, KDPM2AncestralDiscreteScheduler, \
    UniPCMultistepScheduler, DPMSolverSinglestepScheduler, DEISMultistepScheduler, PNDMScheduler, \
    DPMSolverMultistepScheduler, HeunDiscreteScheduler, EulerAncestralDiscreteScheduler, DDPMScheduler, \
    LMSDiscreteScheduler, KDPM2DiscreteScheduler
import torch
import base64
from io import BytesIO

word_list_dataset = load_dataset("Gustavosta/Stable-Diffusion-Prompts")
word_list = word_list_dataset["train"]["Prompt"]
is_gpu_busy = False
# translator = i18nTranslator()
# translator.init(path='locales')
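# Sampler names exposed in the UI; each maps to a diffusers scheduler in infer().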
samplers = [
"EulerDiscrete",
"EulerAncestralDiscrete",
"UniPCMultistep",
"DPMSolverSinglestep",
"DPMSolverMultistep",
"KDPM2Discrete",
"KDPM2AncestralDiscrete",
"DEISMultistep",
"HeunDiscrete",
"PNDM",
"DDPM",
"DDIM",
"LMSDiscrete",
]
rand = random.Random()
translator = google_translator()
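# Preload both pipelines: the tiny model is tuned for 512x512, the small model
# handles all other resolutions.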
pipe = get_sd_tiny()
other_pipe = get_sd_small()
def get_pipe(width: int, height: int):
if width == 512 and height == 512:
return pipe
else:
return other_pipe
def infer(prompt: str, negative: str, width: int, height: int, sampler: str, steps: int, seed: int, scale: float):
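    """Generate one image from the (translated) prompt and return it as a
    one-element list holding a base64-encoded JPEG data URI."""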
global is_gpu_busy
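    # A seed of 0 means "random": draw a fresh seed for this call.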
if seed == 0:
seed = rand.randint(0, 10000)
else:
seed = int(seed)
pipeline = get_pipe(width, height)
images = []
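    # Use a seeded CUDA generator for reproducible results when a GPU is
    # available; otherwise let the pipeline fall back to its default generator.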
if torch.cuda.is_available():
generator = torch.Generator(device="cuda").manual_seed(seed)
else:
generator = None
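    # Swap in the scheduler matching the selected sampler; unknown names keep
    # the pipeline's current scheduler.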
    scheduler_classes = {
        "EulerDiscrete": EulerDiscreteScheduler,
        "EulerAncestralDiscrete": EulerAncestralDiscreteScheduler,
        "UniPCMultistep": UniPCMultistepScheduler,
        "DPMSolverSinglestep": DPMSolverSinglestepScheduler,
        "DPMSolverMultistep": DPMSolverMultistepScheduler,
        "KDPM2Discrete": KDPM2DiscreteScheduler,
        "KDPM2AncestralDiscrete": KDPM2AncestralDiscreteScheduler,
        "DEISMultistep": DEISMultistepScheduler,
        "HeunDiscrete": HeunDiscreteScheduler,
        "PNDM": PNDMScheduler,
        "DDPM": DDPMScheduler,
        "DDIM": DDIMScheduler,
        "LMSDiscrete": LMSDiscreteScheduler,
    }
    scheduler_cls = scheduler_classes.get(sampler)
    if scheduler_cls is not None:
        pipeline.scheduler = scheduler_cls.from_config(pipeline.scheduler.config)
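    # Translate the prompt and negative prompt to English; if translation
    # fails, fall back to the raw input text.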
try:
translate_prompt = translator.translate(prompt, lang_tgt='en')
translate_negative = translator.translate(negative, lang_tgt='en')
except Exception as ex:
print(ex)
translate_prompt = prompt
translate_negative = negative
image = pipeline(prompt=translate_prompt,
negative_prompt=translate_negative,
guidance_scale=scale,
num_inference_steps=steps,
generator=generator,
height=height,
width=width).images[0]
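    # Encode the image as a base64 JPEG data URI so the gallery can render it
    # directly (the event handlers are wired with postprocess=False).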
buffered = BytesIO()
image.save(buffered, format="JPEG")
img_str = base64.b64encode(buffered.getvalue())
img_base64 = bytes("data:image/jpeg;base64,", encoding='utf-8') + img_str
images.append(img_base64)
return images
css = """
.gradio-container {
font-family: 'IBM Plex Sans', sans-serif;
}
.gr-button {
color: white;
border-color: black;
background: black;
}
input[type='range'] {
accent-color: black;
}
.dark input[type='range'] {
accent-color: #dfdfdf;
}
.container {
max-width: 1130px;
margin: auto;
padding-top: 1.5rem;
}
#prompt-column {
min-height: 520px
}
#gallery {
min-height: 22rem;
margin-bottom: 15px;
margin-left: auto;
margin-right: auto;
border-bottom-right-radius: .5rem !important;
border-bottom-left-radius: .5rem !important;
}
#gallery>div>.h-full {
min-height: 20rem;
}
.details:hover {
text-decoration: underline;
}
.gr-button {
white-space: nowrap;
}
.gr-button:focus {
border-color: rgb(147 197 253 / var(--tw-border-opacity));
outline: none;
box-shadow: var(--tw-ring-offset-shadow), var(--tw-ring-shadow), var(--tw-shadow, 0 0 #0000);
--tw-border-opacity: 1;
--tw-ring-offset-shadow: var(--tw-ring-inset) 0 0 0 var(--tw-ring-offset-width) var(--tw-ring-offset-color);
--tw-ring-shadow: var(--tw-ring-inset) 0 0 0 calc(3px var(--tw-ring-offset-width)) var(--tw-ring-color);
--tw-ring-color: rgb(191 219 254 / var(--tw-ring-opacity));
--tw-ring-opacity: .5;
}
#advanced-btn {
font-size: .7rem !important;
line-height: 19px;
margin-top: 12px;
margin-bottom: 12px;
padding: 2px 8px;
border-radius: 14px !important;
}
#advanced-options {
display: none;
margin-bottom: 20px;
}
.footer {
margin-bottom: 45px;
margin-top: 35px;
text-align: center;
border-bottom: 1px solid #e5e5e5;
}
.footer>p {
font-size: .8rem;
display: inline-block;
padding: 0 10px;
transform: translateY(10px);
background: white;
}
.dark .footer {
border-color: #303030;
}
.dark .footer>p {
background: #0b0f19;
}
.acknowledgments h4{
margin: 1.25em 0 .25em 0;
font-weight: bold;
font-size: 115%;
}
.animate-spin {
animation: spin 1s linear infinite;
}
@keyframes spin {
from {
transform: rotate(0deg);
}
to {
transform: rotate(360deg);
}
}
#share-btn-container {
display: flex; padding-left: 0.5rem !important; padding-right: 0.5rem !important; background-color: #000000; justify-content: center; align-items: center; border-radius: 9999px !important; width: 13rem;
margin-top: 10px;
margin-left: auto;
}
#share-btn {
all: initial; color: #ffffff;font-weight: 600; cursor:pointer; font-family: 'IBM Plex Sans', sans-serif; margin-left: 0.5rem !important; padding-top: 0.25rem !important; padding-bottom: 0.25rem !important;right:0;
}
#share-btn * {
all: unset;
}
#share-btn-container div:nth-child(-n+2){
width: auto !important;
min-height: 0px !important;
}
#share-btn-container .wrap {
display: none !important;
}
.gr-form{
flex: 1 1 50%; border-top-right-radius: 0; border-bottom-right-radius: 0;
}
#prompt-container{
gap: 0;
}
#prompt-text-input, #negative-prompt-text-input{padding: .45rem 0.625rem}
#component-16{border-top-width: 1px!important;margin-top: 1em}
.image_duplication{position: absolute; width: 100px; left: 50px}
.generate-container {display: flex; justify-content: flex-end;}
#generate-btn {background: linear-gradient(to bottom right, #ffedd5, #fdba74)}
"""
block = gr.Blocks(css=css)
# text, negative, width, height, sampler, steps, seed, guidance_scale
# examples = [
# [
# 'A high tech solarpunk utopia in the Amazon rainforest',
# 'low quality',
# 512,
# 512,
# 'ddim',
# 30,
# 0,
# 9
# ],
# [
# 'A pikachu fine dining with a view to the Eiffel Tower',
# 'low quality',
# 512,
# 512,
# 'ddim',
# 30,
# 0,
# 9
# ],
# [
# 'A mecha robot in a favela in expressionist style',
# 'low quality, 3d, photorealistic',
# 512,
# 512,
# 'ddim',
# 30,
# 0,
# 9
# ],
# [
# 'an insect robot preparing a delicious meal',
# 'low quality, illustration',
# 512,
# 512,
# 'ddim',
# 30,
# 0,
# 9
# ],
# [
# "A small cabin on top of a snowy mountain in the style of Disney, artstation",
# 'low quality, ugly',
# 512,
# 512,
# 'ddim',
# 30,
# 0,
# 9
# ],
# ]
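# Build example rows from the prompt dataset: prompt, negative prompt, width,
# height, sampler, steps, seed, and guidance scale.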
examples = [
    [prompt, "low quality", 512, 512, "DDIM", 30, 0, 9]
    for prompt in word_list[:500]
]
with block:
gr.HTML(
"""
<div style="text-align: center; margin: 0 auto;">
<div
style="
display: inline-flex;
align-items: center;
gap: 0.8rem;
font-size: 1.75rem;
"
>
<svg
width="0.65em"
height="0.65em"
viewBox="0 0 115 115"
fill="none"
xmlns="http://www.w3.org/2000/svg"
>
<rect width="23" height="23" fill="white"></rect>
<rect y="69" width="23" height="23" fill="white"></rect>
<rect x="23" width="23" height="23" fill="#AEAEAE"></rect>
<rect x="23" y="69" width="23" height="23" fill="#AEAEAE"></rect>
<rect x="46" width="23" height="23" fill="white"></rect>
<rect x="46" y="69" width="23" height="23" fill="white"></rect>
<rect x="69" width="23" height="23" fill="black"></rect>
<rect x="69" y="69" width="23" height="23" fill="black"></rect>
<rect x="92" width="23" height="23" fill="#D9D9D9"></rect>
<rect x="92" y="69" width="23" height="23" fill="#AEAEAE"></rect>
<rect x="115" y="46" width="23" height="23" fill="white"></rect>
<rect x="115" y="115" width="23" height="23" fill="white"></rect>
<rect x="115" y="69" width="23" height="23" fill="#D9D9D9"></rect>
<rect x="92" y="46" width="23" height="23" fill="#AEAEAE"></rect>
<rect x="92" y="115" width="23" height="23" fill="#AEAEAE"></rect>
<rect x="92" y="69" width="23" height="23" fill="white"></rect>
<rect x="69" y="46" width="23" height="23" fill="white"></rect>
<rect x="69" y="115" width="23" height="23" fill="white"></rect>
<rect x="69" y="69" width="23" height="23" fill="#D9D9D9"></rect>
<rect x="46" y="46" width="23" height="23" fill="black"></rect>
<rect x="46" y="115" width="23" height="23" fill="black"></rect>
<rect x="46" y="69" width="23" height="23" fill="black"></rect>
<rect x="23" y="46" width="23" height="23" fill="#D9D9D9"></rect>
<rect x="23" y="115" width="23" height="23" fill="#AEAEAE"></rect>
<rect x="23" y="69" width="23" height="23" fill="black"></rect>
</svg>
<h1 style="font-weight: 900; margin-bottom: 7px;margin-top:5px">
Stable Diffusion 2.1 Demo
</h1>
</div>
<p style="margin-bottom: 10px; font-size: 94%; line-height: 23px;">
        Stable Diffusion 2.1 demo app. <br />
        Click the "Generate image" button to generate an image, and adjust the parameters to experiment. <br />
        512*512 is the optimized resolution: each image takes about 30 seconds, and other sizes may take longer. <br />
        This is a simplified demo; more advanced features can further improve image quality. <br />
</p>
</div>
"""
)
with gr.Group():
with gr.Box():
with gr.Row(elem_id="prompt-container").style(mobile_collapse=False, equal_height=True):
with gr.Column(elem_id="prompt-column"):
text = gr.Textbox(
label="Enter your prompt",
show_label=False,
max_lines=1,
placeholder="Enter your prompt",
elem_id="prompt-text-input",
).style(
border=(True, False, True, True),
rounded=(True, False, False, True),
container=False,
)
negative = gr.Textbox(
label="Enter your negative prompt",
show_label=False,
max_lines=1,
placeholder="Enter a negative prompt",
elem_id="negative-prompt-text-input",
).style(
border=(True, False, True, True),
rounded=(True, False, False, True),
container=False,
)
with gr.Row(elem_id="txt2img_size", scale=4):
width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512,
elem_id="txt2img_width")
height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512,
elem_id="txt2img_height")
with gr.Row(elem_id="txt2img_sampler", scale=4):
seed = gr.Number(value=0, label="Seed", elem_id="txt2img_seed")
sampler = gr.Dropdown(
            samplers, value="DDIM",
multiselect=False,
label="Sampler",
            info="Select a sampler"
)
steps = gr.Slider(minimum=1, maximum=80, step=1, elem_id=f"steps", label="Sampling steps",
value=20)
with gr.Accordion("Advanced settings", open=False):
# gr.Markdown("Advanced settings are temporarily unavailable")
# samples = gr.Slider(label="Images", minimum=1, maximum=4, value=4, step=1)
# steps = gr.Slider(label="Steps", minimum=1, maximum=50, value=45, step=1)
guidance_scale = gr.Slider(
label="Guidance Scale", minimum=0, maximum=50, value=9, step=0.1
)
with gr.Row(elem_id="generate-container", elem_classes="generate-container").style(height="100"):
btn = gr.Button("Generate image", elem_id="generate-btn", elem_classes="generate-btn").style(
margin=False,
rounded=(False, True, True, False),
full_width=False,
)
gallery = gr.Gallery(
label="Generated images", show_label=False, elem_id="gallery"
).style(grid=[2], height="auto")
# with gr.Group(elem_id="container-advanced-btns"):
# # advanced_button = gr.Button("Advanced options", elem_id="advanced-btn")
# with gr.Group(elem_id="share-btn-container"):
# community_icon = gr.HTML(community_icon_html)
# loading_icon = gr.HTML(loading_icon_html)
# share_button = gr.Button("Share to community", elem_id="share-btn")
ex = gr.Examples(examples=examples, fn=infer,
inputs=[text, negative, width, height, sampler, steps, seed, guidance_scale],
outputs=[gallery],
examples_per_page=5,
cache_examples=False)
ex.dataset.headers = [""]
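    # Run inference on Enter in either textbox or on the Generate button;
    # postprocess=False because infer() already returns base64 data URIs.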
negative.submit(infer, inputs=[text, negative, width, height, sampler, steps, seed, guidance_scale],
outputs=[gallery], postprocess=False)
text.submit(infer, inputs=[text, negative, width, height, sampler, steps, seed, guidance_scale],
outputs=[gallery], postprocess=False)
btn.click(infer, inputs=[text, negative, width, height, sampler, steps, seed, guidance_scale],
outputs=[gallery], postprocess=False)
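# Queue incoming requests to cap concurrency, then launch the app.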
block.queue(concurrency_count=80,
max_size=100).launch(
max_threads=150,
# server_port=6006,
# share=True,
)