# UrangCompare / app.py
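"""Gradio Space that renders the same prompt with UrangDiffusion 1.3 and 1.4 for a side-by-side comparison."""
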
import os
import spaces
import torch
from diffusers import StableDiffusionXLPipeline, EulerAncestralDiscreteScheduler
import gradio as gr
import random
import tqdm

# Disable tqdm's monitor thread (setting monitor_interval to 0 turns it off)
tqdm.tqdm.monitor_interval = 0

# Load the diffusion pipelines
pipe1 = StableDiffusionXLPipeline.from_pretrained(
    "kayfahaarukku/UrangDiffusion-1.3",
    torch_dtype=torch.float16,
    custom_pipeline="lpw_stable_diffusion_xl",
)
pipe1.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe1.scheduler.config)

pipe2 = StableDiffusionXLPipeline.from_pretrained(
    "kayfahaarukku/UrangDiffusion-1.4",
    torch_dtype=torch.float16,
    custom_pipeline="lpw_stable_diffusion_xl",
)
pipe2.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe2.scheduler.config)
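
# Both pipelines use the Euler Ancestral scheduler, which is what the
# "Sampler: Euler a" label in the generated metadata refers to.
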
# Function to generate images from both models
@spaces.GPU
def generate_comparison(prompt, negative_prompt, use_defaults, resolution, guidance_scale, num_inference_steps, seed, randomize_seed, progress=gr.Progress()):
    pipe1.to('cuda')
    pipe2.to('cuda')

    if randomize_seed:
        seed = random.randint(0, 99999999)

    if use_defaults:
        prompt = f"{prompt}, masterpiece, best quality, amazing quality, very aesthetic"
        negative_prompt = f"nsfw, lowres, (bad), text, error, fewer, extra, missing, worst quality, jpeg artifacts, low quality, watermark, unfinished, displeasing, oldest, early, chromatic aberration, signature, extra digits, artistic error, username, scan, [abstract], {negative_prompt}"

    # Seed the generator; it is re-seeded before the second pipeline so both
    # models start from identical initial latents.
    generator = torch.manual_seed(seed)

    def make_callback(step_offset):
        # Combined progress bar: UrangDiffusion-1.3 fills the first half,
        # UrangDiffusion-1.4 the second half.
        def callback(step, timestep, latents):
            progress((step_offset + step) / (2 * num_inference_steps))
        return callback

    width, height = map(int, resolution.split('x'))
    # Generate image with UrangDiffusion-1.3
    image1 = pipe1(
        prompt,
        negative_prompt=negative_prompt,
        width=width,
        height=height,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        generator=generator,
        callback=make_callback(0),
        callback_steps=1
    ).images[0]
    # Generate image with UrangDiffusion-1.4
    generator = torch.manual_seed(seed)  # re-seed so both models share the same starting noise
    image2 = pipe2(
        prompt,
        negative_prompt=negative_prompt,
        width=width,
        height=height,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        generator=generator,
        callback=make_callback(num_inference_steps),
        callback_steps=1
    ).images[0]
    torch.cuda.empty_cache()

    metadata_text = f"{prompt}\nNegative prompt: {negative_prompt}\nSteps: {num_inference_steps}, Sampler: Euler a, Size: {width}x{height}, Seed: {seed}, CFG scale: {guidance_scale}"

    return image1, image2, seed, metadata_text
# Define Gradio interface
def interface_fn(prompt, negative_prompt, use_defaults, resolution, guidance_scale, num_inference_steps, seed, randomize_seed, progress=gr.Progress()):
    image1, image2, seed, metadata_text = generate_comparison(prompt, negative_prompt, use_defaults, resolution, guidance_scale, num_inference_steps, seed, randomize_seed, progress)
    return image1, image2, seed, gr.update(value=metadata_text)
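
# Reset all controls (and the metadata box) to their default values; the order of the
# returned updates must match the reset_button "outputs" list below.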
def reset_inputs():
    return gr.update(value=''), gr.update(value=''), gr.update(value=True), gr.update(value='896x1152'), gr.update(value=5), gr.update(value=26), gr.update(value=0), gr.update(value=True), gr.update(value='')
with gr.Blocks(title="UrangDiffusion Comparison Demo", theme="NoCrypt/miku@1.2.1") as demo:
    gr.HTML(
        "<h1>UrangDiffusion 1.3 vs 1.4 Comparison Demo</h1>"
        "This demo showcases a comparison between UrangDiffusion 1.3 and 1.4."
    )

    with gr.Row():
        with gr.Column():
            prompt_input = gr.Textbox(lines=2, placeholder="Enter prompt here", label="Prompt")
            negative_prompt_input = gr.Textbox(lines=2, placeholder="Enter negative prompt here", label="Negative Prompt")
            use_defaults_input = gr.Checkbox(label="Use Default Quality Tags and Negative Prompt", value=True)
            resolution_input = gr.Radio(
                choices=[
                    "1024x1024", "1152x896", "896x1152", "1216x832", "832x1216",
                    "1344x768", "768x1344", "1536x640", "640x1536"
                ],
                label="Resolution",
                value="896x1152"
            )
            guidance_scale_input = gr.Slider(minimum=1, maximum=20, step=0.5, label="Guidance Scale", value=5)
            num_inference_steps_input = gr.Slider(minimum=1, maximum=100, step=1, label="Number of Inference Steps", value=26)
            seed_input = gr.Slider(minimum=0, maximum=99999999, step=1, label="Seed", value=0, interactive=True)
            randomize_seed_input = gr.Checkbox(label="Randomize Seed", value=True)
            generate_button = gr.Button("Generate Comparison")
            reset_button = gr.Button("Reset")
        with gr.Column():
            with gr.Row():
                output_image1 = gr.Image(type="pil", label="UrangDiffusion 1.3")
                output_image2 = gr.Image(type="pil", label="UrangDiffusion 1.4")
            with gr.Accordion("Parameters", open=False):
                gr.Markdown(
                    """
                    These parameters are compatible with Stable Diffusion WebUI's parameter importer.
"""
)
metadata_textbox = gr.Textbox(lines=6, label="Image Parameters", interactive=False, max_lines=6)
gr.Markdown(
"""
### Recommended prompt formatting:
`1girl/1boy, character name, from what series, everything else in any order, masterpiece, best quality`
**PS:** `masterpiece, best quality, amazing quality, very aesthetic` is automatically added when "Use Default Quality Tags and Negative Prompt" is enabled
### Recommended settings:
- Steps: 25-30
- CFG: 5-7
"""
)
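
    # Wire the buttons to their handlers. seed_input is included in the outputs so the
    # seed actually used (possibly randomized) is written back to the Seed slider.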
    generate_button.click(
        interface_fn,
        inputs=[
            prompt_input, negative_prompt_input, use_defaults_input, resolution_input, guidance_scale_input, num_inference_steps_input, seed_input, randomize_seed_input
        ],
        outputs=[output_image1, output_image2, seed_input, metadata_textbox]
    )

    reset_button.click(
        reset_inputs,
        inputs=[],
        outputs=[
            prompt_input, negative_prompt_input, use_defaults_input, resolution_input, guidance_scale_input, num_inference_steps_input, seed_input, randomize_seed_input, metadata_textbox
        ]
    )
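
# Queue incoming requests (up to 20 waiting) before launching the demo.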
demo.queue(max_size=20).launch(share=False)