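"""Upside Down Diffusion demo.

Generates a single SDXL image that depicts one prompt when viewed upright and
a second prompt when rotated 180 degrees, by alternating denoising steps
between the two prompts on normal and vertically flipped latents.
"""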
from diffusers import StableDiffusionXLPipeline
from diffusers.pipelines.stable_diffusion_xl import StableDiffusionXLPipelineOutput
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from PIL import Image, ImageOps
import gradio as gr
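# Assumes a CUDA GPU and a 2023-era diffusers release whose SDXL pipeline
# exposes the helpers used below (encode_prompt, prepare_latents,
# _get_add_time_ids, upcast_vae).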
# Load the SDXL base pipeline in half precision.
pipe = StableDiffusionXLPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    torch_dtype=torch.float16,
    variant="fp16",
    use_safetensors=True,
)
pipe.to("cuda")
@torch.no_grad()
def call(
    pipe, prompt=None, prompt2=None, height=None, width=None,
    num_inference_steps=50, denoising_end=None, guidance_scale=5.0,
    guidance_scale2=5.0, negative_prompt=None, negative_prompt2=None,
    num_images_per_prompt=1, eta=0.0, generator=None, latents=None,
    prompt_embeds=None, negative_prompt_embeds=None,
    pooled_prompt_embeds=None, negative_pooled_prompt_embeds=None,
    output_type="pil", return_dict=True, callback=None, callback_steps=1,
    cross_attention_kwargs=None, guidance_rescale=0.0, original_size=None,
    crops_coords_top_left=(0, 0), target_size=None,
    negative_original_size=None, negative_crops_coords_top_left=(0, 0),
    negative_target_size=None,
):
    # Defaults mirror StableDiffusionXLPipeline.__call__ so simple_call can
    # pass only the arguments it cares about.
    height = height or pipe.default_sample_size * pipe.vae_scale_factor
    width = width or pipe.default_sample_size * pipe.vae_scale_factor
    original_size = original_size or (height, width)
    target_size = target_size or (height, width)
    pipe.check_inputs(prompt, None, height, width, callback_steps, negative_prompt, None, prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds)
    batch_size = 1 if isinstance(prompt, str) else len(prompt) if isinstance(prompt, list) else prompt_embeds.shape[0]
    device = pipe._execution_device
    do_classifier_free_guidance = guidance_scale > 1.0
    text_encoder_lora_scale = cross_attention_kwargs.get("scale", None) if cross_attention_kwargs else None
    # Encode both prompts separately. Keyword arguments matter here: SDXL's
    # encode_prompt takes prompt_2 as its second positional parameter, so a
    # positional call would silently bind `device` to prompt_2.
    prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds = pipe.encode_prompt(prompt=prompt, device=device, num_images_per_prompt=num_images_per_prompt, do_classifier_free_guidance=do_classifier_free_guidance, negative_prompt=negative_prompt, lora_scale=text_encoder_lora_scale)
    prompt2_embeds, negative_prompt2_embeds, pooled_prompt2_embeds, negative_pooled_prompt2_embeds = pipe.encode_prompt(prompt=prompt2, device=device, num_images_per_prompt=num_images_per_prompt, do_classifier_free_guidance=do_classifier_free_guidance, negative_prompt=negative_prompt2, lora_scale=text_encoder_lora_scale)
    pipe.scheduler.set_timesteps(num_inference_steps, device=device)
    timesteps = pipe.scheduler.timesteps
    num_channels_latents = pipe.unet.config.in_channels
    latents = pipe.prepare_latents(batch_size * num_images_per_prompt, num_channels_latents, height, width, prompt_embeds.dtype, device, generator, latents)
    extra_step_kwargs = pipe.prepare_extra_step_kwargs(generator, eta)
    # SDXL micro-conditioning: pooled text embeddings plus size/crop time ids.
    add_text_embeds, add_text2_embeds = pooled_prompt_embeds, pooled_prompt2_embeds
    add_time_ids = pipe._get_add_time_ids(original_size, crops_coords_top_left, target_size, dtype=prompt_embeds.dtype)
    add_time2_ids = pipe._get_add_time_ids(original_size, crops_coords_top_left, target_size, dtype=prompt2_embeds.dtype)
    negative_add_time_ids = pipe._get_add_time_ids(negative_original_size, negative_crops_coords_top_left, negative_target_size, dtype=prompt_embeds.dtype) if negative_original_size and negative_target_size else add_time_ids
    if do_classifier_free_guidance:
        prompt_embeds, add_text_embeds, add_time_ids = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0), torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0), torch.cat([negative_add_time_ids, add_time_ids], dim=0)
        prompt2_embeds, add_text2_embeds, add_time2_ids = torch.cat([negative_prompt2_embeds, prompt2_embeds], dim=0), torch.cat([negative_pooled_prompt2_embeds, add_text2_embeds], dim=0), torch.cat([negative_add_time_ids, add_time2_ids], dim=0)
    prompt_embeds, add_text_embeds, add_time_ids = prompt_embeds.to(device), add_text_embeds.to(device), add_time_ids.to(device).repeat(batch_size * num_images_per_prompt, 1)
    prompt2_embeds, add_text2_embeds, add_time2_ids = prompt2_embeds.to(device), add_text2_embeds.to(device), add_time2_ids.to(device).repeat(batch_size * num_images_per_prompt, 1)
    num_warmup_steps = max(len(timesteps) - num_inference_steps * pipe.scheduler.order, 0)
    if denoising_end and isinstance(denoising_end, float) and 0 < denoising_end < 1:
        # Optionally stop the denoising early (e.g. to hand off to a refiner).
        discrete_timestep_cutoff = int(round(pipe.scheduler.config.num_train_timesteps - (denoising_end * pipe.scheduler.config.num_train_timesteps)))
        num_inference_steps = len([ts for ts in timesteps if ts >= discrete_timestep_cutoff])
        timesteps = timesteps[:num_inference_steps]
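    # Core trick: even steps denoise the latents against prompt 1; odd steps
    # denoise a vertically flipped copy against prompt 2 and flip the noise
    # prediction back, so both scenes are carved into the same latents.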
    with pipe.progress_bar(total=num_inference_steps) as progress_bar:
        for i, t in enumerate(timesteps):
            if i % 2 == 0:
                latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
                latent_model_input = pipe.scheduler.scale_model_input(latent_model_input, t)
                noise_pred = pipe.unet(latent_model_input, t, encoder_hidden_states=prompt_embeds, cross_attention_kwargs=cross_attention_kwargs, added_cond_kwargs={"text_embeds": add_text_embeds, "time_ids": add_time_ids})[0]
                if do_classifier_free_guidance:
                    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                    noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
            else:
                latent_model_input = torch.cat([latents.flip(2)] * 2) if do_classifier_free_guidance else latents.flip(2)
                latent_model_input = pipe.scheduler.scale_model_input(latent_model_input, t)
                noise_pred = pipe.unet(latent_model_input, t, encoder_hidden_states=prompt2_embeds, cross_attention_kwargs=cross_attention_kwargs, added_cond_kwargs={"text_embeds": add_text2_embeds, "time_ids": add_time2_ids})[0]
                if do_classifier_free_guidance:
                    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                    noise_pred = noise_pred_uncond + guidance_scale2 * (noise_pred_text - noise_pred_uncond)
                # Flip the prediction back to the upright frame before stepping.
                noise_pred = noise_pred.flip(2)
            latents = pipe.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
            if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % pipe.scheduler.order == 0):
                progress_bar.update()
    # Decode with the VAE upcast to float32 to avoid fp16 overflow artifacts.
    pipe.upcast_vae()
    latents = latents.to(next(iter(pipe.vae.post_quant_conv.parameters())).dtype)
    image = pipe.vae.decode(latents / pipe.vae.config.scaling_factor, return_dict=False)[0]
    image = pipe.image_processor.postprocess(image, output_type=output_type)
    if not return_dict:
        return (image,)
    return StableDiffusionXLPipelineOutput(images=image)
def simple_call(prompt1, prompt2, guidance_scale1, guidance_scale2, negative_prompt1, negative_prompt2):
    # Fixed seed so the two-prompt composition is reproducible across runs.
    generator = [torch.Generator(device="cuda").manual_seed(5)]
    res = call(pipe, prompt1, prompt2, width=768, height=768, num_images_per_prompt=1, num_inference_steps=50, guidance_scale=guidance_scale1, guidance_scale2=guidance_scale2, negative_prompt=negative_prompt1, negative_prompt2=negative_prompt2, generator=generator)
    image1 = res.images[0]
    # The second view is simply the first image rotated 180 degrees.
    image2 = ImageOps.exif_transpose(image1.rotate(180))
    return image1, image2
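# Usage sketch, bypassing the UI (hypothetical prompts):
#   up, down = simple_call("a snowy mountain", "a portrait of an old man", 7.5, 7.5, "", "")
#   up.save("upright.png"); down.save("rotated.png")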
with gr.Blocks() as app:
    gr.Markdown(
        '''
        <center><h1>Upside Down Diffusion</h1>
        Placeholder
        </center>
        '''
    )
    with gr.Row():
        with gr.Column():
            prompt1 = gr.Textbox(label="Prompt 1")
            prompt2 = gr.Textbox(label="Prompt 2")
            negative_prompt1 = gr.Textbox(label="Negative Prompt 1")
            negative_prompt2 = gr.Textbox(label="Negative Prompt 2")
            # value=7.5 is an assumed sensible default; an unset slider would
            # start at 0 and effectively disable classifier-free guidance.
            guidance_scale1 = gr.Slider(minimum=0, maximum=10, step=0.1, value=7.5, label="Guidance Scale 1")
            guidance_scale2 = gr.Slider(minimum=0, maximum=10, step=0.1, value=7.5, label="Guidance Scale 2")
            run_btn = gr.Button("Run")
            with gr.Accordion(label="Advanced Options", open=False):
                # You can place additional sliders or options here
                pass
        with gr.Column():
            result_image1 = gr.Image(label="Output 1")
            result_image2 = gr.Image(label="Output 2 (Rotated)")
    run_btn.click(
        simple_call,
        inputs=[prompt1, prompt2, guidance_scale1, guidance_scale2, negative_prompt1, negative_prompt2],
        outputs=[result_image1, result_image2],
    )
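# Queue requests so concurrent users don't contend for the single GPU.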
app.queue(max_size=20)
if __name__ == "__main__":
    app.launch(debug=True)