import gradio as gr
import torch
#import numpy as np
import modin.pandas as pd
from PIL import Image
from diffusers import DiffusionPipeline
import os
##########
#
# Original script by:
# https://huggingface.co/Manjushri
#
# This version has been adapted from that person's versions.
#
##########
# Tested with gradio version 4.8.0
# https://www.gradio.app/main/docs/interface
##########
# To launch this script, use the following in the command prompt, removing
# the # at the start. (You will need to adjust the start of the path if
# you have changed the location.)
#cd C:\Diffusers && .venv\Scripts\activate.bat && py .venv\sdxl_and_photoreal\sdxl_and_photoreal.py
##########
# IMPORTANT NOTES:
#
# You must have an NVIDIA graphics card in your computer, with CUDA
# installed, to use this script. It will not work on just a CPU on
# Windows.
#
# If not using "enable_model_cpu_offload" or
# "enable_sequential_cpu_offload", memory usage will remain high until
# the command prompt is closed, whether an image is being created or not.
###############################################################################
###############################################################################
#
#
#
# Begin Configurations
#
#
#
###############################################################################
###############################################################################
#
# Main Directory
#
# This is where everything goes. Your Python virtual environment should
# be here. Model data will be stored here. (unless you change the next
# configuration) If configured, imagery will also be saved here
# automatically.
#
main_dir = "C:/Diffusers"
####################
#
# Use Custom Hugging Face Cache Directory
#
# The folder where model data is stored can get huge. I chose to put it
# somewhere I am more likely to notice it. If you use other Hugging
# Face tools, however, and will use these models in them, then you might
# want to consider not setting this, as it would duplicate the model
# data.
#
# If set to 1, the data would be here:
# C:\Diffusers\model_data
#
# If set to 0, the data would be here:
# %USERPROFILE%/.cache/huggingface/hub
# Which would look like this, where {Username} is the username of
# your Windows account:
# C:\Users\{Username}\.cache\huggingface\hub
#
# You need to clean out this folder occasionally, as it will get
# extremely large. Eventually, it could take up all the space on your
# computer.
#
use_custom_hugging_face_cache_dir = 1
#####
#
# Name of Model Data Folder
#
# This is where all the model data will go. (unless you changed it in the
# previous configuration) This folder will get very large. You need to
# clean it out manually occasionally.
#
cache_directory_folder_name = "model_data"
####################
#
# Default Base Model
#
# This will automatically be SDXL Turbo if you are running this on a CPU.
#
default_base_model = "sdxl"
####################
#
# Auto Save Imagery
#
# You can automatically save the image file, and a text file with the
# prompt details.
#
auto_save_imagery = 1
#####
#
# Name of Saved Images Folder
#
# You can change the name of this folder if you want. Imagery will be
# saved in a folder called "saved_images" in the directory configured
# in "main_dir". (the saved images folder will be created
# automatically) A directory for each day will be created in this
# folder. Imagery will then be placed in that day's folder.
#
saved_images_folder_name = "saved_images"
####################
#
# Auto Open Browser From Command Prompt
#
auto_open_browser = 1
####################
#
# Make Seed Selection A Textbox
#
# If you use a slider or number field for the seed, some seeds can't be
# duplicated using those fields. If you enter a number greater than
# 9007199254740992, the seed won't reliably be used. Check the text
# details to see if it was used. This is a technical limitation as of
# writing this. See the bug report here:
# https://github.com/gradio-app/gradio/issues/5354
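#
# (9007199254740992 is 2^53. JavaScript numbers cannot represent every
# integer above that value exactly, which is why browser-side number
# fields become unreliable there.)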
#
# Using the slider, and not typing a number into the field, might be a
# way to get reliable values above that limit. Just don't then use the
# up and down arrows in the field to step the value.
#
# The way to use seeds higher than that reliably is to set this variable
# to 1.
make_seed_selection_a_textbox = 0
####################
#
# Include Close Command Prompt / Cancel Button
#
# This doesn't work well. It just closes the command prompt, and it
# currently doesn't cancel image creation when used either. Don't use
# it.
#
enable_close_command_prompt_button = 0
####################
#
# Use Denoising Start In Base Model When Using Refiner
#
# If set to "1", refining will end at the percent (expressed as decimal)
# defined in the denoising start for the refiner. If the steps set are
# 100, and the denoising start value is 0.75, the base model will run for
# 75 steps. The refiner will then run for 25 steps.
#
default_use_denoising_start_in_base_model_when_using_refiner = 0
####################
#
# Base Model Output To Refiner Is In Latent Space
#
# If set to "1", base model output is in latent space instead of PIL
# image when sent to refiner.
#
default_base_model_output_to_refiner_is_in_latent_space = 1
####################
#
# Log Generation Times
#
# Log generation times to saved text output. The initial time it takes to
# load a model is not included in the generation time.
#
log_generation_times = 1
####################
#
# Use Image Gallery
#
# There is a bug in downloading images:
# https://github.com/gradio-app/gradio/issues/6486
# It saves the file as HTML rather than as an image.
#
# If you use the gallery, you can turn the download button off for now.
# Users can still right-click and save the image.
#
use_image_gallery = 1
show_download_button_for_gallery = 0
####################
#
# Show Image Creation Progress Log
#
# This displays the current step that image generation is on.
#
show_image_creation_progress_log = 1
####################
#
# Show Messages In Command Prompt
#
# Messages will be printed in command prompt.
#
show_messages_in_command_prompt = 1
####################
#
# Show Messages In Modal On Page
#
# A popup appears in the top right corner on the page.
#
show_messages_in_modal_on_page = 0
####################
#
# Up Next Are Various Configuration Arrays and Objects
#
####################
base_model_array = [
"sdxl",
"photoreal",
"sdxl_turbo",
"sd_1_5_runwayml"
]
base_model_names_object = {
"sdxl": "Stable Diffusion XL 1.0",
"photoreal": "PhotoReal",
"sdxl_turbo": "Stable Diffusion XL Turbo",
"sd_1_5_runwayml": "Stable Diffusion 1.5"
}
####################
#
# "sdxl_default"
#
# - My customized configurations. (subject to change)
#
# "sdxl_2023-11-12"
#
# - Valid from November 12th to present.
# Number of steps in upscaler changed from 5 to 15.
#
# "sdxl_2023-09-05"
#
# - Valid from September 5th to November 12th.
# There were changes on this date.
#
# "photoreal_default"
#
# - My customized configurations. (subject to change)
# "circulus/canvers-real-v3.7.5"
#
# Seeds do not match the online PhotoReal version.
#
# "photoreal_2023-11-12"
#
# - Valid from November 12th to present.
# New base model: "circulus/canvers-real-v3.7.5"
#
# "photoreal_2023-09-01"
#
# - Valid from September 1st to November 12th.
# "circulus/canvers-realistic-v3.6" was already in effect.
# But there were changes on this date.
#
# "sdxl_turbo_default"
#
# - My customized configurations. (subject to change)
#
# "sd_1_5_runwayml_default"
#
# - My customized configurations. (subject to change)
#
base_model_object_of_model_configuration_arrays = {
"sdxl": [
"sdxl_default",
"sdxl_2023-11-12",
"sdxl_2023-09-05"
],
"photoreal": [
"photoreal_default",
"photoreal_2023-11-12",
"photoreal_2023-09-01"
],
"sdxl_turbo": [
"sdxl_turbo_default"
],
"sd_1_5_runwayml": [
"sd_1_5_runwayml_default"
]
}
####################
model_configuration_names_object = {
"sdxl_default": "1.0 - Default (subject to change)",
"sdxl_2023-11-12": "1.0 (2023-11-12 online config)",
"sdxl_2023-09-05": "1.0 (2023-09-05 online config)",
"photoreal_default": "3.7.5 - Default (subject to change)",
"photoreal_2023-11-12": "3.7.5 (2023-11-12 online config)",
"photoreal_2023-09-01": "3.6 (2023-09-01 online config)",
"sdxl_turbo_default": "Default (subject to change)",
"sd_1_5_runwayml_default": "1.5 - Default (subject to change)"
}
model_configuration_links_object = {
"sdxl_default": "stabilityai/stable-diffusion-xl-base-1.0",
"sdxl_2023-11-12": "stabilityai/stable-diffusion-xl-base-1.0",
"sdxl_2023-09-05": "stabilityai/stable-diffusion-xl-base-1.0",
"photoreal_default": "circulus/canvers-real-v3.7.5",
"photoreal_2023-11-12": "circulus/canvers-real-v3.7.5",
"photoreal_2023-09-01": "circulus/canvers-realistic-v3.6",
"sdxl_turbo_default": "stabilityai/sdxl-turbo",
"sd_1_5_runwayml_default": "runwayml/stable-diffusion-v1-5"
}
model_configuration_force_refiner_object = {
"sdxl_2023-11-12": 1,
"sdxl_2023-09-05": 1
}
# For now, the ones that force the refiner also have the "Refiner Number of
# Iterations" available.
model_configuration_include_refiner_number_of_steps_object = model_configuration_force_refiner_object
#model_configuration_include_refiner_number_of_steps_object = {
# "sdxl_2023-11-12": 1,
# "sdxl_2023-09-05": 1
#}
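# Note that the assignment above makes the two names refer to the same
# object, not a copy, so the two will always stay in sync.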
####################
hugging_face_refiner_partial_path = "stabilityai/stable-diffusion-xl-refiner-1.0"
hugging_face_upscaler_partial_path = "stabilityai/sd-x2-latent-upscaler"
####################
base_model_model_configuration_defaults_object = {
"sdxl": "sdxl_default",
"photoreal": "photoreal_default",
"sdxl_turbo": "sdxl_turbo_default",
"sd_1_5_runwayml": "sd_1_5_runwayml_default"
}
####################
#
# Links:
#
# SD-XL 1.0-base Model Card
# https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0
#
# SD-XL 1.0-refiner Model Card
# https://huggingface.co/stabilityai/stable-diffusion-xl-refiner-1.0
#
# Stable Diffusion x2 latent upscaler model card
# https://huggingface.co/stabilityai/sd-x2-latent-upscaler
#
# PhotoReal
# 3.7.5: https://huggingface.co/circulus/canvers-real-v3.7.5
# 3.6: https://huggingface.co/circulus/canvers-realistic-v3.6
#
# SDXL Turbo
# https://huggingface.co/stabilityai/sdxl-turbo
#
# Stable Diffusion v1-5 (runwayml)
# https://huggingface.co/runwayml/stable-diffusion-v1-5
#
####################
#
# Determine automatically if on CPU or GPU
#
# CPU will not work on Windows.
#
device = "cpu"
if torch.cuda.is_available():
device = "cuda"
PYTORCH_CUDA_ALLOC_CONF = {
"max_split_size_mb": 8000
}
torch.cuda.max_memory_allocated(
device = device
)
torch.cuda.empty_cache()
if device == "cpu":
default_base_model = "sdxl_turbo"
####################
default_prompt = "black cat"
default_negative_prompt = ""
default_width = 768
default_height = 768
default_guidance_scale_value = 7
default_base_model_base_model_num_inference_steps = 50
default_base_model_base_model_num_inference_steps_for_sdxl_turbo = 2
#default_seed_maximum = 999999999999999999
default_seed_maximum = 1000000000000000000
default_seed_value = 876678173805928800
# If you turn off the refiner it will not be available in the display unless
# you select an online configuration option that requires it.
enable_refiner = 1
enable_upscaler = 1
# Selected on form as a default?
default_refiner_selected = 0
default_upscaler_selected = 0
# Accordion visible on load?
#
# 0: If selected as default, will be open. Otherwise, closed.
# 1: Always starts open.
default_refiner_accordion_open = 1
default_upscaler_accordion_open = 1
# xFormers:
#
# https://huggingface.co/docs/diffusers/optimization/xformers
use_xformers = 1
# Scaled dot product attention (SDPA) is used by default in PyTorch 2.0.
# To use the default attention processor (AttnProcessor) instead, set
# this to 1.
#
# https://huggingface.co/docs/diffusers/optimization/torch2.0#scaled-dot-product-attention
use_default_attn_processor = 0
display_xformers_usage_in_prompt_info = 1
include_transformers_version_in_prompt_info = 1
display_default_attn_processor_usage_in_prompt_info = 1
# You can't select both sequential and model cpu offloading. If you select
# both, model cpu offloading will be used.
use_sequential_cpu_offload_for_base_model = 1
use_sequential_cpu_offload_for_refiner = 1
use_sequential_cpu_offload_for_upscaler = 1
use_model_cpu_offload_for_base_model = 0
use_model_cpu_offload_for_refiner = 0
use_model_cpu_offload_for_upscaler = 0
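# As described in the Diffusers documentation, sequential CPU offloading
# moves individual submodules to the GPU only while they are needed,
# which uses the least memory but is slow. Model CPU offloading moves
# whole models at a time, which is faster but uses more memory.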
if default_base_model == "photoreal":
# PhotoReal
default_seed_value = 3648905360627576
elif default_base_model == "sdxl_turbo":
# SDXL Turbo
default_seed_value = 2725116121543
#elif default_base_model == "sd_1_5_runwayml":
# SD 1.5
else:
# SDXL
default_width = 1024
default_height = 1024
default_guidance_scale_value = 10
# Must be multiple of 8
width_and_height_input_slider_steps = 8
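# (Stable Diffusion models generate in a latent space downsampled by a
# factor of 8, which is why image dimensions need to be divisible by 8.)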
opening_html = ""
if device == "cpu":
opening_html = "<span style=\"font-weight: bold; color: #c00;\">THIS APP IS EXCEPTIONALLY SLOW!</span><br/>This app is not running on a GPU. The first time it loads after the space is rebuilt it might take 10 minutes to generate a SDXL Turbo image. It may take 2 to 3 minutes after that point to do two steps. For other models, it may take hours to create a single image."
ending_html = """This app allows you to try to match images that can be generated using several tools online. (<a href=\"https://huggingface.co/spaces/Manjushri/SDXL-1.0\" target=\"_blank\">Stable Diffusion XL</a>, <a href=\"https://huggingface.co/spaces/Manjushri/PhotoReal-V3.7.5\" target=\"_blank\">PhotoReal with SDXL 1.0 Refiner</a> and <a href=\"https://huggingface.co/spaces/diffusers/unofficial-SDXL-Turbo-i2i-t2i\" target=\"_blank\">SDXL Turbo Unofficial Demo</a>) You can select the base model you want to use in the first dropdown option. The second configuration option involves choosing which version and/or configuration to choose. Certain configurations try to match the version online, taking into account changes that were made over time. Another configuration involves a default configuration I choose and is subject to change while I am still designing this app.
Tokens are not individual characters. If the prompt length is too long, the display will notify you what part of the prompt wasn't used. Changing just the image dimensions alone will change the image generated. For some models, trying to make a large image, such as 1024x1024, may add extra people and come out worse than using smaller dimensions. If you have a seed greater than 9007199254740992, it may not be processed correctly. Make sure the prompt matches the seed you entered. (shown in the \"Prompt Information\" section once you create an image) If it doesn't, set \"make_seed_selection_a_textbox\" to 1 in the script. This bug is described <a href=\"https://github.com/gradio-app/gradio/issues/5354\" target=\"_blank\">here</a>.
The original script for this app was written by <a href=\"https://huggingface.co/Manjushri\" target=\"_blank\">Manjushri</a>."""
refiner_and_upscaler_status_opening_html = "<div style=\"text-align: center;\">"
refiner_and_upscaler_status_closing_html = "</div>"
refiner_on_text = "Refiner is on. "
refiner_off_text = "Refiner is off. "
upscaler_on_text = "Upscaler is on. "
upscaler_off_text = "Upscaler is off. "
number_of_reserved_tokens = 2
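# The CLIP tokenizer used by these models allows 77 tokens per prompt.
# Two of them are reserved for the begin-of-sequence and end-of-sequence
# markers, which is what this constant accounts for, leaving 75 tokens
# for the prompt itself.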
###############################################################################
###############################################################################
#
#
#
# End Configurations
#
#
#
###############################################################################
###############################################################################
hugging_face_hub_is_offline = 0
if (
("HF_HUB_OFFLINE" in os.environ) and
(int(os.environ["HF_HUB_OFFLINE"]) == 1)
):
hugging_face_hub_is_offline = 1
if hugging_face_hub_is_offline == 0:
print ("Note: The Hugging Face cache directory does not automatically delete older data. Over time, it could eventually grow to use all the space on the drive it is on. You either need to manually clean out the folder occasionally or see Instructons.txt on how to not automatically update data once you have downloaded everything you need.")
script_being_run_on_hugging_face = 0
try:
    if (str(os.uname()).find("magicfixeseverything") >= 0):
        script_being_run_on_hugging_face = 1
except:
    script_being_run_on_hugging_face = 0
if script_being_run_on_hugging_face == 1:
    use_custom_hugging_face_cache_dir = 0
    auto_save_imagery = 0
    show_messages_in_command_prompt = 1
    show_messages_in_modal_on_page = 1
    if device == "cpu":
        show_image_creation_progress_log = 1
ending_html = """
If you would like to download this app to run offline on a Windows computer that has a NVIDIA graphics card, click <a href=\"https://huggingface.co/spaces/magicfixeseverything/ai_image_creation/resolve/main/ai_image_creation.zip\">here</a> to download it.
""" + ending_html
if device == "cuda":
PYTORCH_CUDA_ALLOC_CONF = {
"max_split_size_mb": 8000
}
torch.cuda.max_memory_allocated(
device = device
)
torch.cuda.empty_cache()
saved_images_dir = main_dir + "/" + saved_images_folder_name
hugging_face_cache_dir = main_dir + "/" + cache_directory_folder_name
if not os.path.exists(hugging_face_cache_dir):
os.makedirs(hugging_face_cache_dir)
if auto_save_imagery == 1:
from datetime import datetime
import time
if log_generation_times == 1:
import time
if device == "cpu":
use_sequential_cpu_offload_for_base_model = 0
use_sequential_cpu_offload_for_refiner = 0
use_sequential_cpu_offload_for_upscaler = 0
use_model_cpu_offload_for_base_model = 0
use_model_cpu_offload_for_refiner = 0
use_model_cpu_offload_for_upscaler = 0
use_xformers = 0
if (
(use_sequential_cpu_offload_for_base_model == 1) and
(use_model_cpu_offload_for_base_model == 1)
):
use_sequential_cpu_offload_for_base_model = 0
if (
(use_sequential_cpu_offload_for_refiner == 1) and
(use_model_cpu_offload_for_refiner == 1)
):
use_sequential_cpu_offload_for_refiner = 0
if (
(use_sequential_cpu_offload_for_upscaler == 1) and
(use_model_cpu_offload_for_upscaler == 1)
):
use_sequential_cpu_offload_for_upscaler = 0
def error_function(
    text_message
):
    print (text_message)
    # gr.Error is an exception and must be raised for Gradio to display
    # it; raising it also halts the script here.
    raise gr.Error(text_message)
# Don't change this one
default_model_configuration_object = {
"sdxl_default": 1,
"photoreal_default": 1,
"sdxl_turbo_default": 1,
"sd_1_5_runwayml_default": 1
}
additional_prompt_info_html = ""
if auto_save_imagery == 1:
additional_prompt_info_html = " The image, and a text file with generation information, will be saved automatically."
if use_xformers == 1:
from xformers.ops import MemoryEfficientAttentionFlashAttentionOp
if use_default_attn_processor == 1:
from diffusers.models.attention_processor import AttnProcessor
if (
default_base_model and
(default_base_model in base_model_object_of_model_configuration_arrays) and
(default_base_model in base_model_model_configuration_defaults_object)
):
default_model_configuration = base_model_model_configuration_defaults_object[default_base_model]
if default_model_configuration in model_configuration_names_object:
default_model_configuration_choices_array = []
for this_model_configuration in base_model_object_of_model_configuration_arrays[default_base_model]:
if model_configuration_names_object[this_model_configuration]:
default_model_configuration_choices_array.append(
model_configuration_names_object[this_model_configuration]
)
else:
error_function("A default configuration must be properly named in the code.")
else:
error_function("A default configuration must be properly configured in the code.")
else:
error_function("A default base model must be properly configured in the code.")
default_base_model_nicely_named_value = base_model_names_object[default_base_model]
default_model_configuration_nicely_named_value = model_configuration_names_object[default_model_configuration]
if enable_refiner != 1:
default_refiner_selected = 0
if enable_upscaler != 1:
default_upscaler_selected = 0
model_configuration_requires_refiner = 0
if default_model_configuration in model_configuration_force_refiner_object:
model_configuration_requires_refiner = model_configuration_force_refiner_object[default_model_configuration]
if model_configuration_requires_refiner == 1:
enable_refiner = 1
default_refiner_selected = 1
default_refine_option = "No"
if default_refiner_selected == 1:
default_refine_option = "Yes"
default_upscale_option = "No"
if default_upscaler_selected == 1:
default_upscale_option = "Yes"
is_default_config = 0
if default_model_configuration in default_model_configuration_object:
is_default_config = 1
default_refiner_and_upscaler_status_text = refiner_and_upscaler_status_opening_html
default_use_denoising_start_in_base_model_when_using_refiner_is_selected = False
if default_use_denoising_start_in_base_model_when_using_refiner == 1:
default_use_denoising_start_in_base_model_when_using_refiner_is_selected = True
default_base_model_output_to_refiner_is_in_latent_space_is_selected = False
if default_base_model_output_to_refiner_is_in_latent_space == 1:
default_base_model_output_to_refiner_is_in_latent_space_is_selected = True
refiner_default_config_accordion_visible = True
if (
(enable_refiner != 1) or
(is_default_config != 1)
):
refiner_default_config_accordion_visible = False
refiner_default_config_accordion_open = False
if (
(default_refiner_accordion_open == 1) or
(
(is_default_config == 1) and
(default_refiner_selected == 1)
)
):
refiner_default_config_accordion_open = True
refiner_online_config_accordion_visible = True
if (
(enable_refiner != 1) or
(is_default_config == 1)
):
refiner_online_config_accordion_visible = False
refiner_online_config_accordion_open = False
if (
(default_refiner_accordion_open == 1) or
(
(is_default_config != 1) and
(default_refiner_selected == 1)
)
):
refiner_online_config_accordion_open = True
refiner_group_visible = False
if enable_refiner == 1:
refiner_group_visible = True
if default_refiner_selected == 1:
default_refiner_and_upscaler_status_text += refiner_on_text
else:
default_refiner_and_upscaler_status_text += refiner_off_text
upscaler_accordion_open = False
if (
(default_upscaler_selected == 1) or
(default_upscaler_accordion_open == 1)
):
upscaler_accordion_open = True
upscaler_group_visible = False
if enable_upscaler == 1:
upscaler_group_visible = True
if default_upscaler_selected == 1:
default_refiner_and_upscaler_status_text += upscaler_on_text
else:
default_refiner_and_upscaler_status_text += upscaler_off_text
default_refiner_and_upscaler_status_text += refiner_and_upscaler_status_closing_html
image_gallery_array = []
prompt_information_array = []
default_negative_prompt_field_visibility = True
default_negative_prompt_for_sdxl_turbo_field_visibility = False
default_base_model_num_inference_steps_field_visibility = True
default_base_model_num_inference_steps_field_for_sdxl_turbo_field_visibility = False
default_guidance_scale_field_visibility = True
default_guidance_scale_for_sdxl_turbo_field_visibility = False
if default_base_model == "sdxl_turbo":
default_negative_prompt_field_visibility = False
default_negative_prompt_for_sdxl_turbo_field_visibility = True
default_base_model_num_inference_steps_field_visibility = False
default_base_model_num_inference_steps_field_for_sdxl_turbo_field_visibility = True
default_guidance_scale_field_visibility = False
default_guidance_scale_for_sdxl_turbo_field_visibility = True
last_model_configuration_name_value = ""
last_refiner_selected = ""
last_upscaler_selected = ""
if show_image_creation_progress_log == 1:
import time
current_progress_text = ""
current_actual_total_base_model_steps = ""
current_actual_total_refiner_steps = ""
current_actual_total_upscaler_steps = ""
default_base_model_choices_array = []
stored_model_configuration_names_object = {}
for this_base_model in base_model_array:
default_base_model_choices_array.append(
base_model_names_object[this_base_model]
)
stored_model_configuration = base_model_model_configuration_defaults_object[this_base_model]
stored_model_configuration_names_object[this_base_model] = model_configuration_names_object[stored_model_configuration]
###############################################################################
###############################################################################
#
#
#
#
#
#
# Functions
#
#
#
#
#
#
###############################################################################
###############################################################################
#####################
#
# Show Message
#
# Display a message to the user in a modal on the web page and/or in the
# command prompt.
#
#####################
def show_message(
message_to_display
):
if show_messages_in_command_prompt == 1:
print (message_to_display)
if show_messages_in_modal_on_page == 1:
gr.Info(message_to_display)
#####################
#
# Convert Seconds
#
# Convert raw seconds to the number of hours, minutes and seconds.
#
#####################
def convert_seconds(
seconds
):
# Google AI Code
hours = seconds // 3600
minutes = (seconds % 3600) // 60
seconds = seconds % 60
return hours, minutes, seconds
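#
# A quick example of the conversion above (not used by the script):
#
#   convert_seconds(3725)   # returns (1, 2, 5)
#   convert_seconds(125)    # returns (0, 2, 5)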
#####################
#
# Seed Not Valid
#
# Return True if seed is not valid.
#
#####################
def seed_not_valid(seed_num_str):
try:
seed_num = int(seed_num_str)
if (seed_num > 0) and (seed_num <= default_seed_maximum):
return False
else:
return True
except ValueError:
return True
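#
# A few examples of how the check above behaves, assuming the
# default_seed_maximum configured earlier:
#
#   seed_not_valid("12345")        # False: a valid seed
#   seed_not_valid("0")            # True: zero is reserved to mean "random"
#   seed_not_valid("not a seed")   # True: not an integer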
#####################
#
# Numerical Bool
#
# Return 1 for anything that is 1/"Yes"/"True"/True. Everything else
# returns 0.
#
#####################
def numerical_bool(
original_value
):
new_value = 0
if (
(original_value == 1) or
(original_value == "Yes") or
(original_value == "True") or
(original_value == True)
):
new_value = 1
return new_value
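#
# A few examples of how the conversion above behaves:
#
#   numerical_bool("Yes")    # 1
#   numerical_bool(True)     # 1
#   numerical_bool("No")     # 0
#   numerical_bool(None)     # 0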
#####################
#
# Truncate Prompt
#
# Truncate a prompt. Get the actual prompt that will be used and save the
# part of the prompt that will not be used.
#
#####################
def truncate_prompt (
existing_prompt_text
):
    # Only 77 tokens are allowed in the prompt. 2 are reserved, meaning it
    # is truncated to 75. This happens automatically, but we want to tell
    # people when it does.
tokenizer = pipe.tokenizer
max_token_length_of_model = pipe.tokenizer.model_max_length - number_of_reserved_tokens
prompt_text_words_array = existing_prompt_text.split(" ")
prompt_text_words_array_length = len(prompt_text_words_array)
prompt_text_words_index = 0
prompt_text_substring = ""
prompt_text_not_used_substring = ""
for prompt_text_word in prompt_text_words_array:
prompt_text_words_index += 1
substring_to_test = prompt_text_substring
if prompt_text_words_index > 1:
substring_to_test += " "
substring_to_test += prompt_text_word
token_length_of_substring_to_test = len(tokenizer.tokenize(substring_to_test))
if token_length_of_substring_to_test > max_token_length_of_model:
prompt_text_not_used_substring += prompt_text_word + " "
else:
prompt_text_substring = substring_to_test
return (
prompt_text_substring,
prompt_text_not_used_substring
)
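#
# For example, with a 75-token limit, a long prompt is cut at the last
# word that keeps the tokenized length within 75 tokens; the words that
# did not fit are returned separately so they can be shown to the user.
# Exactly where the cut lands depends entirely on the tokenizer, since
# one word can map to several tokens.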
#####################
#
# Update Prompt Info From Gallery
#
# If you select an image in the image gallery, display the prompt
# information for that image.
#
#####################
def update_prompt_info_from_gallery (
gallery_data: gr.SelectData
):
gallery_data_index = gallery_data.index
output_image_field_update = gr.Gallery(
selected_index = gallery_data_index
)
output_text_field_update = prompt_information_array[gallery_data_index]
return {
output_image_field: output_image_field_update,
output_text_field: output_text_field_update
}
#####################
#
# Callback Function for Base Model Progress
#
# Add the current step the generation is on in the base model to the web
# interface.
#
#####################
def callback_function_for_base_model_progress(
callback_pipe,
callback_step_index,
callback_timestep,
callback_kwargs
):
global current_progress_text
global current_base_model_generation_start_time
current_progress_text = "Base model steps complete... " + str(callback_step_index) + " of " + str(current_actual_total_base_model_steps)
if int(callback_step_index) == 0:
current_base_model_generation_start_time = time.time()
if int(callback_step_index) > 0:
seconds_per_step = ((time.time() - current_base_model_generation_start_time) / int(callback_step_index))
(
time_per_step_hours,
time_per_step_minutes,
time_per_step_seconds
) = convert_seconds(seconds_per_step)
        if time_per_step_hours > 0:
            hours_text = "hr"
            if time_per_step_hours > 1:
                hours_text = "hrs"
            nice_time_per_step = str(int(time_per_step_hours)) + " " + hours_text + ". " + str(int(time_per_step_minutes)) + " min. " + str(round(time_per_step_seconds, 1)) + " sec."
        elif time_per_step_minutes > 0:
            nice_time_per_step = str(int(time_per_step_minutes)) + " min. " + str(round(time_per_step_seconds, 1)) + " sec."
        else:
            nice_time_per_step = str(round(time_per_step_seconds, 2)) + " sec."
current_progress_text += "\n" + nice_time_per_step + " per step"
return {}
#####################
#
# Callback Function for Refiner Progress
#
# Add the current step the generation is on in the refiner to the web
# interface.
#
#####################
def callback_function_for_refiner_progress(
callback_pipe,
callback_step_index,
callback_timestep,
callback_kwargs
):
global current_progress_text
global current_refiner_generation_start_time
current_progress_text = "Refiner steps complete... " + str(callback_step_index) + " of " + str(current_actual_total_refiner_steps)
if int(callback_step_index) == 0:
current_refiner_generation_start_time = time.time()
if int(callback_step_index) > 0:
seconds_per_step = ((time.time() - current_refiner_generation_start_time) / int(callback_step_index))
(
time_per_step_hours,
time_per_step_minutes,
time_per_step_seconds
) = convert_seconds(seconds_per_step)
        if time_per_step_hours > 0:
            hours_text = "hr"
            if time_per_step_hours > 1:
                hours_text = "hrs"
            nice_time_per_step = str(int(time_per_step_hours)) + " " + hours_text + ". " + str(int(time_per_step_minutes)) + " min. " + str(round(time_per_step_seconds, 1)) + " sec."
        elif time_per_step_minutes > 0:
            nice_time_per_step = str(int(time_per_step_minutes)) + " min. " + str(round(time_per_step_seconds, 1)) + " sec."
        else:
            nice_time_per_step = str(round(time_per_step_seconds, 2)) + " sec."
current_progress_text += "\n" + nice_time_per_step + " per step"
return {}
#####################
#
# Update Log Progress
#
# This is called every second when "show_image_creation_progress_log" is
# set to 1. It displays the latest value in "current_progress_text".
#
#####################
def update_log_progress ():
global current_progress_text
log_text_field_update = gr.Textbox(
value = current_progress_text
)
return {
log_text_field: log_text_field_update
}
#####################
#
# Before Create Image Function
#
# This runs before image creation begins.
#
#####################
def before_create_image_function ():
output_text_field_update = gr.Textbox(
visible = False
)
log_text_field_update = gr.Textbox(
value = "",
visible = True,
every = 1
)
generate_image_btn_update = gr.Button(
value = "Generating...",
variant = "secondary",
interactive = False
)
return {
output_text_field: output_text_field_update,
log_text_field: log_text_field_update,
generate_image_btn: generate_image_btn_update
}
#####################
#
# After Create Image Function
#
# This runs once image creation has completed.
#
#####################
def after_create_image_function ():
output_text_field_update = gr.Textbox(
visible = True
)
log_text_field_update = gr.Textbox(
value = "",
visible = False,
every = None
)
generate_image_btn_update = gr.Button(
value = "Generate",
variant = "primary",
interactive = True
)
return {
output_text_field: output_text_field_update,
log_text_field: log_text_field_update,
generate_image_btn: generate_image_btn_update
}
#####################
#
# Create Image Function
#
# This is the main image creation function.
#
#####################
def create_image_function (
base_model_field_index,
model_configuration_field_index,
prompt_text,
negative_prompt_text,
image_width,
image_height,
guidance_scale,
base_model_num_inference_steps,
base_model_num_inference_steps_field_for_sdxl_turbo,
actual_seed,
refining_selection_default_config_field_value,
refining_selection_online_config_normal_field_value,
refining_selection_online_config_automatically_selected_field_value,
refining_denoise_start_for_default_config_field_value,
refining_use_denoising_start_in_base_model_when_using_refiner_field_value,
refining_base_model_output_to_refiner_is_in_latent_space_field_value,
refining_denoise_start_for_online_config_field_value,
refining_number_of_iterations_for_online_config_field_value,
upscaling_selection_field_value,
upscaling_num_inference_steps
):
global current_progress_text
global current_actual_total_base_model_steps
global current_actual_total_refiner_steps
current_progress_text = ""
current_actual_total_base_model_steps = 0
current_actual_total_refiner_steps = 0
current_actual_total_upscaler_steps = 0
refining_selection_default_config_field_value = numerical_bool(refining_selection_default_config_field_value)
refining_selection_online_config_normal_field_value = numerical_bool(refining_selection_online_config_normal_field_value)
refining_selection_online_config_automatically_selected_field_value = numerical_bool(refining_selection_online_config_automatically_selected_field_value)
refining_use_denoising_start_in_base_model_when_using_refiner_field_value = numerical_bool(refining_use_denoising_start_in_base_model_when_using_refiner_field_value)
refining_base_model_output_to_refiner_is_in_latent_space_field_value = numerical_bool(refining_base_model_output_to_refiner_is_in_latent_space_field_value)
use_upscaler = numerical_bool(upscaling_selection_field_value)
base_model_name_value = base_model_array[base_model_field_index]
model_configuration_name_value = base_model_object_of_model_configuration_arrays[base_model_name_value][model_configuration_field_index]
current_actual_total_base_model_steps = base_model_num_inference_steps
current_actual_total_upscaler_steps = upscaling_num_inference_steps
is_default_config_state = 0
if model_configuration_name_value in default_model_configuration_object:
is_default_config_state = 1
use_refiner = 0
if (
(
(is_default_config_state == 1) and
refining_selection_default_config_field_value
) or (
(is_default_config_state != 1) and
(
(
(model_configuration_name_value not in model_configuration_force_refiner_object) and
refining_selection_online_config_normal_field_value
) or (
(model_configuration_name_value in model_configuration_force_refiner_object) and
refining_selection_online_config_automatically_selected_field_value
)
)
)
):
use_refiner = 1
if base_model_name_value == "sdxl_turbo":
negative_prompt_text = ""
base_model_num_inference_steps = base_model_num_inference_steps_field_for_sdxl_turbo
current_actual_total_base_model_steps = base_model_num_inference_steps
guidance_scale = 0
global last_model_configuration_name_value
global pipe
global refiner
global upscaler
global image_gallery_array
global prompt_information_array
if (
(last_model_configuration_name_value == "") or
(model_configuration_name_value != last_model_configuration_name_value)
):
current_progress_text = "Base model is loading."
show_message(current_progress_text)
if (last_model_configuration_name_value != ""):
# del pipe
if 'pipe' in globals():
del pipe
if 'refiner' in globals():
del refiner
if 'upscaler' in globals():
del upscaler
import gc
gc.collect()
if device == "cuda":
torch.cuda.empty_cache()
if base_model_name_value == "photoreal":
base_model_kwargs = {
"safety_checker": None,
"requires_safety_checker": False
}
elif base_model_name_value == "sdxl_turbo":
base_model_kwargs = {
"use_safetensors": True,
"safety_checker": None
}
if device == "cuda":
base_model_kwargs["variant"] = "fp16"
else:
base_model_kwargs = {
"use_safetensors": True
}
if device == "cuda":
base_model_kwargs["variant"] = "fp16"
if device == "cuda":
base_model_kwargs["torch_dtype"] = torch.float16
if use_custom_hugging_face_cache_dir == 1:
base_model_kwargs["cache_dir"] = hugging_face_cache_dir
pipe = DiffusionPipeline.from_pretrained(
model_configuration_links_object[model_configuration_name_value],
**base_model_kwargs
)
if use_model_cpu_offload_for_base_model == 1:
pipe.enable_model_cpu_offload()
if use_xformers == 1:
pipe.enable_xformers_memory_efficient_attention()
pipe = pipe.to(device)
if use_sequential_cpu_offload_for_base_model == 1:
pipe.enable_sequential_cpu_offload()
if use_default_attn_processor == 1:
pipe.unet.set_default_attn_processor()
if device == "cuda":
torch.cuda.empty_cache()
else:
pipe.unet = torch.compile(
pipe.unet,
mode = "reduce-overhead",
fullgraph = True
)
last_model_configuration_name_value = model_configuration_name_value
if use_refiner == 1:
current_progress_text = "Refiner is loading."
show_message(current_progress_text)
refiner_kwargs = {
"use_safetensors": True
}
if device == "cuda":
refiner_kwargs["variant"] = "fp16"
refiner_kwargs["torch_dtype"] = torch.float16
if use_custom_hugging_face_cache_dir == 1:
refiner_kwargs["cache_dir"] = hugging_face_cache_dir
refiner = DiffusionPipeline.from_pretrained(
hugging_face_refiner_partial_path,
**refiner_kwargs
)
if use_model_cpu_offload_for_refiner == 1:
refiner.enable_model_cpu_offload()
if use_xformers == 1:
refiner.enable_xformers_memory_efficient_attention()
refiner = refiner.to(device)
if use_sequential_cpu_offload_for_refiner == 1:
refiner.enable_sequential_cpu_offload()
if use_default_attn_processor == 1:
refiner.unet.set_default_attn_processor()
if device == "cuda":
torch.cuda.empty_cache()
else:
refiner.unet = torch.compile(
refiner.unet,
mode = "reduce-overhead",
fullgraph = True
)
if use_upscaler == 1:
current_progress_text = "Upscaler is loading."
show_message(current_progress_text)
upscaler_kwargs = {
"use_safetensors": True
}
if device == "cuda":
upscaler_kwargs["torch_dtype"] = torch.float16
if use_custom_hugging_face_cache_dir == 1:
upscaler_kwargs["cache_dir"] = hugging_face_cache_dir
upscaler = DiffusionPipeline.from_pretrained(
hugging_face_upscaler_partial_path,
**upscaler_kwargs
)
if use_model_cpu_offload_for_upscaler == 1:
upscaler.enable_model_cpu_offload()
if use_xformers == 1:
upscaler.enable_xformers_memory_efficient_attention()
upscaler = upscaler.to(device)
if use_sequential_cpu_offload_for_upscaler == 1:
upscaler.enable_sequential_cpu_offload()
if use_default_attn_processor == 1:
upscaler.unet.set_default_attn_processor()
if device == "cuda":
torch.cuda.empty_cache()
else:
upscaler.unet = torch.compile(
upscaler.unet,
mode = "reduce-overhead",
fullgraph = True
)
if log_generation_times == 1:
start_time = time.time()
    # Only 77 tokens are allowed in the prompt. 2 are reserved, meaning it
    # is truncated to 75. This happens automatically, but we want to tell
    # people when it does.
tokenizer = pipe.tokenizer
max_token_length_of_model = pipe.tokenizer.model_max_length - number_of_reserved_tokens
token_length_of_prompt_text = len(tokenizer.tokenize(prompt_text))
token_length_of_negative_prompt_text = len(tokenizer.tokenize(negative_prompt_text))
prompt_text_not_used_substring = ""
message_about_prompt_truncation = ""
if token_length_of_prompt_text > max_token_length_of_model:
(
prompt_text,
prompt_text_not_used_substring
) = truncate_prompt(
prompt_text
)
message_about_prompt_truncation += "Your prompt has been truncated because it is too long. This part has been truncated:<br/><br/><span style=\"font-style: italic;\">" + prompt_text_not_used_substring + "</span>"
negative_prompt_text_not_used_substring = ""
if token_length_of_negative_prompt_text > max_token_length_of_model:
(
negative_prompt_text,
negative_prompt_text_not_used_substring
) = truncate_prompt(
negative_prompt_text
)
if len(message_about_prompt_truncation) > 0:
message_about_prompt_truncation += "<br/><br/>"
message_about_prompt_truncation += "Your negative prompt has been truncated because it is too long. This part has been truncated:<br/><br/><span style=\"font-style: italic;\">" + negative_prompt_text_not_used_substring + "</span>"
    prompt_truncated_field_update = gr.HTML(
value = "",
visible = False
)
if len(message_about_prompt_truncation) > 0:
        prompt_truncated_field_update = gr.HTML(
value = "<div style=\"padding: 10px; background: #fff;\"><span style=\"font-weight: bold;\">Note</span>: " + message_about_prompt_truncation + "</div>",
visible = True
)
show_message("Note: Part of your prompt has been truncated automatically because it was too long.")
actual_seed = int(actual_seed)
    if actual_seed == 0:
        import random
        default_seed_maximum_for_random = default_seed_maximum
        if default_seed_maximum_for_random > 9007199254740992:
            # If above this number, seeds may not be able to be entered
            # into the slider properly.
            default_seed_maximum_for_random = 9007199254740992
        actual_seed = random.randrange(1, default_seed_maximum_for_random + 1)
if seed_not_valid(actual_seed):
raise Exception("Seed is not valid.")
generator = torch.manual_seed(actual_seed)
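    # Seeding the generator manually is what makes a generation
    # reproducible: the same seed, model, configuration and settings
    # should produce the same image on the same hardware and library
    # versions.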
if show_image_creation_progress_log == 1:
callback_to_do_for_base_model_progress = callback_function_for_base_model_progress
callback_to_do_for_refiner_progress = callback_function_for_refiner_progress
else:
callback_to_do_for_base_model_progress = None
callback_to_do_for_refiner_progress = None
if model_configuration_name_value.find("default") < 0:
#
#
#
# Attempt To Match Online Configurations
#
#
#
prompt = prompt_text
negative_prompt = negative_prompt_text
width = image_width
height = image_height
scale = guidance_scale
steps = base_model_num_inference_steps
refining = use_refiner
if refining == 1:
refining = "Yes"
upscaling = use_upscaler
if upscaling == 1:
upscaling = "Yes"
prompt_2 = ""
negative_prompt_2 = ""
high_noise_frac = refining_denoise_start_for_online_config_field_value
if (
model_configuration_name_value == "sdxl_2023-11-12" or
model_configuration_name_value == "sdxl_2023-09-05"
):
n_steps = refining_number_of_iterations_for_online_config_field_value
upscaling_num_inference_steps = 15
if model_configuration_name_value == "sdxl_2023-09-05":
upscaling_num_inference_steps = 5
current_actual_total_upscaler_steps = upscaling_num_inference_steps
if show_messages_in_command_prompt == 1:
print ("Initial image creation has begun.");
if show_image_creation_progress_log == 1:
current_progress_text = "Initial image creation has begun."
int_image = pipe(
prompt,
prompt_2=prompt_2,
negative_prompt=negative_prompt,
negative_prompt_2=negative_prompt_2,
num_inference_steps=steps,
height=height,
width=width,
guidance_scale=scale,
num_images_per_prompt=1,
generator=generator,
output_type="latent",
callback_on_step_end=callback_to_do_for_base_model_progress
).images
if show_messages_in_command_prompt == 1:
print ("Refiner steps...");
if show_image_creation_progress_log == 1:
current_progress_text = "Refining is beginning."
current_actual_total_refiner_steps = int(int(n_steps) * float(high_noise_frac))
nice_refiner_denoise_start = str(refining_denoise_start_for_online_config_field_value)
            refiner_info_for_info_about_prompt_lines_array = [
                "Refiner? Yes",
                "Refiner denoise start %: " + nice_refiner_denoise_start,
                "Refiner number of iterations: " + str(refining_number_of_iterations_for_online_config_field_value),
                "Actual Refining Steps: " + str(current_actual_total_refiner_steps)
            ]
image = refiner(
prompt=prompt,
prompt_2=prompt_2,
negative_prompt=negative_prompt,
negative_prompt_2=negative_prompt_2,
image=int_image,
num_inference_steps=n_steps,
denoising_start=high_noise_frac,
callback_on_step_end=callback_to_do_for_refiner_progress
).images[0]
if upscaling == 'Yes':
if show_messages_in_command_prompt == 1:
print ("Upscaler steps...");
if show_image_creation_progress_log == 1:
current_progress_text = "Upscaling in progress.\n(step by step progress not displayed)"
# Changed
#
# num_inference_steps=15
#
upscaled = upscaler(
prompt=prompt,
negative_prompt=negative_prompt,
image=image,
num_inference_steps=upscaling_num_inference_steps,
guidance_scale=0
).images[0]
if device == "cuda":
torch.cuda.empty_cache()
image_to_return = upscaled
else:
if device == "cuda":
torch.cuda.empty_cache()
image_to_return = image
elif (
model_configuration_name_value == "photoreal_2023-11-12" or
model_configuration_name_value == "photoreal_2023-09-01"
):
        Prompt = prompt
        nice_refiner_denoise_start = str(refining_denoise_start_for_online_config_field_value)
        # Not a mistake: the original online script used the refining
        # selection for this value, and that is replicated here.
        upscale = refining
if upscale == "Yes":
if show_messages_in_command_prompt == 1:
print ("Initial image creation has begun.");
if show_image_creation_progress_log == 1:
current_progress_text = "Initial image creation has begun."
int_image = pipe(
Prompt,
negative_prompt=negative_prompt,
height=height,
width=width,
num_inference_steps=steps,
guidance_scale=scale,
callback_on_step_end=callback_to_do_for_base_model_progress
).images
if show_messages_in_command_prompt == 1:
print ("Refiner steps...");
if show_image_creation_progress_log == 1:
current_progress_text = "Refining is beginning."
default_steps_in_diffusers = 50
current_actual_total_refiner_steps = int(default_steps_in_diffusers * float(high_noise_frac))
            refiner_info_for_info_about_prompt_lines_array = [
                "Refiner? Yes",
                "Refiner denoise start %: " + nice_refiner_denoise_start,
                "Refiner number of iterations: " + str(current_actual_total_refiner_steps),
                "Actual Refining Steps: " + str(current_actual_total_refiner_steps)
            ]
image = refiner(
Prompt,
negative_prompt=negative_prompt,
image=int_image,
num_inference_steps=default_steps_in_diffusers,
denoising_start=high_noise_frac,
callback_on_step_end=callback_to_do_for_refiner_progress
).images[0]
else:
if show_messages_in_command_prompt == 1:
print ("Image creation has begun.");
if show_image_creation_progress_log == 1:
current_progress_text = "Image creation has begun."
image = pipe(
Prompt,
negative_prompt=negative_prompt,
height=height,
width=width,
num_inference_steps=steps,
guidance_scale=scale,
callback_on_step_end=callback_to_do_for_base_model_progress
).images[0]
image_to_return = image
else:
#
#
#
# My Configurations
#
#
#
if use_refiner == 1:
if refining_use_denoising_start_in_base_model_when_using_refiner_field_value == 1:
denoising_end = refining_denoise_start_for_default_config_field_value
current_actual_total_base_model_steps = int(base_model_num_inference_steps * float(refining_denoise_start_for_default_config_field_value))
else:
denoising_end = None
output_type_before_refiner = "pil"
if refining_base_model_output_to_refiner_is_in_latent_space_field_value == 1:
output_type_before_refiner = "latent"
current_actual_total_refiner_steps = (base_model_num_inference_steps - int(base_model_num_inference_steps * float(refining_denoise_start_for_default_config_field_value)))
refiner_info_for_info_about_prompt_lines_array = [
"Refiner? Yes"
]
nice_refiner_denoise_start = str(refining_denoise_start_for_online_config_field_value)
if refining_use_denoising_start_in_base_model_when_using_refiner_field_value == 1:
refiner_info_for_info_about_prompt_lines_array.extend([
"Set \"denoising_end\" in base model generation? Yes",
"Base model denoise end %: " + nice_refiner_denoise_start,
"Actual Base Model Steps: " + str(current_actual_total_base_model_steps)
])
else:
refiner_info_for_info_about_prompt_lines_array.extend([
"Set \"denoising_end\" in base model generation? No",
])
refiner_info_for_info_about_prompt_lines_array.extend([
"Refiner denoise start %: " + nice_refiner_denoise_start,
"Actual Refining Steps: " + str(current_actual_total_refiner_steps)
])
if refining_base_model_output_to_refiner_is_in_latent_space_field_value == 1:
refiner_info_for_info_about_prompt_lines_array.extend([
"Base model output in latent space before refining? Yes",
])
else:
refiner_info_for_info_about_prompt_lines_array.extend([
"Base model output in latent space before refining? No",
])
if use_upscaler == 1:
if show_messages_in_command_prompt == 1:
print ("Will create initial image, then refine and then upscale.");
print ("Initial image steps...");
if show_image_creation_progress_log == 1:
current_progress_text = "Initial image creation has begun."
                initial_image = pipe(
prompt = prompt_text,
negative_prompt = negative_prompt_text,
width = image_width,
height = image_height,
num_inference_steps = base_model_num_inference_steps,
guidance_scale = guidance_scale,
num_images_per_prompt = 1,
generator = generator,
denoising_end = denoising_end,
output_type = output_type_before_refiner,
callback_on_step_end = callback_to_do_for_base_model_progress
).images
if show_messages_in_command_prompt == 1:
print ("Refiner steps...");
if show_image_creation_progress_log == 1:
current_progress_text = "Refining is beginning."
refined_image = refiner(
prompt = prompt_text,
negative_prompt = negative_prompt_text,
                    image = initial_image,
num_inference_steps = base_model_num_inference_steps,
denoising_start = refining_denoise_start_for_default_config_field_value,
output_type = "pil",
callback_on_step_end = callback_to_do_for_refiner_progress
).images
if show_messages_in_command_prompt == 1:
print ("Upscaler steps...");
if show_image_creation_progress_log == 1:
current_progress_text = "Upscaling in progress.\n(step by step progress not displayed)"
upscaled_image = upscaler(
prompt = prompt_text,
negative_prompt = negative_prompt_text,
image = refined_image,
num_inference_steps = upscaling_num_inference_steps,
guidance_scale = 0
).images[0]
if device == "cuda":
torch.cuda.empty_cache()
image_to_return = upscaled_image
else:
if show_messages_in_command_prompt == 1:
print ("Will create initial image and then refine.");
print ("Initial image steps...");
if show_image_creation_progress_log == 1:
current_progress_text = "Initial image creation has begun."
                initial_image = pipe(
prompt = prompt_text,
negative_prompt = negative_prompt_text,
width = image_width,
height = image_height,
num_inference_steps = base_model_num_inference_steps,
guidance_scale = guidance_scale,
num_images_per_prompt = 1,
generator = generator,
denoising_end = denoising_end,
output_type = output_type_before_refiner,
callback_on_step_end = callback_to_do_for_base_model_progress
).images
if show_messages_in_command_prompt == 1:
print ("Refiner steps...");
if show_image_creation_progress_log == 1:
current_progress_text = "Refining is beginning."
refined_image = refiner(
prompt = prompt_text,
negative_prompt = negative_prompt_text,
                    image = initial_image,
num_inference_steps = base_model_num_inference_steps,
denoising_start = refining_denoise_start_for_default_config_field_value,
callback_on_step_end = callback_to_do_for_refiner_progress
).images[0]
if device == "cuda":
torch.cuda.empty_cache()
image_to_return = refined_image
else:
if use_upscaler == 1:
if show_messages_in_command_prompt == 1:
print ("Will create initial image and then upscale.");
print ("Initial image steps...");
if show_image_creation_progress_log == 1:
current_progress_text = "Initial image creation has begun."
                initial_image = pipe(
prompt = prompt_text,
negative_prompt = negative_prompt_text,
width = image_width,
height = image_height,
num_inference_steps = base_model_num_inference_steps,
guidance_scale = guidance_scale,
num_images_per_prompt = 1,
generator = generator,
output_type = "pil",
callback_on_step_end = callback_to_do_for_base_model_progress
).images
if show_messages_in_command_prompt == 1:
print ("Upscaler steps...");
if show_image_creation_progress_log == 1:
current_progress_text = "Upscaling in progress.\n(step by step progress not displayed)"
upscaled_image = upscaler(
prompt = prompt_text,
negative_prompt = negative_prompt_text,
                    image = initial_image,
num_inference_steps = upscaling_num_inference_steps,
guidance_scale = 0
).images[0]
if device == "cuda":
torch.cuda.empty_cache()
image_to_return = upscaled_image
else:
if show_messages_in_command_prompt == 1:
print ("Will create image (no refining or upscaling).");
print ("Image steps...");
if show_image_creation_progress_log == 1:
current_progress_text = "Image creation has begun."
image = pipe(
prompt = prompt_text,
negative_prompt = negative_prompt_text,
width = image_width,
height = image_height,
num_inference_steps = base_model_num_inference_steps,
guidance_scale = guidance_scale,
num_images_per_prompt = 1,
generator = generator,
callback_on_step_end = callback_to_do_for_base_model_progress
).images[0]
if device == "cuda":
torch.cuda.empty_cache()
image_to_return = image
#
#
#
# Prompt Information
#
#
#
nice_model_name = base_model_names_object[base_model_name_value] + " (" + model_configuration_links_object[model_configuration_name_value] + ")"
info_about_prompt_lines_array = [
"Prompt:\n" + prompt_text
]
if len(negative_prompt_text) > 0:
info_about_prompt_lines_array.extend([
"Negative Prompt:\n" + negative_prompt_text
])
dimensions_title = "Dimensions"
if use_upscaler == 1:
dimensions_title = "Original Dimensions"
info_about_prompt_lines_array.extend([
dimensions_title + ": " + str(image_width) + "x" + str(image_height) + " px"
])
if use_upscaler == 1:
upscaled_image_width = int(image_width * 2)
upscaled_image_height = int(image_height * 2)
info_about_prompt_lines_array.extend([
"Upscaled Dimensions: " + str(upscaled_image_width) + "x" + str(upscaled_image_height) + " px"
])
info_about_prompt_lines_array.extend([
"Seed: " + str(actual_seed)
])
if int(guidance_scale) > 0:
info_about_prompt_lines_array.extend([
"Guidance Scale: " + str(guidance_scale)
])
info_about_prompt_lines_array.extend([
"Steps: " + str(base_model_num_inference_steps),
"Model: " + nice_model_name
])
if use_refiner == 1:
# Default Configuration
info_about_prompt_lines_array.extend(refiner_info_for_info_about_prompt_lines_array)
if use_upscaler == 1:
info_about_prompt_lines_array.extend([
"Upscaled (2x)? Yes",
"Refiner Steps: " + str(upscaling_num_inference_steps)
])
if log_generation_times == 1:
end_time = time.time()
generation_time_in_seconds = (end_time - start_time)
(
generation_partial_hours,
generation_partial_minutes,
generation_partial_seconds
) = convert_seconds(generation_time_in_seconds)
if generation_partial_hours > 0:
hours_text = "hr"
if generation_partial_hours > 1:
hours_text = "hrs"
nice_generation_time = str(int(generation_partial_hours)) + " " + hours_text + ". " + str(int(generation_partial_minutes)) + " min. " + str(round(generation_partial_seconds, 1)) + " sec."
elif generation_partial_minutes > 0:
nice_generation_time = str(int(generation_partial_minutes)) + " min. " + str(round(generation_partial_seconds, 1)) + " sec."
else:
nice_generation_time = str(round(generation_time_in_seconds, 2)) + " sec."
info_about_prompt_lines_array.extend([
"Time: " + nice_generation_time
])
if len(prompt_text_not_used_substring) > 0:
info_about_prompt_lines_array.extend([
"End of Prompt Truncated: " + prompt_text_not_used_substring
])
if len(negative_prompt_text_not_used_substring) > 0:
info_about_prompt_lines_array.extend([
"End of Negative Prompt Truncated: " + negative_prompt_text_not_used_substring
])
if display_xformers_usage_in_prompt_info > 0:
nice_xformers_usage = "No"
if use_xformers == 1:
nice_xformers_usage = "Yes"
if include_transformers_version_in_prompt_info == 1:
import transformers
nice_xformers_usage += " (version " + str(transformers.__version__) + ")"
info_about_prompt_lines_array.extend([
"xFormers Used?: " + nice_xformers_usage
])
if display_default_attn_processor_usage_in_prompt_info > 0:
nice_default_attn_processor_usage = "No"
if use_default_attn_processor == 1:
nice_default_attn_processor_usage = "Yes"
info_about_prompt_lines_array.extend([
"Default AttnProcessor Used?: " + nice_default_attn_processor_usage
])
info_about_prompt = '\n'.join(info_about_prompt_lines_array)
if auto_save_imagery == 1:
if not os.path.exists(saved_images_dir):
os.makedirs(saved_images_dir)
yy_mm_dd_date_stamp = datetime.today().strftime('%Y-%m-%d')
saved_images_date_dir = saved_images_dir + "/" + yy_mm_dd_date_stamp + "/"
if not os.path.exists(saved_images_date_dir):
os.makedirs(saved_images_date_dir)
        # Find the first unused numbered file name for today's date.
        image_count = 1
        file_name_without_extension = yy_mm_dd_date_stamp + "-" + ('%04d' % image_count)
        saved_image_path_and_file = saved_images_date_dir + file_name_without_extension + ".png"
        while os.path.exists(saved_image_path_and_file):
            image_count += 1
            file_name_without_extension = yy_mm_dd_date_stamp + "-" + ('%04d' % image_count)
            saved_image_path_and_file = saved_images_date_dir + file_name_without_extension + ".png"
        image_to_return.save(saved_image_path_and_file)
        saved_text_file_path_and_file = saved_images_date_dir + file_name_without_extension + ".txt"
        with open(saved_text_file_path_and_file, "w") as prompt_info_file_handle:
            prompt_info_file_handle.write(info_about_prompt)
if use_image_gallery == 1:
image_gallery_array.insert(0, image_to_return)
prompt_information_array.insert(0, info_about_prompt)
output_image_field_update = gr.Gallery(
value = image_gallery_array,
selected_index = 0
)
else:
output_image_field_update = gr.Image(
value = image_to_return
)
if show_messages_in_command_prompt == 1:
print ("Image created.")
return {
output_image_field: output_image_field_update,
output_text_field: info_about_prompt,
prompt_truncated_field: prompt_truncated_field_udpate
}
#####################
#
# Cancel Image Processing
#
# When running on Windows, this attempts to close the command prompt
# from the web display. It is of limited value, since you can simply
# close the prompt yourself. A clean way to cancel image creation
# without killing the prompt was not found.
#
#####################
def cancel_image_processing():
# Stopping image generation without closing the command prompt could not
# be figured out. Closing the prompt requires running the taskkill
# command below twice for some reason.
#
# Method:
# https://stackoverflow.com/questions/67146623/how-to-close-the-command-prompt-from-python-script-directly
gr.Warning("The command prompt window has been closed. Any image generation in progress has been stopped. To generate any other images, you will need to launch the command prompt again.")
os.system('title kill_window')
os.system('taskkill /f /fi "WINDOWTITLE eq kill_window"')
os.system('taskkill /f /fi "WINDOWTITLE eq kill_window"')
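#
# A gentler alternative to closing the whole command prompt, should it
# ever be wired in: recent versions of diffusers accept a
# "callback_on_step_end" function on the pipeline call, and raising an
# exception from that callback aborts just the current generation. The
# sketch below is illustrative only and is not used by this script;
# "cancel_requested" and "abort_generation_if_cancelled" are
# hypothetical names.
#
cancel_requested = False
def abort_generation_if_cancelled(pipeline, step_index, timestep, callback_kwargs):
    # Runs after every denoising step; raising here stops the pipeline.
    if cancel_requested:
        raise RuntimeError("Image generation cancelled by user.")
    return callback_kwargs
# The pipeline call would then include, inside a try/except:
# pipe(prompt = ..., callback_on_step_end = abort_generation_if_cancelled)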
#####################
#
# Base Model Field Update Function
#
# When the base model dropdown changes, this function is run.
#
#####################
def base_model_field_update_function(
base_model_field_index
):
base_model_field_value = base_model_array[base_model_field_index]
if base_model_field_value in base_model_array:
if base_model_field_value in base_model_object_of_model_configuration_arrays:
model_configuration_choices_array_update = []
for this_model_configuration in base_model_object_of_model_configuration_arrays[base_model_field_value]:
model_configuration_choices_array_update.append(
model_configuration_names_object[this_model_configuration]
)
if base_model_field_value in base_model_model_configuration_defaults_object:
model_configuration_field_selected_value = stored_model_configuration_names_object[base_model_field_value]
model_configuration_field_update = gr.Dropdown(
choices = model_configuration_choices_array_update,
value = model_configuration_field_selected_value
)
negative_prompt_field_visibility = True
negative_prompt_for_sdxl_turbo_field_visibility = False
base_model_num_inference_steps_field_visibility = True
base_model_num_inference_steps_field_for_sdxl_turbo_visibility = False
guidance_scale_field_visibility = True
guidance_scale_for_sdxl_turbo_field_visibility = False
if base_model_field_value == "sdxl_turbo":
negative_prompt_field_visibility = False
negative_prompt_for_sdxl_turbo_field_visibility = True
base_model_num_inference_steps_field_visibility = False
base_model_num_inference_steps_field_for_sdxl_turbo_visibility = True
guidance_scale_field_visibility = False
guidance_scale_for_sdxl_turbo_field_visibility = True
negative_prompt_field_update = gr.Textbox(
visible = negative_prompt_field_visibility
)
negative_prompt_for_sdxl_turbo_field_update = gr.HTML(
visible = negative_prompt_for_sdxl_turbo_field_visibility
)
base_model_num_inference_steps_field_update = gr.Slider(
visible = base_model_num_inference_steps_field_visibility
)
base_model_num_inference_steps_field_for_sdxl_turbo_update = gr.Slider(
visible = base_model_num_inference_steps_field_for_sdxl_turbo_visibility
)
guidance_scale_field_update = gr.Slider(
visible = guidance_scale_field_visibility
)
guidance_scale_for_sdxl_turbo_field_update = gr.HTML(
visible = guidance_scale_for_sdxl_turbo_field_visibility
)
return {
model_configuration_field: model_configuration_field_update,
negative_prompt_field: negative_prompt_field_update,
negative_prompt_for_sdxl_turbo_field: negative_prompt_for_sdxl_turbo_field_update,
base_model_num_inference_steps_field: base_model_num_inference_steps_field_update,
base_model_num_inference_steps_field_for_sdxl_turbo_field: base_model_num_inference_steps_field_for_sdxl_turbo_update,
guidance_scale_field: guidance_scale_field_update,
guidance_scale_for_sdxl_turbo_field: guidance_scale_for_sdxl_turbo_field_update
}
error_function("Error")
#####################
#
# Model Configuration Field Update Function
#
# When the model configuration dropdown changes, this function is run.
#
#####################
def model_configuration_field_update_function(
base_model_field_index,
model_configuration_field_index
):
base_model_field_value = base_model_array[base_model_field_index]
# Guard: make sure the selected index is valid for this base model.
if model_configuration_field_index < len(base_model_object_of_model_configuration_arrays[base_model_field_value]):
model_configuration_name_value = base_model_object_of_model_configuration_arrays[base_model_field_value][model_configuration_field_index]
stored_model_configuration_names_object[base_model_field_value] = model_configuration_names_object[model_configuration_name_value]
is_default_config_state = 0
if model_configuration_name_value in default_model_configuration_object:
is_default_config_state = 1
negative_prompt_field_visibility = True
negative_prompt_for_sdxl_turbo_field_visibility = False
base_model_num_inference_steps_field_visibility = True
base_model_num_inference_steps_field_for_sdxl_turbo_visibility = False
guidance_scale_field_visibility = True
guidance_scale_for_sdxl_turbo_field_visibility = False
if base_model_field_value == "sdxl_turbo":
negative_prompt_field_visibility = False
negative_prompt_for_sdxl_turbo_field_visibility = True
base_model_num_inference_steps_field_visibility = False
base_model_num_inference_steps_field_for_sdxl_turbo_visibility = True
guidance_scale_field_visibility = False
guidance_scale_for_sdxl_turbo_field_visibility = True
negative_prompt_field_update = gr.Textbox(
visible = negative_prompt_field_visibility
)
negative_prompt_for_sdxl_turbo_field_update = gr.HTML(
visible = negative_prompt_for_sdxl_turbo_field_visibility
)
base_model_num_inference_steps_field_update = gr.Slider(
visible = base_model_num_inference_steps_field_visibility
)
base_model_num_inference_steps_field_for_sdxl_turbo_update = gr.Slider(
visible = base_model_num_inference_steps_field_for_sdxl_turbo_visibility
)
guidance_scale_field_update = gr.Slider(
visible = guidance_scale_field_visibility
)
guidance_scale_for_sdxl_turbo_field_update = gr.HTML(
visible = guidance_scale_for_sdxl_turbo_field_visibility
)
refiner_default_config_accordion_visibility = False
refiner_online_config_accordion_visibility = True
if is_default_config_state == 1:
refiner_default_config_accordion_visibility = True
refiner_online_config_accordion_visibility = False
refining_selection_automatically_selected_message_field_visibility = False
refining_selection_online_config_normal_field_visibility = True
refining_selection_online_config_automatically_selected_field_visibility = False
if model_configuration_name_value in model_configuration_force_refiner_object:
refining_selection_automatically_selected_message_field_visibility = True
refining_selection_online_config_normal_field_visibility = False
refining_selection_online_config_automatically_selected_field_visibility = True
refining_number_of_iterations_for_online_config_field_visibility = False
if model_configuration_name_value in model_configuration_include_refiner_number_of_steps_object:
refining_number_of_iterations_for_online_config_field_visibility = True
refiner_default_config_accordion_update = gr.Accordion(
visible = refiner_default_config_accordion_visibility
)
refiner_online_config_accordion_update = gr.Accordion(
visible = refiner_online_config_accordion_visibility
)
refining_selection_automatically_selected_message_field_update = gr.Markdown(
visible = refining_selection_automatically_selected_message_field_visibility
)
refining_selection_online_config_normal_field_update = gr.Radio(
visible = refining_selection_online_config_normal_field_visibility
)
refining_selection_online_config_automatically_selected_field_update = gr.Radio(
visible = refining_selection_online_config_automatically_selected_field_visibility
)
refining_number_of_iterations_for_online_config_field_update = gr.Radio(
visible = refining_number_of_iterations_for_online_config_field_visibility
)
return {
negative_prompt_field: negative_prompt_field_update,
negative_prompt_for_sdxl_turbo_field: negative_prompt_for_sdxl_turbo_field_update,
base_model_num_inference_steps_field: base_model_num_inference_steps_field_update,
base_model_num_inference_steps_field_for_sdxl_turbo_field: base_model_num_inference_steps_field_for_sdxl_turbo_update,
guidance_scale_field: guidance_scale_field_update,
guidance_scale_for_sdxl_turbo_field: guidance_scale_for_sdxl_turbo_field_update,
refiner_default_config_accordion: refiner_default_config_accordion_update,
refiner_online_config_accordion: refiner_online_config_accordion_update,
refining_selection_automatically_selected_message_field: refining_selection_automatically_selected_message_field_update,
refining_selection_online_config_normal_field: refining_selection_online_config_normal_field_update,
refining_selection_online_config_automatically_selected_field: refining_selection_online_config_automatically_selected_field_update,
refining_number_of_iterations_for_online_config_field: refining_number_of_iterations_for_online_config_field_update
}
error_function("Error")
#####################
#
# Update Refiner and Upscaler Status Function
#
# When the refiner or upscaler is turned on or off, a text message is
# printed on the page. That needs to be updated.
#
#####################
def update_refiner_and_upscaler_status_function(
base_model_field_index,
model_configuration_field_index,
refining_selection_default_config_field_value,
refining_selection_online_config_normal_field_value,
refining_selection_online_config_automatically_selected_field_value,
upscaling_selection_field_value
):
base_model_field_value = base_model_array[base_model_field_index]
# Guard: make sure the selected index is valid for this base model.
if model_configuration_field_index < len(base_model_object_of_model_configuration_arrays[base_model_field_value]):
model_configuration_name_value = base_model_object_of_model_configuration_arrays[base_model_field_value][model_configuration_field_index]
is_default_config_state = 0
if model_configuration_name_value in default_model_configuration_object:
is_default_config_state = 1
refining_selection_default_config_field_value = numerical_bool(refining_selection_default_config_field_value)
refining_selection_online_config_normal_field_value = numerical_bool(refining_selection_online_config_normal_field_value)
refining_selection_online_config_automatically_selected_field_value = numerical_bool(refining_selection_online_config_automatically_selected_field_value)
upscaling_selection_field_value = numerical_bool(upscaling_selection_field_value)
refiner_and_upscaler_status_text = refiner_and_upscaler_status_opening_html
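# The refiner is reported as on when either: (a) a default configuration
# is selected and its refiner option is "Yes", or (b) an online
# configuration is selected and whichever online refiner field applies
# (the normal one, or the automatically-selected one when the
# configuration forces the refiner) is "Yes".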
if (
(
(is_default_config_state == 1) and
refining_selection_default_config_field_value
) or (
(is_default_config_state != 1) and
(
(
(model_configuration_name_value not in model_configuration_force_refiner_object) and
refining_selection_online_config_normal_field_value
) or (
(model_configuration_name_value in model_configuration_force_refiner_object) and
refining_selection_online_config_automatically_selected_field_value
)
)
)
):
refiner_and_upscaler_status_text += refiner_on_text
else:
refiner_and_upscaler_status_text += refiner_off_text
if upscaling_selection_field_value == 1:
refiner_and_upscaler_status_text += upscaler_on_text
else:
refiner_and_upscaler_status_text += upscaler_off_text
refiner_and_upscaler_status_text += refiner_and_upscaler_status_closing_html
refiner_and_upscaler_text_field_update = gr.HTML(
value = refiner_and_upscaler_status_text
)
return {
refiner_and_upscaler_text_field: refiner_and_upscaler_text_field_update
}
error_function("Error")
###############################################################################
###############################################################################
#
#
#
#
#
#
# Create Web Display
#
#
#
#
#
#
###############################################################################
###############################################################################
# Hide border when yield is used:
# https://github.com/gradio-app/gradio/issues/5479
# .generating {border: none !important;}
#
# Remove orange border for generation progress.
# #generation_progress_id div {border: none;}
css_to_use = "footer{display:none !important}"
if show_image_creation_progress_log == 1:
css_to_use += "#generation_progress_id div {border: none;}"
with gr.Blocks(
title = "AI Image Creation",
css = css_to_use,
theme = gr.themes.Default(
spacing_size = gr.themes.sizes.spacing_md,
# spacing_size = gr.themes.sizes.spacing_sm,
radius_size = gr.themes.sizes.radius_none
),
analytics_enabled = False
) as sd_interface:
gr.Markdown(opening_html)
with gr.Row():
with gr.Column(scale = 1):
generate_image_btn = gr.Button(
value = "Generate",
variant = "primary"
)
with gr.Group():
with gr.Row():
prompt_field = gr.Textbox(
label = "Prompt (77 token limit):",
value = default_prompt
)
with gr.Row():
negative_prompt_field = gr.Textbox(
label = "Negative Prompt (77 token limit):",
value = default_negative_prompt,
visible = default_negative_prompt_field_visibility
)
with gr.Row():
negative_prompt_for_sdxl_turbo_field = gr.HTML(
value = "<div style=\"padding: 10px; text-align: center; background: #fff;\">Negative prompt is not used for SDXL Turbo.</div>",
visible = default_negative_prompt_for_sdxl_turbo_field_visibility
)
with gr.Group(
visible = refiner_group_visible
):
with gr.Accordion(
label = "Refiner (Default configuration)",
elem_id = "refiner_default_config_accordion_id",
open = refiner_default_config_accordion_open,
visible = refiner_default_config_accordion_visible
) as refiner_default_config_accordion:
#
#
#
# Refiner (Default configuration)
#
#
#
with gr.Row():
gr.Markdown("This can be used if the image has too much noise.")
with gr.Row():
refining_selection_default_config_field = gr.Radio(
choices = ["Yes", "No"],
value = default_refine_option,
show_label = False,
container = False
)
with gr.Row():
refining_denoise_start_for_default_config_field = gr.Slider(
label = "Refiner denoise start %",
minimum = 0.7,
maximum = 0.99,
value = 0.95,
step = 0.01
)
with gr.Row():
refiner_steps_text_field = gr.HTML(
value = ""
)
with gr.Row():
refining_use_denoising_start_in_base_model_when_using_refiner_field = gr.Checkbox(
label = "Use \"denoising_start\" value as \"denoising_end\" value in base model generation when using refiner",
value = default_use_denoising_start_in_base_model_when_using_refiner_is_selected,
interactive = True,
container = True
)
with gr.Row():
refining_base_model_output_to_refiner_is_in_latent_space_field = gr.Checkbox(
label = "Base model output in latent space instead of PIL image when using refiner",
value = default_base_model_output_to_refiner_is_in_latent_space_is_selected,
interactive = True,
container = True
)
with gr.Accordion(
label = "Refiner (Online configuration)",
elem_id = "refiner_online_config_accordion_id",
open = refiner_online_config_accordion_open,
visible = refiner_online_config_accordion_visible
) as refiner_online_config_accordion:
#
#
#
# Refiner (Online configuration)
#
#
#
refining_selection_automatically_selected_message_field_visible = False
refining_selection_online_config_normal_field_visible = True
refining_selection_online_config_automatically_selected_field_visible = False
if model_configuration_requires_refiner == 1:
refining_selection_automatically_selected_message_field_visible = True
refining_selection_online_config_normal_field_visible = False
refining_selection_online_config_automatically_selected_field_visible = True
with gr.Row():
refining_selection_automatically_selected_message_field = gr.Markdown(
value = "The online configuration you selected automatically uses the refiner.",
visible = refining_selection_automatically_selected_message_field_visible
)
with gr.Row():
refining_selection_online_config_normal_field = gr.Radio(
choices = ["Yes", "No"],
value = default_refine_option,
show_label = False,
container = False,
visible = refining_selection_online_config_normal_field_visible
)
with gr.Row():
refining_selection_online_config_automatically_selected_field = gr.Radio(
choices = ["Yes"],
value = "Yes",
show_label = False,
container = False,
visible = refining_selection_online_config_automatically_selected_field_visible
)
with gr.Row():
refining_denoise_start_for_online_config_field = gr.Slider(
label = "Refiner denoise start %",
minimum = 0.7,
maximum = 0.99,
value = 0.95,
step = 0.01
)
with gr.Row():
refining_number_of_iterations_for_online_config_field_visible = False
if default_model_configuration in model_configuration_include_refiner_number_of_steps_object:
refining_number_of_iterations_for_online_config_field_visible = True
refining_number_of_iterations_for_online_config_field = gr.Slider(
label = "Refiner number of iterations",
minimum = 1,
maximum = 100,
value = 100,
step = 1,
visible = refining_number_of_iterations_for_online_config_field_visible
)
with gr.Group(
visible = upscaler_group_visible
):
with gr.Accordion(
label = "Upscaler",
elem_id = "upscaler_accordion_id",
open = upscaler_accordion_open,
visible = upscaler_group_visible
):
#
#
#
# Upscaler
#
#
#
with gr.Row():
gr.Markdown("Upscale by 2x?")
with gr.Row():
upscaling_selection_field = gr.Radio(
choices = ['Yes', 'No'],
value = default_upscale_option,
show_label = False,
container = False
)
with gr.Row():
upscaling_num_inference_steps_field = gr.Slider(
label = "Upscaler number of iterations",
minimum = 1,
maximum = 100,
value = 100,
step = 1
)
if (
(enable_refiner == 1) or
(enable_upscaler == 1)
):
refiner_and_upscaler_text_field = gr.HTML(
value = default_refiner_and_upscaler_status_text
)
with gr.Column(scale = 1):
with gr.Group():
with gr.Row():
base_model_field = gr.Dropdown(
label = "Base Model:",
choices = default_base_model_choices_array,
value = default_base_model_nicely_named_value,
type = "index",
#info = "Main model type",
filterable = False,
min_width = 240,
interactive = True
)
model_configuration_field = gr.Dropdown(
label = "Configuration Type:",
choices = default_model_configuration_choices_array,
value = default_model_configuration_nicely_named_value,
type = "index",
#info = "See end of page for info.",
filterable = False,
min_width = 240,
interactive = True
)
with gr.Row():
image_width_field = gr.Slider(
label = "Width:",
minimum = 256,
maximum = 1024,
value = default_width,
step = width_and_height_input_slider_steps,
interactive = True
)
image_height_field = gr.Slider(
label = "Height:",
minimum = 256,
maximum = 1024,
value = default_height,
step = width_and_height_input_slider_steps,
interactive = True
)
with gr.Row():
base_model_num_inference_steps_field = gr.Slider(
label = "Steps:",
minimum = 1,
maximum = 100,
value = default_base_model_base_model_num_inference_steps,
step = 1,
visible = default_base_model_num_inference_steps_field_visibility,
interactive = True
)
with gr.Row():
base_model_num_inference_steps_field_for_sdxl_turbo_field = gr.Slider(
label = "Steps:",
info = "Try using only 1 or a couple of steps.",
minimum = 1,
maximum = 25,
value = default_base_model_base_model_num_inference_steps_for_sdxl_turbo,
step = 1,
visible = default_base_model_num_inference_steps_field_for_sdxl_turbo_field_visibility,
interactive = True
)
with gr.Row():
guidance_scale_field = gr.Slider(
label = "Guidance Scale:",
minimum = 1,
maximum = 15,
value = default_guidance_scale_value,
step = 0.25,
visible = default_guidance_scale_field_visibility,
interactive = True
)
with gr.Row():
guidance_scale_for_sdxl_turbo_field = gr.HTML(
value = "<div style=\"padding: 10px; text-align: center; background: #fff;\">Guidance scale is not used for SDXL Turbo.</div>",
visible = default_guidance_scale_for_sdxl_turbo_field_visibility
)
with gr.Row():
# Create only one seed component; instantiating both inside gr.Blocks
# would render both on the page.
if make_seed_selection_a_textbox == 1:
seed_selection_option = gr.Textbox(
label = "Seed (0 is random; " + str(default_seed_maximum) + " max):",
value = "0",
interactive = True
)
else:
seed_selection_option = gr.Slider(
label = "Seed (0 is random):",
minimum = 0,
maximum = default_seed_maximum,
value = default_seed_value,
step = 1,
interactive = True
)
seed_field = seed_selection_option
with gr.Column(scale = 1):
with gr.Row():
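# Either a gallery (so images from this session can be revisited) or a
# single image component is created, depending on the configuration.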
if use_image_gallery == 1:
show_download_button = False
if show_download_button_for_gallery == 1:
show_download_button = True
output_image_field = gr.Gallery(
label = "Generated Images",
value = [],
# columns = 1,
# rows = 1,
selected_index = 0,
elem_id = "image_gallery",
allow_preview = "True",
preview = True,
show_download_button = show_download_button
)
else:
output_image_field = gr.Image(
label = "Generated Image",
type = "pil"
)
with gr.Row():
output_text_field = gr.Textbox(
label = "Prompt Information:",
value = "After an image is generated, its generation information will appear here." + additional_prompt_info_html,
show_copy_button = True,
lines = 10,
max_lines = 20,
every = None
# container = False
)
with gr.Row():
log_text_field = gr.Textbox(
label = "Generation Progress:",
elem_id = "generation_progress_id",
elem_classes = "",
interactive = False,
value = "",
show_copy_button = False,
visible = False
)
with gr.Row():
prompt_truncated_field = gr.HTML(
value = "",
visible = False
)
if enable_close_command_prompt_button == 1:
cancel_image_btn = gr.Button(
value = "Close Command Prompt / Cancel",
variant = "stop"
)
gr.Markdown("Closing the command prompt will cancel any images in the process of being created. You will need to launch it again to create more images.")
if len(ending_html) > 0:
with gr.Accordion(
label = "Information",
elem_id = "information_section_id",
open = True
):
gr.Markdown(ending_html)
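# Event wiring: each dropdown's ".change" event calls its matching
# update function above so that dependent fields show or hide
# immediately.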
base_model_field.change(
fn = base_model_field_update_function,
inputs = [
base_model_field
],
outputs = [
model_configuration_field,
negative_prompt_field,
negative_prompt_for_sdxl_turbo_field,
base_model_num_inference_steps_field,
base_model_num_inference_steps_field_for_sdxl_turbo_field,
guidance_scale_field,
guidance_scale_for_sdxl_turbo_field
],
queue = None,
show_progress = "hidden"
)
model_configuration_field.change(
fn = model_configuration_field_update_function,
inputs = [
base_model_field,
model_configuration_field
],
outputs = [
negative_prompt_field,
negative_prompt_for_sdxl_turbo_field,
base_model_num_inference_steps_field,
base_model_num_inference_steps_field_for_sdxl_turbo_field,
guidance_scale_field,
guidance_scale_for_sdxl_turbo_field,
refiner_default_config_accordion,
refiner_online_config_accordion,
refining_selection_automatically_selected_message_field,
refining_selection_online_config_normal_field,
refining_selection_online_config_automatically_selected_field,
refining_number_of_iterations_for_online_config_field
],
queue = None,
show_progress = "hidden"
)
if use_image_gallery == 1:
output_image_field.select(
fn = update_prompt_info_from_gallery,
inputs = None,
outputs = [
output_image_field,
output_text_field
],
show_progress = "hidden"
)
if (
(enable_refiner == 1) or
(enable_upscaler == 1)
):
triggers_array = []
if enable_refiner == 1:
triggers_array.extend([
refining_selection_default_config_field.change,
refining_selection_online_config_normal_field.change,
refining_selection_online_config_automatically_selected_field.change,
model_configuration_field.change
])
if enable_upscaler == 1:
triggers_array.extend([
upscaling_selection_field.change
])
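# "gr.on" binds the one status function to every trigger in the list,
# so the message refreshes no matter which of these fields changed.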
gr.on(
triggers = triggers_array,
fn = update_refiner_and_upscaler_status_function,
inputs = [
base_model_field,
model_configuration_field,
refining_selection_default_config_field,
refining_selection_online_config_normal_field,
refining_selection_online_config_automatically_selected_field,
upscaling_selection_field
],
outputs = [
refiner_and_upscaler_text_field
],
queue = None,
show_progress = "hidden"
)
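# The generate button chains three stages: prepare the page and button
# state, create the image, then restore them.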
generate_image_btn_click_event = generate_image_btn.click(
fn = before_create_image_function,
inputs = [],
outputs = [
output_image_field,
output_text_field,
log_text_field,
generate_image_btn
],
show_progress = "minimal",
queue = True
).then(
fn = create_image_function,
inputs = [
base_model_field,
model_configuration_field,
prompt_field,
negative_prompt_field,
image_width_field,
image_height_field,
guidance_scale_field,
base_model_num_inference_steps_field,
base_model_num_inference_steps_field_for_sdxl_turbo_field,
seed_field,
refining_selection_default_config_field,
refining_selection_online_config_normal_field,
refining_selection_online_config_automatically_selected_field,
refining_denoise_start_for_default_config_field,
refining_use_denoising_start_in_base_model_when_using_refiner_field,
refining_base_model_output_to_refiner_is_in_latent_space_field,
refining_denoise_start_for_online_config_field,
refining_number_of_iterations_for_online_config_field,
upscaling_selection_field,
upscaling_num_inference_steps_field
],
outputs = [
output_image_field,
output_text_field,
prompt_truncated_field
],
queue = True
).then(
fn = after_create_image_function,
inputs = [],
outputs = [
output_text_field,
log_text_field,
generate_image_btn
],
queue = False
)
sd_interface_load_kwargs = {
"scroll_to_output": False,
"show_progress": "full"
}
if show_image_creation_progress_log == 1:
sd_interface_continuous = sd_interface.load(
fn = update_log_progress,
inputs = None,
outputs = [
log_text_field
],
every = 1,
**sd_interface_load_kwargs
)
else:
sd_interface_continuous = sd_interface.load(
**sd_interface_load_kwargs
)
if enable_close_command_prompt_button == 1:
# https://github.com/gradio-app/gradio/pull/2433/files
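# "cancels" stops the queued or in-progress generation event before the
# function closes the command prompt.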
cancel_image_btn.click(
fn = cancel_image_processing,
inputs = None,
outputs = None,
cancels = [generate_image_btn_click_event]
)
sd_interface.queue(
max_size = 20
)
inbrowser = False
if auto_open_browser == 1:
inbrowser = True
sd_interface.launch(
inbrowser = inbrowser,
# debug = True,
share = None,
show_api = False,
quiet = True,
show_error = True,
max_threads = 1
)