import gradio as gr import torch import modin.pandas as pd from PIL import Image from diffusers import DiffusionPipeline import os import random import torchsde ########## # # Spaghetti AI by MagicFixesEverything # https://huggingface.co/magicfixeseverything # # This app is based on scripts by: # https://huggingface.co/Manjushri # This app has been adapted from that person's versions. # ########## # # For instructions on how to install this app offline, see the instructions # here: # https://huggingface.co/spaces/magicfixeseverything/ai_image_creation/blob/main/Instructions.txt # # The display was tested with gradio version 4.11.0. # # To launch this script, use the following in the command prompt, taking off # the # at the start. (You will need to adjust the start of the path if you # have changed the location) # #cd C:\Diffusers && .venv\Scripts\activate.bat && py .venv\ai_image_creation\app.py # # You must have a NVIDIA graphics card in your computer with Cuda installed # to use this script. It will not work on just a CPU on Windows. # # If not using "enable_model_cpu_offload" or "enable_sequential_cpu_offload", # memory usage will remain high until command prompt is closed. (whether # image is being created or not) # ############################################################################### ############################################################################### # # # # Begin Configurations # # # ############################################################################### ############################################################################### # # Main Directory # # This is where everything goes. Your Python virtual environment should # be here. Model data will be stored here. (unless you change the next # configuration) If configured, imagery will also be automatically be # saved here. # main_dir = "C:/Spaghetti_AI" #################### # # Use Custom Hugging Face Cache Directory # # The folder where model data is stored can get huge. I choose to add it # to a place where I am more likely to notice it more often. If you use # other Hugging Face things however, and will use these models in those # other things, then you might want to consider not having this here as # it would duplicate the model data. # # If set to 1, the data would be here: # C:\Diffusers\model_data # # If set to 0, the data would be here: # %USERPROFILE%/.cache/huggingface/hub # Which would look like this, where {Username} is the username of # your Windows account: # C:\Users\{Username}\.cache\huggingface\hub # # You need to clean out the folder occasionally as this folder will get # extremely large. Eventually, it would take up all the space on your # computer. # use_custom_hugging_face_cache_dir = 1 ##### # # Name of Model Data Folder # # This is where all the model data will go. (unless you changed it in the # previous configuration) This folder will get very large. You need to # clean it out manually occasionally. # cache_directory_folder_name = "model_data" #################### # # Default Base Model # # This will automatically be SDXL Turbo if you are running this on a CPU. # default_base_model = "sdxl" #################### # # Use Safety Checker # # This can block some NSFW images for the models that allow it. # # 0 No # 1 Yes # use_safety_checker = 0 ##### # # Auto Save Imagery # # You can automatically save the image file, and a text file with the # prompt details. # auto_save_imagery = 1 ##### # # Name of Saved Images Folder # # You can change the name of this folder if you want. Imagery will be # saved in a folder called "saved_images" in the directory configured # in "main_dir". (the saved images folder will be created # automatically) A directory for each day will be created in this # folder. Imagery will then be placed in each folder. # saved_images_folder_name = "saved_images" #################### # # Auto Open Browser From Command Prompt # auto_open_browser = 1 #################### # # Include Close Command Prompt / Cancel Button # # This doesn't work well at all. It just closes the command prompt. I # might remove this eventually unless someone knows of a way it can work # better, such as stopping the generation without closing the command # prompt. # enable_close_command_prompt_button = 0 #################### # # Use Denoising Start In Base Model When Using Refiner # # If set to "1", refining will end at the percent (expressed as decimal) # defined in the denoising start for the refiner. If the steps set are # 100, and the denoising start value is 0.75, the base model will run for # 75 steps. The refiner will then run for 25 steps. # default_use_denoising_start_in_base_model_when_using_refiner = 1 #################### # # Base Model Output To Refiner Is In Latent Space # # If set to "1", base model output is in latent space instead of PIL # image when sent to refiner. # default_base_model_output_to_refiner_is_in_latent_space = 1 #################### # # Log Generation Times # # Log generation times to saved text output. The initial time it takes to # load a model is not included in the generation time. # log_generation_times = 1 #################### # # Use Image Gallery # use_image_gallery = 1 #################### # # Show Image Creation Progress Log # # This adds the current step that image generation is on. # show_image_creation_progress_log = 1 #################### # # Show Messages In Command Prompt # # Messages will be printed in command prompt. # show_messages_in_command_prompt = 1 #################### # # Show Messages In Modal On Page # # A popup appears in the top right corner on the page. # show_messages_in_modal_on_page = 0 #################### # # Suppress Hugging Face Hub Offline Status # # By default, we add messages about the current setting of # "HF_HUB_OFFLINE" in # suppress_hugging_face_hub_offline_status = 0 #################### # # Add Seed Into Pipe # # To make generation deterministic. I add the option because the online # configuration for the PhotoReal site doesn't do that and it changes # things. # default_add_seed_into_pipe = 1 #################### # # Max Queue Size # max_queue_size_if_cpu = 1 max_queue_size_if_torch = 20 #################### # # Allow Online Configurations # # This allows matching what was created on these sites: # https://huggingface.co/spaces/Manjushri/SDXL-1.0 # https://huggingface.co/spaces/Manjushri/PhotoReal-V3.7.5 # allow_online_configurations = 1 #################### # # Up Next Is Various Configuration Arrays and Objects # #################### base_model_array = [ "sdxl", "photoreal", "sdxl_turbo", "sd_1_5_runwayml" ] base_model_names_object = { "sdxl": "Stable Diffusion XL", "photoreal": "PhotoReal", "sdxl_turbo": "Stable Diffusion XL Turbo", "sd_1_5_runwayml": "Stable Diffusion 1.5" } #################### # # "sdxl_default" # # - My customized configurations. (subject to change) # # "sdxl_2023-11-12" # # - Valid from November 12th to present. # Number of steps in upscaler changed from 5 to 15. # # "sdxl_2023-09-05" # # - Valid from September 5th to November 12th. # There were changes on this date. # # "photoreal_default" # # - My customized configurations. (subject to change) # "circulus/canvers-real-v3.7.5" # # Seeds do not match the online PhotoReal version. # # "photoreal_3-8-1" # # - My customized configurations. (subject to change) # "circulus/canvers-real-v3.8.1" # # "photoreal_3-8" # # - My customized configurations. (subject to change) # "circulus/canvers-real-v3.8" # # "photoreal_3-7-5" # # - My customized configurations. (subject to change) # "circulus/canvers-real-v3.7.5" # # "photoreal_3-6" # # - My customized configurations. (subject to change) # "circulus/canvers-realistic-v3.6" # # "photoreal_2023-11-12" # # - Valid from November 12th to present. # New base model: "circulus/canvers-real-v3.7.5" # # "photoreal_2023-09-01" # # - Valid from September 1st to November 12th. # "circulus/canvers-realistic-v3.6" was already in effect. # But there were changes on this date. # # "sdxl_turbo_default" # # - My customized configurations. (subject to change) # # "sd_1_5_runwayml_default" # # - My customized configurations. (subject to change) # base_model_object_of_model_configuration_arrays = { "sdxl": [ "sdxl_default", "sdxl_2023-11-12", "sdxl_2023-09-05" ], "photoreal": [ "photoreal_default", "photoreal_3-8-1", "photoreal_3-8", "photoreal_3-7-5", "photoreal_3-6", "photoreal_2023-11-12", "photoreal_2023-09-01" ], "sdxl_turbo": [ "sdxl_turbo_default" ], "sd_1_5_runwayml": [ "sd_1_5_runwayml_default" ] } #################### model_configuration_names_object = { "sdxl_default": "1.0 - Default", "sdxl_2023-11-12": "1.0 (2023-11-12 online config)", "sdxl_2023-09-05": "1.0 (2023-09-05 online config)", "photoreal_default": "3.6 - Default", "photoreal_3-8-1": "3.8.1 - Default", "photoreal_3-8": "3.8 - Default", "photoreal_3-7-5": "3.7.5 - Default", "photoreal_3-6": "3.6 - Default", "photoreal_2023-11-12": "3.7.5 (2023-11-12 online config)", "photoreal_2023-09-01": "3.6 (2023-09-01 online config)", "sdxl_turbo_default": "Default", "sd_1_5_runwayml_default": "1.5 - Default" } model_configuration_links_object = { "sdxl_default": "stabilityai/stable-diffusion-xl-base-1.0", "sdxl_2023-11-12": "stabilityai/stable-diffusion-xl-base-1.0", "sdxl_2023-09-05": "stabilityai/stable-diffusion-xl-base-1.0", "photoreal_default": "circulus/canvers-realistic-v3.6", "photoreal_3-8-1": "circulus/canvers-real-v3.8.1", "photoreal_3-8": "circulus/canvers-real-v3.8", "photoreal_3-7-5": "circulus/canvers-real-v3.7.5", "photoreal_3-6": "circulus/canvers-realistic-v3.6", "photoreal_2023-11-12": "circulus/canvers-real-v3.7.5", "photoreal_2023-09-01": "circulus/canvers-realistic-v3.6", "sdxl_turbo_default": "stabilityai/sdxl-turbo", "sd_1_5_runwayml_default": "runwayml/stable-diffusion-v1-5" } model_configuration_force_refiner_object = { "sdxl_2023-11-12": 1, "sdxl_2023-09-05": 1 } # For now, the ones that force the refiner also have the "Refiner Number of # Iterations" available. model_configuration_include_refiner_number_of_steps_object = model_configuration_force_refiner_object #model_configuration_include_refiner_number_of_steps_object = { # "sdxl_2023-11-12": 1, # "sdxl_2023-09-05": 1 #} # For now, the ones that force the refiner also need upscaling steps hidden. model_configuration_hide_upscaler_steps_object = model_configuration_force_refiner_object #model_configuration_hide_upscaler_steps_object = { # "sdxl_2023-11-12": 1, # "sdxl_2023-09-05": 1 #} #################### sdxl_online_configurations_object = { "sdxl_2023-11-12": 1, "sdxl_2023-09-05": 1 } photoreal_online_configurations_object = { "photoreal_2023-11-12": 1, "photoreal_2023-09-01": 1 } #################### hugging_face_refiner_partial_path = "stabilityai/stable-diffusion-xl-refiner-1.0" hugging_face_upscaler_partial_path = "stabilityai/sd-x2-latent-upscaler" #################### base_model_model_configuration_defaults_object = { "sdxl": "sdxl_default", "photoreal": "photoreal_default", "sdxl_turbo": "sdxl_turbo_default", "sd_1_5_runwayml": "sd_1_5_runwayml_default" } #################### # # Links: # # SD-XL 1.0-base Model Card # https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0 # # SD-XL 1.0-refiner Model Card # https://huggingface.co/stabilityai/stable-diffusion-xl-refiner-1.0 # # Stable Diffusion x2 latent upscaler model card # https://huggingface.co/stabilityai/sd-x2-latent-upscaler # # PhotoReal # 3.7.5: https://huggingface.co/circulus/canvers-real-v3.7.5 # 3.6: https://huggingface.co/circulus/canvers-realistic-v3.6 # # SDXL Turbo # https://huggingface.co/stabilityai/sdxl-turbo # # Stable Diffusion v1-5 (runwayml) # https://huggingface.co/runwayml/stable-diffusion-v1-5 # #################### default_scheduler = "model_default" schedulers_array = [ "model_default", "ddim", "ddpm", "dpm_solver_multistep", "dpm_solver_multistep_karras_sigmas_true", "dpm_solver_multistep_algorithm_type_sde-dpmsolver_pp", "dpm_solver_multistep_karras_sigmas_true_algorithm_type_sde-dpmsolver_pp", "dpm_solver_singlestep", "dpm_solver_singlestep_karras_sigmas_true", "kdpm2_discrete", "kdpm2_discrete_karras_sigmas_true", "kdpm2_ancestral_discrete", "kdpm2_ancestral_discrete_karras_sigmas_true", "euler_discrete", "euler_ancestral_discrete", "heun_discrete", "lms_discrete", "lms_discrete_karras_sigmas_true", "pndm", "pndm_skip_prk_steps_true", "deis_multistep", "dpm_solver_sde", "uni_pc_multistep" ] scheduler_long_names_object = { "model_default": "Model Default", "ddim": "DDIM", "ddpm": "DDPM", "dpm_solver_multistep": "DPM++ 2M (DPMSolverMultistep)", "dpm_solver_multistep_karras_sigmas_true": "DPM++ 2M Karras (DPMSolverMultistep with use_karras_sigmas=True)", "dpm_solver_multistep_algorithm_type_sde-dpmsolver_pp": "DPM++ 2M SDE (DPMSolverMultistep with algorithm_type=\"sde-dpmsolver++\")", "dpm_solver_multistep_karras_sigmas_true_algorithm_type_sde-dpmsolver_pp": "DPM++ 2M SDE Karras (DPMSolverMultistep with use_karras_sigmas=True & algorithm_type=\"sde-dpmsolver++\")", "dpm_solver_singlestep": "DPM++ SDE (DPMSolverSinglestep)", "dpm_solver_singlestep_karras_sigmas_true": "DPM++ SDE Karras (DPMSolverSinglestep with use_karras_sigmas=True)", "kdpm2_discrete": "DPM2 (KDPM2Discrete)", "kdpm2_discrete_karras_sigmas_true": "DPM2 Karras (KDPM2Discrete with use_karras_sigmas=True)", "kdpm2_ancestral_discrete": "DPM2 a (KDPM2AncestralDiscrete)", "kdpm2_ancestral_discrete_karras_sigmas_true": "DPM2 a Karras (KDPM2AncestralDiscrete with use_karras_sigmas=True)", "euler_discrete": "Euler (EulerDiscrete)", "euler_ancestral_discrete": "Euler a (EulerAncestralDiscrete)", "heun_discrete": "Heun (HeunDiscrete)", "lms_discrete": "LMS (LMSDiscrete)", "lms_discrete_karras_sigmas_true": "LMS Karras (LMSDiscrete with use_karras_sigmas=True)", "pndm": "PNDM", "pndm_skip_prk_steps_true": "PNDM (with skip_prk_steps=True) - Close to PLMS", "deis_multistep": "DEISMultistep", "dpm_solver_sde": "DPMSolverSDE", "uni_pc_multistep": "UniPCMultistep" } scheduler_short_names_object = { "ddim": "DDIM", "ddpm": "DDPM", "dpm_solver_multistep": "DPM++ 2M", "dpm_solver_multistep_karras_sigmas_true": "DPM++ 2M Karras", "dpm_solver_multistep_algorithm_type_sde-dpmsolver_pp": "DPM++ 2M SDE", "dpm_solver_multistep_karras_sigmas_true_algorithm_type_sde-dpmsolver_pp": "DPM++ 2M SDE Karras", "dpm_solver_singlestep": "DPM++ SDE", "dpm_solver_singlestep_karras_sigmas_true": "DPM++ SDE Karras", "kdpm2_discrete": "DPM2", "kdpm2_discrete_karras_sigmas_true": "DPM2 Karras", "kdpm2_ancestral_discrete": "DPM2 a", "kdpm2_ancestral_discrete_karras_sigmas_true": "DPM2 a Karras", "euler_discrete": "Euler", "euler_ancestral_discrete": "Euler a", "heun_discrete": "Heun", "lms_discrete": "LMS", "lms_discrete_karras_sigmas_true": "LMS Karras", "pndm": "PNDM", "pndm_skip_prk_steps_true": "PNDM (with skip_prk_steps=True) - Close to PLMS", "deis_multistep": "DEISMultistep", "dpm_solver_sde": "DPMSolverSDE", "uni_pc_multistep": "UniPCMultistep" } scheduler_name_to_identifier_in_app_object = { "DDIMScheduler": "ddim", "DDPMScheduler": "ddpm", "DPMSolverMultistepScheduler": "dpm_solver_multistep", "DPMSolverSinglestepScheduler": "dpm_solver_singlestep", "KDPM2DiscreteScheduler": "kdpm2_discrete", "KDPM2AncestralDiscreteScheduler": "kdpm2_ancestral_discrete", "EulerDiscreteScheduler": "euler_discrete", "EulerAncestralDiscreteScheduler": "euler_ancestral_discrete", "HeunDiscreteScheduler": "heun_discrete", "LMSDiscreteScheduler": "lms_discrete", "PNDMScheduler": "pndm", "DEISMultistepScheduler": "deis_multistep", "DPMSolverSDEScheduler": "dpm_solver_sde", "UniPCMultistepScheduler": "uni_pc_multistep" } #################### # # Determine automatically if on CPU or GPU # # CPU will not work on Windows. # device = "cpu" if torch.cuda.is_available(): device = "cuda" PYTORCH_CUDA_ALLOC_CONF = { "max_split_size_mb": 8000 } torch.cuda.max_memory_allocated( device = device ) torch.cuda.empty_cache() if device == "cpu": default_base_model = "sdxl_turbo" #################### # # Determine if running on HuggingFace # try: if (str(os.uname()).find("magicfixeseverything") >= 0): script_being_run_on_hugging_face = 1 except: script_being_run_on_hugging_face = 0 if script_being_run_on_hugging_face == 1: allow_online_configurations = 0 #################### default_prompt = "" default_negative_prompt = "" default_width = 768 default_height = 768 minimum_width = 256 minimum_height = 256 maximum_width = 2048 # 1024 maximum_height = 2048 # 1024 default_base_model_steps = 50 default_base_model_steps_for_sdxl_turbo = 2 maximum_base_model_steps = 150 # 100 maximum_base_model_steps_for_sdxl_turbo = 25 default_guidance_scale = 7.5 minimum_guidance_scale = 0 maximum_guidance_scale = 30 # Must be greater than 0 guidance_scale_input_slider_steps = 0.25 # # To have the seed be random, set this to: # # "random" # default_seed_value = "random" maximum_seed = 1000000000000000000 add_generation_information_to_image = 1 # If you turn off the refiner it will not be available in the display unless # you select an online configuration option that requires it. enable_refiner = 1 enable_upscaler = 1 # Selected on form as a default? default_refiner_selected = 0 default_upscaler_selected = 0 # Accordion visible on load? # # 0 If selected as default, will be open. Otherwise, closed. # 1 Always starts open default_refiner_accordion_open = 1 default_upscaler_accordion_open = 1 default_refiner_denoise_start = 0.95 minimum_refiner_denoise_start = 0.01 maximum_refiner_denoise_start = 0.99 # Must be greater than 0 refiner_denoise_start_input_slider_steps = 0.01 # Only for SDXL online configuration. We just use base model steps and denoise start normally default_refining_steps_for_online_config_field = 100 maximum_refining_steps_for_online_config_field = 100 # Upscaler Options maximum_upscaler_steps = 150 default_upscaler_steps = 50 # xFormers: # # https://huggingface.co/docs/diffusers/optimization/xformers use_xformers = 1 # Scaled dot product attention (SDPA) is used by default for PyTorch 2.0. To # use default instead, set this to 1. # # https://huggingface.co/docs/diffusers/optimization/torch2.0#scaled-dot-product-attention use_default_attn_processor = 0 display_xformers_usage_in_prompt_info = 1 include_transformers_version_in_prompt_info = 1 display_default_attn_processor_usage_in_prompt_info = 1 # You can't select both sequential and model cpu offloading. If you select # both, model cpu offloading will be used. use_sequential_cpu_offload_for_base_model = 1 use_sequential_cpu_offload_for_refiner = 1 use_sequential_cpu_offload_for_upscaler = 1 use_model_cpu_offload_for_base_model = 0 use_model_cpu_offload_for_refiner = 0 use_model_cpu_offload_for_upscaler = 0 if default_base_model == "sdxl": # SDXL default_width = 1024 default_height = 1024 #elif default_base_model == "photoreal": # PhotoReal #elif default_base_model == "sdxl_turbo": # SDXL Turbo #elif default_base_model == "sd_1_5_runwayml": # SD 1.5 # Must be multiple of 8 width_and_height_input_slider_steps = 8 opening_html = "" ending_html = "" max_queue_size = max_queue_size_if_torch if device == "cpu": if script_being_run_on_hugging_face == 1: opening_html = "This app is extremely slow. This app is not running on a GPU. The first time it loads after the space is rebuilt it might take 10 minutes to generate a SDXL Turbo image. It may take around 3 minutes after that point to do two steps. (with no refining or upscaling) For other models, it may take an hour to create a single image. Want apps that work fast? Use these which this app is based on: Stable Diffusion XL, PhotoReal with SDXL 1.0 Refiner and SDXL Turbo Unofficial Demo. This app is designed to give more options, but it's too slow to operate and test on a CPU. There are some features that are either not available, or more limited, in the online version of this app. (such as smaller allowed image dimensions and less steps allowed) This app is still in development. Next will be the ability to create a link like this." else: opening_html = "This app is currently running on a CPU. If you have a NVIDIA graphics card, make sure you have torch installed so that you can use your GPU to create imagery. If you don't, it will work extremely slowly." max_queue_size = max_queue_size_if_cpu if allow_online_configurations == 1: ending_html = """This app allows you to try to match images that can be generated using several tools online. (Stable Diffusion XL, PhotoReal with SDXL 1.0 Refiner and SDXL Turbo Unofficial Demo) You can select the base model you want to use in the first dropdown option. The second configuration option involves choosing which version and/or configuration to choose. Certain configurations try to match the version online, taking into account changes that were made over time. Another configuration involves a default configuration I choose and is subject to change while I am still designing this app. """ ending_html += """Spaghetti AI Logo Tokens are not individual characters. If the prompt length is too long, the display will notify you what part of the prompt wasn't used. Changing just the image dimensions alone will change the image generated. For some models, trying to make a large image, such as 1024x1024, may add extra people and come out worse than using smaller dimensions. The original script for this app was written by Manjushri.""" refiner_on_text = "Refiner is on. " refiner_off_text = "Refiner is off. " upscaler_on_text = "Upscaler is on. " upscaler_off_text = "Upscaler is off. " number_of_reserved_tokens = 2 ############################################################################### ############################################################################### # # # # End Configurations # # # ############################################################################### ############################################################################### if script_being_run_on_hugging_face == 1: # If on HuggingFace, I change some things. use_custom_hugging_face_cache_dir = 0 auto_save_imagery = 0 show_messages_in_modal_on_page = 0 show_messages_in_command_prompt = 1 if device == "cpu": # If on CPU at HuggingFace, I reduce what is available. show_image_creation_progress_log = 1 maximum_width = 768 maximum_height = 768 minimum_guidance_scale = 1 maximum_guidance_scale = 15 maximum_base_model_steps = 30 maximum_base_model_steps_for_sdxl_turbo = 5 minimum_refiner_denoise_start = 0.70 maximum_upscaler_steps = 15 default_upscaler_steps = 10 ending_html = """ If you would like to download this app to run offline on a Windows computer that has a NVIDIA graphics card, click here to download it. """ + ending_html if default_width < minimum_width: default_width = minimum_width if default_height < minimum_height: default_height = minimum_height if default_width > maximum_width: default_width = maximum_width if default_height > maximum_height: default_height = maximum_height if default_base_model_steps > maximum_base_model_steps: default_base_model_steps = maximum_base_model_steps if default_base_model_steps_for_sdxl_turbo > maximum_base_model_steps_for_sdxl_turbo: default_base_model_steps_for_sdxl_turbo = maximum_base_model_steps_for_sdxl_turbo if default_guidance_scale < minimum_guidance_scale: default_guidance_scale = minimum_guidance_scale if default_guidance_scale > maximum_guidance_scale: default_guidance_scale = maximum_guidance_scale if default_upscaler_steps > maximum_upscaler_steps: default_upscaler_steps = maximum_upscaler_steps if allow_online_configurations == 0: base_model_array = [ "sdxl", "photoreal", "sdxl_turbo" ] base_model_object_of_model_configuration_arrays = { "sdxl": [ "sdxl_default" ], "photoreal": [ "photoreal_default" ], "sdxl_turbo": [ "sdxl_turbo_default" ] } base_model_model_configuration_defaults_object = { "sdxl": "sdxl_default", "photoreal": "photoreal_default", "sdxl_turbo": "sdxl_turbo_default" } hugging_face_hub_is_offline = 0 if script_being_run_on_hugging_face == 0: if ( ("HF_HUB_OFFLINE" in os.environ) and (int(os.environ["HF_HUB_OFFLINE"]) == 1) ): hugging_face_hub_is_offline = 1 if suppress_hugging_face_hub_offline_status == 1: if hugging_face_hub_is_offline == 0: print ("Note: The Hugging Face cache directory does not automatically delete older data. Over time, it could eventually grow to use all the space on the drive it is on. You either need to manually clean out the folder occasionally or see Instructons.txt on how to not automatically update data once you have downloaded everything you need.") else: print ("You are working offline. Data will not be downloaded. See \"ai_image_creation.bat\" or \"Instructions.txt\" for more info.") if device == "cuda": PYTORCH_CUDA_ALLOC_CONF = { "max_split_size_mb": 8000 } torch.cuda.max_memory_allocated( device = device ) torch.cuda.empty_cache() saved_images_dir = main_dir + "/" + saved_images_folder_name hugging_face_cache_dir = main_dir + "/" + cache_directory_folder_name if not os.path.exists(hugging_face_cache_dir): os.makedirs(hugging_face_cache_dir) if auto_save_imagery == 1: from datetime import datetime import time if ( (log_generation_times == 1) or (show_image_creation_progress_log == 1) ): import time if device == "cpu": use_sequential_cpu_offload_for_base_model = 0 use_sequential_cpu_offload_for_refiner = 0 use_sequential_cpu_offload_for_upscaler = 0 use_model_cpu_offload_for_base_model = 0 use_model_cpu_offload_for_refiner = 0 use_model_cpu_offload_for_upscaler = 0 use_xformers = 0 if ( (use_sequential_cpu_offload_for_base_model == 1) and (use_model_cpu_offload_for_base_model == 1) ): use_sequential_cpu_offload_for_base_model = 0 if ( (use_sequential_cpu_offload_for_refiner == 1) and (use_model_cpu_offload_for_refiner == 1) ): use_sequential_cpu_offload_for_refiner = 0 if ( (use_sequential_cpu_offload_for_upscaler == 1) and (use_model_cpu_offload_for_upscaler == 1) ): use_sequential_cpu_offload_for_upscaler = 0 def error_function( text_message ): print (text_message) raise Exception(text_message) additional_prompt_info_html = "" if auto_save_imagery == 1: additional_prompt_info_html = " The image, and a text file with generation information, will be saved automatically." if use_xformers == 1: from xformers.ops import MemoryEfficientAttentionFlashAttentionOp if use_default_attn_processor == 1: from diffusers.models.attention_processor import AttnProcessor if ( default_base_model and (default_base_model in base_model_object_of_model_configuration_arrays) and (default_base_model in base_model_model_configuration_defaults_object) ): default_model_configuration = base_model_model_configuration_defaults_object[default_base_model] if default_model_configuration in model_configuration_names_object: default_model_configuration_choices_array = [] for this_model_configuration in base_model_object_of_model_configuration_arrays[default_base_model]: if this_model_configuration in model_configuration_names_object: default_model_configuration_choices_array.append( model_configuration_names_object[this_model_configuration] ) else: error_function("A default configuration must be properly named in the code.") else: error_function("A default configuration must be properly configured in the code.") else: error_function("A default base model must be properly configured in the code.") default_base_model_nicely_named_value = base_model_names_object[default_base_model] default_model_configuration_nicely_named_value = model_configuration_names_object[default_model_configuration] if not ( default_scheduler and default_scheduler in scheduler_long_names_object ): error_function("A default scheduler must be properly configured in the code.") default_scheduler_nicely_named_value = scheduler_long_names_object[default_scheduler] if enable_refiner != 1: default_refiner_selected = 0 if enable_upscaler != 1: default_upscaler_selected = 0 model_configuration_requires_refiner = 0 if default_model_configuration in model_configuration_force_refiner_object: model_configuration_requires_refiner = model_configuration_force_refiner_object[default_model_configuration] if model_configuration_requires_refiner == 1: enable_refiner = 1 default_refiner_selected = 1 default_refine_option = "No" if default_refiner_selected == 1: default_refine_option = "Yes" default_upscale_option = "No" if default_upscaler_selected == 1: default_upscale_option = "Yes" online_configurations_object = {} online_configurations_object.update(sdxl_online_configurations_object) online_configurations_object.update(photoreal_online_configurations_object) is_default_config = 1 if default_model_configuration in online_configurations_object: is_default_config = 0 default_refiner_and_upscaler_status_text = "" default_use_denoising_start_in_base_model_when_using_refiner_is_selected = False if default_use_denoising_start_in_base_model_when_using_refiner == 1: default_use_denoising_start_in_base_model_when_using_refiner_is_selected = True default_base_model_output_to_refiner_is_in_latent_space_is_selected = False if default_base_model_output_to_refiner_is_in_latent_space == 1: default_base_model_output_to_refiner_is_in_latent_space_is_selected = True refiner_default_config_accordion_visible = True if ( (enable_refiner != 1) or (is_default_config != 1) ): refiner_default_config_accordion_visible = False refiner_default_config_accordion_open = False if ( (default_refiner_accordion_open == 1) or ( (is_default_config == 1) and (default_refiner_selected == 1) ) ): refiner_default_config_accordion_open = True refiner_online_config_accordion_visible = True if ( (enable_refiner != 1) or (is_default_config == 1) ): refiner_online_config_accordion_visible = False refiner_online_config_accordion_open = False if ( (default_refiner_accordion_open == 1) or ( (is_default_config != 1) and (default_refiner_selected == 1) ) ): refiner_online_config_accordion_open = True refiner_group_visible = False if enable_refiner == 1: refiner_group_visible = True if default_refiner_selected == 1: default_refiner_and_upscaler_status_text += refiner_on_text else: default_refiner_and_upscaler_status_text += refiner_off_text upscaler_accordion_open = False if ( (default_upscaler_selected == 1) or (default_upscaler_accordion_open == 1) ): upscaler_accordion_open = True upscaler_group_visible = False if enable_upscaler == 1: upscaler_group_visible = True if default_upscaler_selected == 1: default_refiner_and_upscaler_status_text += upscaler_on_text else: default_refiner_and_upscaler_status_text += upscaler_off_text default_negative_prompt_field_row_visibility = True default_negative_prompt_for_sdxl_turbo_field_row_visibility = False default_base_model_steps_field_row_visibility = True default_base_model_steps_field_for_sdxl_turbo_field_row_visibility = False default_guidance_scale_field_row_visibility = True default_guidance_scale_for_sdxl_turbo_field_row_visibility = False if default_base_model == "sdxl_turbo": default_negative_prompt_field_row_visibility = False default_negative_prompt_for_sdxl_turbo_field_row_visibility = True default_base_model_steps_field_row_visibility = False default_base_model_steps_field_for_sdxl_turbo_field_row_visibility = True default_guidance_scale_field_row_visibility = False default_guidance_scale_for_sdxl_turbo_field_row_visibility = True default_add_seed_into_pipe_field_row_visibility = False if is_default_config == 1: default_add_seed_into_pipe_field_row_visibility = True default_add_seed_into_pipe_is_selected = False if default_add_seed_into_pipe == 1: default_add_seed_into_pipe_is_selected = True default_base_model_choices_array = [] stored_model_configuration_names_object = {} for this_base_model in base_model_array: default_base_model_choices_array.append( base_model_names_object[this_base_model] ) stored_model_configuration = base_model_model_configuration_defaults_object[this_base_model] stored_model_configuration_names_object[this_base_model] = model_configuration_names_object[stored_model_configuration] default_scheduler_choices_array = [] for this_scheduler in schedulers_array: default_scheduler_choices_array.append( scheduler_long_names_object[this_scheduler] ) make_seed_selection_a_textbox = 1 if maximum_seed <= 9007199254740992: make_seed_selection_a_textbox = 0 ############################################################################### ############################################################################### # # # # # # # Functions # # # # # # ############################################################################### ############################################################################### ##################### # # Show Message # # Display message to user in model on web form and/or command prompt. # ##################### def generate_random_seed(): maximum_seed_for_random = maximum_seed if maximum_seed_for_random > 9007199254740992: # If above this number, seeds may not be able to be entered into # slider properly. maximum_seed_for_random = 9007199254740992 actual_seed = int(random.randrange(0, 10**len(str(maximum_seed_for_random)))) return actual_seed ##################### # # Show Message # # Display message to user in model on web form and/or command prompt. # ##################### def show_message( message_to_display ): if show_messages_in_command_prompt == 1: print (message_to_display) if show_messages_in_modal_on_page == 1: gr.Info(message_to_display) ##################### # # Convert Seconds # # Convert raw seconds to the numer of hours, minutes and seconds. # ##################### def convert_seconds( seconds ): # Google AI Code hours = seconds // 3600 minutes = (seconds % 3600) // 60 seconds = seconds % 60 return hours, minutes, seconds ##################### # # Base Model Valid # # Return True if valid. # ##################### def base_model_valid(base_model_name_value): try: base_model_name_value_str = str(base_model_name_value).lower() if ( (base_model_name_value_str in base_model_object_of_model_configuration_arrays) and (base_model_name_value_str in base_model_model_configuration_defaults_object) ): return True else: return False except ValueError: return False ##################### # # Model Configuration Valid # # Return True if valid. # ##################### def model_configuration_valid( base_model_name_value, model_configuration_name_value ): try: base_model_name_value_str = str(base_model_name_value).lower() model_configuration_name_value_str = str(model_configuration_name_value).lower() for this_base_model in base_model_array: for this_model_configuration in base_model_object_of_model_configuration_arrays[this_base_model]: if ( (base_model_name_value_str == this_base_model) and (model_configuration_name_value_str == this_model_configuration) ): return True return False except ValueError: return False ##################### # # Prompt Valid # # Return True if valid. # ##################### def prompt_valid(prompt_field): try: prompt_field_str = str(prompt_field) if len(prompt_field_str) <= 500: return True else: return False except ValueError: return False ##################### # # Negative Prompt Valid # # Return True if valid. # ##################### def negative_prompt_valid(negative_prompt_field): try: negative_prompt_field_str = str(negative_prompt_field) if len(negative_prompt_field_str) <= 500: return True else: return False except ValueError: return False ##################### # # Scheduler/Sampler Valid # # Return True if valid. # ##################### def scheduler_valid(scheduler_field): try: scheduler_str = str(scheduler_field).lower() if scheduler_str in scheduler_long_names_object: return True else: return False except ValueError: return False ##################### # # Width Valid # # Return True if valid. # ##################### def width_valid(width_num_str): try: width_num = int(width_num_str) if ( (width_num >= int(minimum_width)) and (width_num <= int(maximum_width)) and (width_num % int(width_and_height_input_slider_steps)) == 0 ): return True else: return False except ValueError: return False ##################### # # Height Valid # # Return True if valid. # ##################### def height_valid(height_num_str): try: height_num = int(height_num_str) if ( (height_num >= int(minimum_height)) and (height_num <= int(maximum_height)) and (height_num % int(width_and_height_input_slider_steps)) == 0 ): return True else: return False except ValueError: return False ##################### # # Guidance Scale Valid # # Return True if valid. # ##################### def guidance_scale_valid(guidance_scale_str): try: guidance_scale_num = float(guidance_scale_str) guidance_scale_num_times_100 = (guidance_scale_num * 100) guidance_scale_num_times_100_with_int = int(guidance_scale_num_times_100) guidance_scale_input_slider_steps_times_100 = (float(guidance_scale_input_slider_steps) * 100) if ( (guidance_scale_num >= float(minimum_guidance_scale)) and (guidance_scale_num <= float(maximum_guidance_scale)) and (guidance_scale_num_times_100 == guidance_scale_num_times_100_with_int) and ((guidance_scale_num_times_100 % guidance_scale_input_slider_steps_times_100) == 0) ): return True else: return False except ValueError: return False ##################### # # Steps Valid # # Return True if valid. # ##################### def steps_valid( steps_num_str, base_model_name_value ): try: steps_num = int(steps_num_str) base_model_name_value_str = str(base_model_name_value).lower() if steps_num > 0: if (base_model_name_value_str == "sdxl_turbo"): if steps_num <= int(maximum_base_model_steps_for_sdxl_turbo): return True else: if steps_num <= int(maximum_base_model_steps): return True return False except ValueError: return False ##################### # # Seed Valid # # Return True if valid. # ##################### def seed_valid( seed_num_str ): try: seed_num = int(seed_num_str) if ( (seed_num >= 0) and (seed_num <= int(maximum_seed)) ): return True else: return False except ValueError: return False ##################### # # Refiner Denoise Start # # Return True if valid. # ##################### def refiner_denoise_start_valid( refiner_denoise_start_str ): try: refiner_denoise_start_num = float(refiner_denoise_start_str) refiner_denoise_start_num_times_100 = (refiner_denoise_start_num * 100) refiner_denoise_start_num_times_100_with_int = int(refiner_denoise_start_num_times_100) refiner_denoise_start_input_slider_steps_times_100 = (float(refiner_denoise_start_input_slider_steps) * 100) if ( (refiner_denoise_start_num >= float(minimum_refiner_denoise_start)) and (refiner_denoise_start_num <= float(maximum_refiner_denoise_start)) and (refiner_denoise_start_num_times_100 == refiner_denoise_start_num_times_100_with_int) and ((refiner_denoise_start_num_times_100 % refiner_denoise_start_input_slider_steps_times_100) == 0) ): return True else: return False except ValueError: return False ##################### # # Refiner Steps # # Return True if valid. # ##################### def refining_steps_valid( refining_steps_num_str ): try: refining_steps_num = int(refining_steps_num_str) if ( (refining_steps_num > 0) and (refining_steps_num <= int(maximum_refining_steps_for_online_config_field)) ): return True else: return False except ValueError: return False ##################### # # Upscaler Steps # # Return True if valid. # ##################### def upscaling_steps_valid( upscaling_steps_num_str ): try: upscaling_steps_num = int(upscaling_steps_num_str) if ( (upscaling_steps_num > 0) and (upscaling_steps_num <= int(maximum_upscaler_steps)) ): return True else: return False except ValueError: return False ##################### # # Numerical Bool # # Return 1 for anything that is True/Yes/1. Everything else is False. # ##################### def numerical_bool( original_value ): new_value = 0 if ( (original_value == 1) or (original_value == "Yes") or (original_value == "True") or (original_value == True) ): new_value = 1 return new_value ##################### # # Truncate Prompt # # Truncate a prompt. Get the actual prompt that will be used and save the # part of the prompt that will not be used. # ##################### def truncate_prompt ( pipe, existing_prompt_text ): # Only 77 tokens are allowed in the prompt. 2 are reserved, meaning it is # truncated to 75. This happens automatically, but we want to tell people # that tokenizer = pipe.tokenizer max_token_length_of_model = pipe.tokenizer.model_max_length - number_of_reserved_tokens prompt_text_words_array = existing_prompt_text.split(" ") prompt_text_words_array_length = len(prompt_text_words_array) prompt_text_words_index = 0 prompt_text_substring = "" prompt_text_not_used_substring = "" for prompt_text_word in prompt_text_words_array: prompt_text_words_index += 1 substring_to_test = prompt_text_substring if prompt_text_words_index > 1: substring_to_test += " " substring_to_test += prompt_text_word token_length_of_substring_to_test = len(tokenizer.tokenize(substring_to_test)) if token_length_of_substring_to_test > max_token_length_of_model: prompt_text_not_used_substring += prompt_text_word + " " else: prompt_text_substring = substring_to_test return ( prompt_text_substring, prompt_text_not_used_substring ) ##################### # # Construct Pipe # # Prepare the base model. # ##################### def construct_pipe ( base_model_name_value, model_configuration_name_value ): if device == "cuda": torch.cuda.empty_cache() base_model_kwargs = {} if ( (base_model_name_value == "sdxl") or (base_model_name_value == "photoreal") or (base_model_name_value == "sdxl_turbo") or (base_model_name_value == "sd_1_5_runwayml") ): base_model_kwargs["use_safetensors"] = True if use_safety_checker == 0: if ( (base_model_name_value == "photoreal") or (base_model_name_value == "sd_1_5_runwayml") ): base_model_kwargs = { "safety_checker": None, "requires_safety_checker": False } if device == "cuda": if ( (base_model_name_value == "sdxl") or (base_model_name_value == "sdxl_turbo") or (base_model_name_value == "sd_1_5_runwayml") ): base_model_kwargs["variant"] = "fp16" base_model_kwargs["torch_dtype"] = torch.float16 if use_custom_hugging_face_cache_dir == 1: base_model_kwargs["cache_dir"] = hugging_face_cache_dir pipe = DiffusionPipeline.from_pretrained( model_configuration_links_object[model_configuration_name_value], **base_model_kwargs ) if use_model_cpu_offload_for_base_model == 1: pipe.enable_model_cpu_offload() if use_xformers == 1: pipe.enable_xformers_memory_efficient_attention() pipe = pipe.to(device) if use_sequential_cpu_offload_for_base_model == 1: pipe.enable_sequential_cpu_offload() if use_default_attn_processor == 1: pipe.unet.set_default_attn_processor() if device == "cuda": torch.cuda.empty_cache() # else: # pipe.unet = torch.compile( # pipe.unet, # mode = "reduce-overhead", # fullgraph = True # ) return ( pipe ) ##################### # # Configure Scheduler # ##################### def configure_scheduler ( pipe, scheduler_value ): scheduler_config = pipe.scheduler.config scheduler = scheduler_value if scheduler_value == "model_default": scheduler_name = pipe.scheduler.config._class_name if scheduler_name in scheduler_name_to_identifier_in_app_object: scheduler = scheduler_name_to_identifier_in_app_object[scheduler_name] scheduler_used = scheduler if scheduler == "ddim": from diffusers import DDIMScheduler pipe.scheduler = DDIMScheduler.from_config(scheduler_config) elif scheduler == "ddpm": from diffusers import DDPMScheduler pipe.scheduler = DDPMScheduler.from_config(scheduler_config) elif scheduler == "dpm_solver_multistep": from diffusers import DPMSolverMultistepScheduler pipe.scheduler = DPMSolverMultistepScheduler.from_config(scheduler_config) elif scheduler == "dpm_solver_multistep_karras_sigmas_true": new_scheduler_config = dict(pipe.scheduler.config) new_scheduler_config.update({"use_karras_sigmas": True}) from diffusers import DPMSolverMultistepScheduler pipe.scheduler = DPMSolverMultistepScheduler.from_config(new_scheduler_config) elif scheduler == "dpm_solver_multistep_algorithm_type_sde-dpmsolver_pp": new_scheduler_config = dict(pipe.scheduler.config) new_scheduler_config.update({"algorithm_type": "sde-dpmsolver++"}) from diffusers import DPMSolverMultistepScheduler pipe.scheduler = DPMSolverMultistepScheduler.from_config(new_scheduler_config) elif scheduler == "dpm_solver_multistep_karras_sigmas_true_algorithm_type_sde-dpmsolver_pp": new_scheduler_config = dict(pipe.scheduler.config) new_scheduler_config.update({"use_karras_sigmas": True}) new_scheduler_config.update({"algorithm_type": "sde-dpmsolver++"}) from diffusers import DPMSolverMultistepScheduler pipe.scheduler = DPMSolverMultistepScheduler.from_config(new_scheduler_config) elif scheduler == "dpm_solver_singlestep": from diffusers import DPMSolverSinglestepScheduler pipe.scheduler = DPMSolverSinglestepScheduler.from_config(scheduler_config) elif scheduler == "dpm_solver_singlestep_karras_sigmas_true": new_scheduler_config = dict(pipe.scheduler.config) new_scheduler_config.update({"use_karras_sigmas": True}) from diffusers import DPMSolverSinglestepScheduler pipe.scheduler = DPMSolverSinglestepScheduler.from_config(new_scheduler_config) elif scheduler == "kdpm2_discrete": from diffusers import KDPM2DiscreteScheduler pipe.scheduler = KDPM2DiscreteScheduler.from_config(scheduler_config) elif scheduler == "kdpm2_discrete_karras_sigmas_true": new_scheduler_config = dict(pipe.scheduler.config) new_scheduler_config.update({"use_karras_sigmas": True}) from diffusers import KDPM2DiscreteScheduler pipe.scheduler = KDPM2DiscreteScheduler.from_config(new_scheduler_config) elif scheduler == "kdpm2_ancestral_discrete": from diffusers import KDPM2AncestralDiscreteScheduler pipe.scheduler = KDPM2AncestralDiscreteScheduler.from_config(scheduler_config) elif scheduler == "kdpm2_ancestral_discrete_karras_sigmas_true": new_scheduler_config = dict(pipe.scheduler.config) new_scheduler_config.update({"use_karras_sigmas": True}) from diffusers import KDPM2AncestralDiscreteScheduler pipe.scheduler = KDPM2AncestralDiscreteScheduler.from_config(new_scheduler_config) elif scheduler == "euler_discrete": from diffusers import EulerDiscreteScheduler pipe.scheduler = EulerDiscreteScheduler.from_config(scheduler_config) elif scheduler == "euler_ancestral_discrete": from diffusers import EulerAncestralDiscreteScheduler pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(scheduler_config) elif scheduler == "heun_discrete": from diffusers import HeunDiscreteScheduler pipe.scheduler = HeunDiscreteScheduler.from_config(scheduler_config) elif scheduler == "lms_discrete": from diffusers import LMSDiscreteScheduler pipe.scheduler = LMSDiscreteScheduler.from_config(scheduler_config) elif scheduler == "lms_discrete_karras_sigmas_true": new_scheduler_config = dict(pipe.scheduler.config) new_scheduler_config.update({"use_karras_sigmas": True}) from diffusers import LMSDiscreteScheduler pipe.scheduler = LMSDiscreteScheduler.from_config(new_scheduler_config) elif scheduler == "pndm": from diffusers import PNDMScheduler pipe.scheduler = PNDMScheduler.from_config(scheduler_config) elif scheduler == "pndm_skip_prk_steps_true": new_scheduler_config = dict(pipe.scheduler.config) new_scheduler_config.update({"skip_prk_steps": True}) from diffusers import PNDMScheduler pipe.scheduler = PNDMScheduler.from_config(new_scheduler_config) elif scheduler == "deis_multistep": from diffusers import DEISMultistepScheduler pipe.scheduler = DEISMultistepScheduler.from_config(scheduler_config) elif scheduler == "dpm_solver_sde": from diffusers import DPMSolverSDEScheduler pipe.scheduler = DPMSolverSDEScheduler.from_config(scheduler_config) elif scheduler == "uni_pc_multistep": from diffusers import UniPCMultistepScheduler pipe.scheduler = UniPCMultistepScheduler.from_config(scheduler_config) else: from diffusers import PNDMScheduler pipe.scheduler = PNDMScheduler.from_config(scheduler_config) scheduler_used = "pndm" return ( scheduler_used ) ##################### # # Construct Refiner # # Prepare the refiner. # ##################### def construct_refiner (): refiner_kwargs = { "use_safetensors": True } if device == "cuda": refiner_kwargs["variant"] = "fp16" refiner_kwargs["torch_dtype"] = torch.float16 if use_custom_hugging_face_cache_dir == 1: refiner_kwargs["cache_dir"] = hugging_face_cache_dir refiner = DiffusionPipeline.from_pretrained( hugging_face_refiner_partial_path, **refiner_kwargs ) if use_model_cpu_offload_for_refiner == 1: refiner.enable_model_cpu_offload() if use_xformers == 1: refiner.enable_xformers_memory_efficient_attention() refiner = refiner.to(device) if use_sequential_cpu_offload_for_refiner == 1: refiner.enable_sequential_cpu_offload() if use_default_attn_processor == 1: refiner.unet.set_default_attn_processor() if device == "cuda": torch.cuda.empty_cache() # else: # refiner.unet = torch.compile( # refiner.unet, # mode = "reduce-overhead", # fullgraph = True # ) return ( refiner ) ##################### # # Construct Upscaler # # Prepare the upscaler. # ##################### def construct_upscaler (): upscaler_kwargs = { "use_safetensors": True } if device == "cuda": upscaler_kwargs["torch_dtype"] = torch.float16 if use_custom_hugging_face_cache_dir == 1: upscaler_kwargs["cache_dir"] = hugging_face_cache_dir upscaler = DiffusionPipeline.from_pretrained( hugging_face_upscaler_partial_path, **upscaler_kwargs ) if use_model_cpu_offload_for_upscaler == 1: upscaler.enable_model_cpu_offload() if use_xformers == 1: upscaler.enable_xformers_memory_efficient_attention() upscaler = upscaler.to(device) if use_sequential_cpu_offload_for_upscaler == 1: upscaler.enable_sequential_cpu_offload() if use_default_attn_processor == 1: upscaler.unet.set_default_attn_processor() if device == "cuda": torch.cuda.empty_cache() # else: # upscaler.unet = torch.compile( # upscaler.unet, # mode = "reduce-overhead", # fullgraph = True # ) return ( upscaler ) ##################### # # Update Prompt Info From Gallery # # If you select an image in the image gallery, display the prompt # information for that image. # ##################### def update_prompt_info_from_gallery ( gallery_data: gr.SelectData, image_gallery_array_state_value ): gallery_data_index = gallery_data.index output_image_gallery_field_update = gr.Gallery( selected_index = gallery_data_index ) output_text_field_update = image_gallery_array_state_value[gallery_data_index] return { output_image_gallery_field: output_image_gallery_field_update, output_text_field: output_text_field_update } ##################### # # Before Create Image Function # # This is loaded before the image creation begins. # ##################### def before_create_image_function (): generate_image_btn_update = gr.Button( value = "Generating...", variant = "secondary", interactive = False ) output_text_field_update = gr.Textbox( visible = False ) return { generate_image_btn: generate_image_btn_update, output_text_field: output_text_field_update } ##################### # # After Create Image Function # # This is loaded once image creation has completed. # ##################### def after_create_image_function (): generate_image_btn_update = gr.Button( value = "Generate", variant = "primary", interactive = True ) output_text_field_update = gr.Textbox( visible = True ) return { generate_image_btn: generate_image_btn_update, output_text_field: output_text_field_update } ##################### # # Create Image Function # # This is the main image creation function. # ##################### def create_image_function ( base_model_field_index, prompt_text, negative_prompt_text, scheduler_index, image_width, image_height, guidance_scale, base_model_num_inference_steps, base_model_steps_field_for_sdxl_turbo, actual_seed, add_seed_into_pipe, refining_selection_default_config_field_value, refining_selection_online_config_normal_field_value, refining_selection_online_config_automatically_selected_field_value, refining_denoise_start_for_default_config_field_value, refining_use_denoising_start_in_base_model_when_using_refiner_field_value, refining_base_model_output_to_refiner_is_in_latent_space_field_value, refining_denoise_start_for_online_config_field_value, refining_steps_for_sdxl_online_config_field_value, upscaling_selection_field_value, upscaling_num_inference_steps, image_gallery_array_state_value, prompt_information_array_state_value, last_model_configuration_name_selected_state_value, last_refiner_name_selected_state_value, last_upscaler_name_selected_state_value, stored_pipe_state, stored_refiner_state, stored_upscaler_state, *model_configuration_dropdown_fields_array, progress = gr.Progress() ): image_width = int(image_width) image_height = int(image_height) guidance_scale = float(guidance_scale) base_model_num_inference_steps = int(base_model_num_inference_steps) base_model_steps_field_for_sdxl_turbo = int(base_model_steps_field_for_sdxl_turbo) actual_seed = int(actual_seed) refining_denoise_start_for_default_config_field_value = round(float(refining_denoise_start_for_default_config_field_value), 2) refining_denoise_start_for_online_config_field_value = round(float(refining_denoise_start_for_online_config_field_value), 2) refining_steps_for_sdxl_online_config_field_value = int(refining_steps_for_sdxl_online_config_field_value) upscaling_num_inference_steps = int(upscaling_num_inference_steps) base_model_name_value = base_model_array[base_model_field_index] position_in_array = 0 model_configuration_field_object = {} for model_configuration_field_index in model_configuration_dropdown_fields_array: this_base_model = base_model_array[position_in_array] model_configuration_field_object[this_base_model] = model_configuration_field_index position_in_array += 1 model_configuration_field_index = model_configuration_field_object[base_model_name_value] model_configuration_name_value = base_model_object_of_model_configuration_arrays[base_model_name_value][model_configuration_field_index] current_actual_total_base_model_steps = base_model_num_inference_steps current_actual_total_refiner_steps = 0 current_actual_total_upscaler_steps = upscaling_num_inference_steps if base_model_name_value == "sdxl_turbo": negative_prompt_text = "" base_model_num_inference_steps = base_model_steps_field_for_sdxl_turbo current_actual_total_base_model_steps = base_model_num_inference_steps guidance_scale = 0 scheduler_value = schedulers_array[scheduler_index] if not base_model_valid(base_model_name_value): error_function("Base model is not valid.") if not model_configuration_valid(base_model_name_value, model_configuration_name_value): error_function("Model configuration is not valid.") if not prompt_valid(prompt_text): error_function("Prompt is not valid.") if not negative_prompt_valid(negative_prompt_text): error_function("Negative prompt is not valid.") if not scheduler_valid(scheduler_value): error_function("Scheduler/sampler is not valid.") if not width_valid(image_width): error_function("Image width is not valid.") if not height_valid(image_height): error_function("Image height is not valid.") if base_model_name_value != "sdxl_turbo": if not guidance_scale_valid(guidance_scale): error_function("Guidance scale is not valid.") base_model_steps = base_model_num_inference_steps if base_model_name_value == "sdxl_turbo": steps = base_model_steps_field_for_sdxl_turbo if not steps_valid(base_model_steps, base_model_name_value): error_function("Steps option is not valid.") if not seed_valid(actual_seed): error_function("Seed is not valid.") add_seed_into_pipe = numerical_bool(add_seed_into_pipe) refining_selection_default_config_field_value = numerical_bool(refining_selection_default_config_field_value) refining_selection_online_config_normal_field_value = numerical_bool(refining_selection_online_config_normal_field_value) refining_selection_online_config_automatically_selected_field_value = numerical_bool(refining_selection_online_config_automatically_selected_field_value) refining_use_denoising_start_in_base_model_when_using_refiner_field_value = numerical_bool(refining_use_denoising_start_in_base_model_when_using_refiner_field_value) refining_base_model_output_to_refiner_is_in_latent_space_field_value = numerical_bool(refining_base_model_output_to_refiner_is_in_latent_space_field_value) use_upscaler = numerical_bool(upscaling_selection_field_value) is_default_config_state = 1 if model_configuration_name_value in online_configurations_object: is_default_config_state = 0 is_sdxl_online_config = 0 is_photoreal_online_config = 0 if ( (model_configuration_name_value in sdxl_online_configurations_object) and (sdxl_online_configurations_object[model_configuration_name_value]) ): is_sdxl_online_config = 1 elif ( (model_configuration_name_value in photoreal_online_configurations_object) and photoreal_online_configurations_object[model_configuration_name_value] ): is_photoreal_online_config = 1 use_refiner = 0 if ( ( (is_default_config_state == 1) and refining_selection_default_config_field_value ) or ( (is_default_config_state != 1) and ( ( (model_configuration_name_value not in model_configuration_force_refiner_object) and refining_selection_online_config_normal_field_value ) or ( (model_configuration_name_value in model_configuration_force_refiner_object) and refining_selection_online_config_automatically_selected_field_value ) ) ) ): use_refiner = 1 if is_default_config_state == 1: if not refiner_denoise_start_valid(refining_denoise_start_for_default_config_field_value): error_function("Refiner denoise start is not valid.") else: if (model_configuration_name_value not in model_configuration_force_refiner_object): if not refiner_denoise_start_valid(refining_denoise_start_for_online_config_field_value): error_function("Refiner denoise start is not valid.") if (is_sdxl_online_config == 1): if not refining_steps_valid(refining_steps_for_sdxl_online_config_field_value): error_function("Refining steps option is not valid.") if use_upscaler == 1: if not upscaling_steps_valid(upscaling_num_inference_steps): error_function("Upscaling steps option is not valid.") if ( (last_model_configuration_name_selected_state_value == "") or (model_configuration_name_value != last_model_configuration_name_selected_state_value) ): if (last_model_configuration_name_selected_state_value != ""): if "pipe" in globals(): del pipe if show_messages_in_command_prompt == 1: print ("Base model is loading."); progress( progress = 0, desc = "Base model is loading" ) ( pipe ) = construct_pipe( base_model_name_value, model_configuration_name_value ) last_model_configuration_name_selected_state_value = model_configuration_name_value else: pipe = stored_pipe_state ( scheduler_used ) = configure_scheduler( pipe, scheduler_value ) if use_refiner == 1: if (last_refiner_name_selected_state_value == ""): if show_messages_in_command_prompt == 1: print ("Refiner is loading."); progress( progress = 0, desc = "Refiner is loading" ) refiner = construct_refiner() last_refiner_name_selected_state_value = "refiner" else: refiner = stored_refiner_state else: refiner = {} if use_upscaler == 1: if (last_upscaler_name_selected_state_value == ""): if show_messages_in_command_prompt == 1: print ("Upscaler is loading."); progress( progress = 0, desc = "Upscaler is loading" ) upscaler = construct_upscaler() last_upscaler_name_selected_state_value = "upscaler" else: upscaler = stored_upscaler_state else: upscaler = "" if log_generation_times == 1: start_time = time.time() # Only 77 tokens are allowed in the prompt. 2 are reserved, meaning it is # truncated to 75. This happens automatically, but we want to tell people # that tokenizer = pipe.tokenizer max_token_length_of_model = pipe.tokenizer.model_max_length - number_of_reserved_tokens token_length_of_prompt_text = len(tokenizer.tokenize(prompt_text)) token_length_of_negative_prompt_text = len(tokenizer.tokenize(negative_prompt_text)) prompt_text_not_used_substring = "" message_about_prompt_truncation = "" if token_length_of_prompt_text > max_token_length_of_model: ( prompt_text, prompt_text_not_used_substring ) = truncate_prompt( pipe, prompt_text ) message_about_prompt_truncation += "Your prompt has been truncated because it is too long. This part has been truncated:

" + prompt_text_not_used_substring + "" negative_prompt_text_not_used_substring = "" if token_length_of_negative_prompt_text > max_token_length_of_model: ( negative_prompt_text, negative_prompt_text_not_used_substring ) = truncate_prompt( pipe, negative_prompt_text ) if len(message_about_prompt_truncation) > 0: message_about_prompt_truncation += "

" message_about_prompt_truncation += "Your negative prompt has been truncated because it is too long. This part has been truncated:

" + negative_prompt_text_not_used_substring + "" prompt_truncated_field_update = gr.HTML( value = "", visible = False ) if len(message_about_prompt_truncation) > 0: prompt_truncated_field_update = gr.HTML( value = "
Note: " + message_about_prompt_truncation + "
", visible = True ) show_message("Note: Part of your prompt has been truncated automatically because it was too long.") if show_image_creation_progress_log == 1: current_base_model_generation_start_time = 0 def callback_function_for_base_model_progress( callback_pipe, callback_step_index, callback_timestep, callback_kwargs ): global current_base_model_generation_start_time if int(callback_step_index) == 0: current_base_model_generation_start_time = time.time() if int(callback_step_index) > 0: seconds_per_step = ((time.time() - current_base_model_generation_start_time) / int(callback_step_index)) ( time_per_step_hours, time_per_step_minutes, time_per_step_seconds ) = convert_seconds(seconds_per_step) if time_per_step_hours > 0: hours_text = "hr" if time_per_step_hours > 1: hours_text = "hrs" nice_time_per_step = str(int(time_per_step_hours)) + " " + hours_text + ". " + str(int(time_per_step_minutes)) + " min. " + str(round(time_per_step_seconds, 1)) + " sec." elif time_per_step_minutes > 0: nice_time_per_step = str(int(time_per_step_minutes)) + " min. " + str(round(time_per_step_seconds, 1)) + " sec." else: nice_time_per_step = str(round(time_per_step_seconds, 2)) + " seconds" base_model_progress_text = nice_time_per_step + " per step" else: base_model_progress_text = "Base model processing started" progress( progress = ( callback_step_index, current_actual_total_base_model_steps ), desc = base_model_progress_text, unit = "base model steps" ) return {} callback_to_do_for_base_model_progress = callback_function_for_base_model_progress current_refiner_generation_start_time = 0 def callback_function_for_refiner_progress( callback_pipe, callback_step_index, callback_timestep, callback_kwargs ): global current_refiner_generation_start_time if int(callback_step_index) == 0: current_refiner_generation_start_time = time.time() if int(callback_step_index) > 0: seconds_per_step = ((time.time() - current_refiner_generation_start_time) / int(callback_step_index)) ( time_per_step_hours, time_per_step_minutes, time_per_step_seconds ) = convert_seconds(seconds_per_step) if time_per_step_hours > 0: hours_text = "hr" if time_per_step_hours > 1: hours_text = "hrs" nice_time_per_step = str(int(time_per_step_hours)) + " " + hours_text + ". " + str(int(time_per_step_minutes)) + " min. " + str(round(time_per_step_seconds, 1)) + " sec." elif time_per_step_minutes > 0: nice_time_per_step = str(int(time_per_step_minutes)) + " min. " + str(round(time_per_step_seconds, 1)) + " sec." else: nice_time_per_step = str(round(time_per_step_seconds, 2)) + " seconds" refiner_progress_text = nice_time_per_step + " per step" else: refiner_progress_text = "Refiner processing started" progress( progress = ( callback_step_index, current_actual_total_refiner_steps ), desc = refiner_progress_text, unit = "refiner steps" ) return {} callback_to_do_for_refiner_progress = callback_function_for_refiner_progress current_upscaler_generation_start_time = 0 def callback_function_for_upscaler_progress( callback_step_index, callback_timestep, callback_latents ): global current_upscaler_generation_start_time if int(callback_step_index) == 0: current_upscaler_generation_start_time = time.time() if int(callback_step_index) > 0: seconds_per_step = ((time.time() - current_upscaler_generation_start_time) / int(callback_step_index)) ( time_per_step_hours, time_per_step_minutes, time_per_step_seconds ) = convert_seconds(seconds_per_step) if time_per_step_hours > 0: hours_text = "hr" if time_per_step_hours > 1: hours_text = "hrs" nice_time_per_step = str(int(time_per_step_hours)) + " " + hours_text + ". " + str(int(time_per_step_minutes)) + " min. " + str(round(time_per_step_seconds, 1)) + " sec." elif time_per_step_minutes > 0: nice_time_per_step = str(int(time_per_step_minutes)) + " min. " + str(round(time_per_step_seconds, 1)) + " sec." else: nice_time_per_step = str(round(time_per_step_seconds, 2)) + " seconds" upscaler_progress_text = nice_time_per_step + " per step" else: upscaler_progress_text = "Upscaler processing started" progress( progress = ( callback_step_index, current_actual_total_upscaler_steps ), desc = upscaler_progress_text, unit = "upscaler steps" ) return {} callback_to_do_for_upscaler_progress = callback_function_for_upscaler_progress else: callback_to_do_for_base_model_progress = None callback_to_do_for_refiner_progress = None callback_to_do_for_upscaler_progress = None generator = torch.manual_seed(actual_seed) if ( (is_sdxl_online_config == 1) or (is_photoreal_online_config == 1) ): # # # # Attempt To Match Online Configurations # # # prompt = prompt_text negative_prompt = negative_prompt_text width = image_width height = image_height scale = guidance_scale steps = base_model_num_inference_steps refining = use_refiner if refining == 1: refining = "Yes" upscaling = use_upscaler if upscaling == 1: upscaling = "Yes" prompt_2 = "" negative_prompt_2 = "" high_noise_frac = refining_denoise_start_for_online_config_field_value if (is_sdxl_online_config) == 1: add_seed_into_pipe = 1 n_steps = refining_steps_for_sdxl_online_config_field_value upscaling_num_inference_steps = 15 if model_configuration_name_value == "sdxl_2023-09-05": upscaling_num_inference_steps = 5 current_actual_total_upscaler_steps = upscaling_num_inference_steps if show_messages_in_command_prompt == 1: print ("Initial image creation has begun."); print ("Image steps..."); if show_image_creation_progress_log == 1: progress( progress = 0, desc = "Initial image creation has begun" ) int_image = pipe( prompt, prompt_2=prompt_2, negative_prompt=negative_prompt, negative_prompt_2=negative_prompt_2, num_inference_steps=steps, height=height, width=width, guidance_scale=scale, num_images_per_prompt=1, generator=generator, output_type="latent", callback_on_step_end=callback_to_do_for_base_model_progress ).images if show_messages_in_command_prompt == 1: print ("Refiner steps..."); if show_image_creation_progress_log == 1: progress( progress = 0, desc = "Refining is beginning" ) current_actual_total_refiner_steps = (refining_steps_for_sdxl_online_config_field_value - round(refining_steps_for_sdxl_online_config_field_value * refining_denoise_start_for_online_config_field_value)) nice_refiner_denoise_start = str(refining_denoise_start_for_online_config_field_value) refiner_info_for_info_about_prompt_lines_array = [ "Refiner? Yes", "Refiner denoise start %: " + nice_refiner_denoise_start, "Refiner number of iterations: " + str(refining_steps_for_sdxl_online_config_field_value), "Actual Refining Steps: " + str(current_actual_total_refiner_steps) ] image = refiner( prompt=prompt, prompt_2=prompt_2, negative_prompt=negative_prompt, negative_prompt_2=negative_prompt_2, image=int_image, num_inference_steps=n_steps, denoising_start=high_noise_frac, callback_on_step_end=callback_to_do_for_refiner_progress ).images[0] if upscaling == 'Yes': if show_messages_in_command_prompt == 1: print ("Upscaler steps..."); if show_image_creation_progress_log == 1: progress( progress = 0, desc = "Upscaling is beginning" ) # Changed # # num_inference_steps=15 # upscaled = upscaler( prompt=prompt, negative_prompt=negative_prompt, image=image, num_inference_steps=upscaling_num_inference_steps, guidance_scale=0, callback=callback_to_do_for_upscaler_progress, callback_steps=1 ).images[0] if device == "cuda": torch.cuda.empty_cache() image_to_return = upscaled else: if device == "cuda": torch.cuda.empty_cache() image_to_return = image elif (is_photoreal_online_config == 1): add_seed_into_pipe = 0 Prompt = prompt upscale = refining # Not a mistake. This is wrong in code. if upscale == "Yes": # This will do refining, not upscaling if show_messages_in_command_prompt == 1: print ("Initial image creation has begun."); print ("Image steps..."); if show_image_creation_progress_log == 1: progress( progress = 0, desc = "Initial image creation has begun" ) int_image = pipe( Prompt, negative_prompt=negative_prompt, height=height, width=width, num_inference_steps=steps, guidance_scale=scale, callback_on_step_end=callback_to_do_for_base_model_progress ).images if show_messages_in_command_prompt == 1: print ("Refiner steps..."); if show_image_creation_progress_log == 1: progress( progress = 0, desc = "Refining is beginning" ) default_steps_in_diffusers = 50 current_actual_total_refiner_steps = (default_steps_in_diffusers - round(default_steps_in_diffusers * refining_denoise_start_for_online_config_field_value)) refiner_info_for_info_about_prompt_lines_array = [ "Refiner? Yes", "Refiner denoise start %: " + nice_refiner_denoise_start, "Refiner number of iterations: " + str(default_steps_in_diffusers), "Actual Refining Steps: " + str(current_actual_total_refiner_steps) ] image = refiner( Prompt, negative_prompt=negative_prompt, image=int_image, num_inference_steps=default_steps_in_diffusers, denoising_start=high_noise_frac, callback_on_step_end=callback_to_do_for_refiner_progress ).images[0] else: if show_messages_in_command_prompt == 1: print ("Image creation has begun."); print ("Image steps..."); if show_image_creation_progress_log == 1: progress( progress = 0, desc = "Image creation has begun" ) image = pipe( Prompt, negative_prompt=negative_prompt, height=height, width=width, num_inference_steps=steps, guidance_scale=scale, callback_on_step_end=callback_to_do_for_base_model_progress ).images[0] image_to_return = image else: if add_seed_into_pipe == 0: generator = None # # # # My Configurations # # # if use_refiner == 1: if refining_use_denoising_start_in_base_model_when_using_refiner_field_value == 1: denoising_end = refining_denoise_start_for_default_config_field_value current_actual_total_base_model_steps = round(base_model_num_inference_steps * refining_denoise_start_for_default_config_field_value) else: denoising_end = None output_type_before_refiner = "pil" if refining_base_model_output_to_refiner_is_in_latent_space_field_value == 1: output_type_before_refiner = "latent" current_actual_total_refiner_steps = (base_model_num_inference_steps - round(base_model_num_inference_steps * refining_denoise_start_for_default_config_field_value)) refiner_info_for_info_about_prompt_lines_array = [ "Refiner? Yes" ] nice_refiner_denoise_start = str(refining_denoise_start_for_online_config_field_value) if refining_use_denoising_start_in_base_model_when_using_refiner_field_value == 1: refiner_info_for_info_about_prompt_lines_array.extend([ "Set \"denoising_end\" in base model generation? Yes", "Base model denoise end %: " + nice_refiner_denoise_start, "Actual Base Model Steps: " + str(current_actual_total_base_model_steps) ]) else: refiner_info_for_info_about_prompt_lines_array.extend([ "Set \"denoising_end\" in base model generation? No", ]) refiner_info_for_info_about_prompt_lines_array.extend([ "Refiner denoise start %: " + nice_refiner_denoise_start, "Actual Refining Steps: " + str(current_actual_total_refiner_steps) ]) if refining_base_model_output_to_refiner_is_in_latent_space_field_value == 1: refiner_info_for_info_about_prompt_lines_array.extend([ "Base model output in latent space before refining? Yes", ]) else: refiner_info_for_info_about_prompt_lines_array.extend([ "Base model output in latent space before refining? No", ]) if use_upscaler == 1: if show_messages_in_command_prompt == 1: print ("Will create initial image, then refine and then upscale."); print ("Initial image steps..."); if show_image_creation_progress_log == 1: progress( progress = 0, desc = "Initial image creation has begun" ) intitial_image = pipe( prompt = prompt_text, negative_prompt = negative_prompt_text, width = image_width, height = image_height, num_inference_steps = base_model_num_inference_steps, guidance_scale = guidance_scale, num_images_per_prompt = 1, generator = generator, denoising_end = denoising_end, output_type = output_type_before_refiner, callback_on_step_end = callback_to_do_for_base_model_progress ).images if show_messages_in_command_prompt == 1: print ("Refiner steps..."); if show_image_creation_progress_log == 1: progress( progress = 0, desc = "Refining is beginning" ) refined_image = refiner( prompt = prompt_text, negative_prompt = negative_prompt_text, image = intitial_image, num_inference_steps = base_model_num_inference_steps, denoising_start = refining_denoise_start_for_default_config_field_value, output_type = "pil", callback_on_step_end = callback_to_do_for_refiner_progress ).images if show_messages_in_command_prompt == 1: print ("Upscaler steps..."); if show_image_creation_progress_log == 1: progress( progress = 0, desc = "Upscaling is beginning" ) upscaled_image = upscaler( prompt = prompt_text, negative_prompt = negative_prompt_text, image = refined_image, num_inference_steps = upscaling_num_inference_steps, guidance_scale = 0, callback = callback_to_do_for_upscaler_progress, callback_steps = 1 ).images[0] if device == "cuda": torch.cuda.empty_cache() image_to_return = upscaled_image else: if show_messages_in_command_prompt == 1: print ("Will create initial image and then refine."); print ("Initial image steps..."); if show_image_creation_progress_log == 1: progress( progress = 0, desc = "Initial image creation has begun" ) intitial_image = pipe( prompt = prompt_text, negative_prompt = negative_prompt_text, width = image_width, height = image_height, num_inference_steps = base_model_num_inference_steps, guidance_scale = guidance_scale, num_images_per_prompt = 1, generator = generator, denoising_end = denoising_end, output_type = output_type_before_refiner, callback_on_step_end = callback_to_do_for_base_model_progress ).images if show_messages_in_command_prompt == 1: print ("Refiner steps..."); if show_image_creation_progress_log == 1: progress( progress = 0, desc = "Refining is beginning" ) refined_image = refiner( prompt = prompt_text, negative_prompt = negative_prompt_text, image = intitial_image, num_inference_steps = base_model_num_inference_steps, denoising_start = refining_denoise_start_for_default_config_field_value, callback_on_step_end = callback_to_do_for_refiner_progress ).images[0] if device == "cuda": torch.cuda.empty_cache() image_to_return = refined_image else: if use_upscaler == 1: if show_messages_in_command_prompt == 1: print ("Will create initial image and then upscale."); print ("Initial image steps..."); if show_image_creation_progress_log == 1: progress( progress = 0, desc = "Initial image creation has begun" ) intitial_image = pipe( prompt = prompt_text, negative_prompt = negative_prompt_text, width = image_width, height = image_height, num_inference_steps = base_model_num_inference_steps, guidance_scale = guidance_scale, num_images_per_prompt = 1, generator = generator, output_type = "pil", callback_on_step_end = callback_to_do_for_base_model_progress ).images if show_messages_in_command_prompt == 1: print ("Upscaler steps..."); if show_image_creation_progress_log == 1: progress( progress = 0, desc = "Upscaling is beginning" ) upscaled_image = upscaler( prompt = prompt_text, negative_prompt = negative_prompt_text, image = intitial_image, num_inference_steps = upscaling_num_inference_steps, guidance_scale = 0, callback = callback_to_do_for_upscaler_progress, callback_steps = 1 ).images[0] if device == "cuda": torch.cuda.empty_cache() image_to_return = upscaled_image else: if show_messages_in_command_prompt == 1: print ("Will create image (no refining or upscaling)."); print ("Image steps..."); if show_image_creation_progress_log == 1: progress( progress = 0, desc = "Image creation has begun" ) image = pipe( prompt = prompt_text, negative_prompt = negative_prompt_text, width = image_width, height = image_height, num_inference_steps = base_model_num_inference_steps, guidance_scale = guidance_scale, num_images_per_prompt = 1, generator = generator, callback_on_step_end = callback_to_do_for_base_model_progress ).images[0] if device == "cuda": torch.cuda.empty_cache() image_to_return = image # # # # Prompt Information # # # nice_model_name = base_model_names_object[base_model_name_value] + " (" + model_configuration_links_object[model_configuration_name_value] + ")" info_about_prompt_lines_array = [ "Prompt: " + prompt_text ] if len(negative_prompt_text) > 0: info_about_prompt_lines_array.extend([ "Negative Prompt: " + negative_prompt_text ]) dimensions_title = "Dimensions" if use_upscaler == 1: dimensions_title = "Original Dimensions" info_about_prompt_lines_array.extend([ dimensions_title + ": " + str(image_width) + "x" + str(image_height) + " px" ]) if use_upscaler == 1: upscaled_image_width = int(image_width * 2) upscaled_image_height = int(image_height * 2) info_about_prompt_lines_array.extend([ "Upscaled Dimensions: " + str(upscaled_image_width) + "x" + str(upscaled_image_height) + " px" ]) info_about_prompt_lines_array.extend([ "Seed: " + str(actual_seed) ]) nice_seed_added_to_generation = "No" if add_seed_into_pipe == 1: nice_seed_added_to_generation = "Yes" info_about_prompt_lines_array.extend([ "Seed added to generation? " + nice_seed_added_to_generation ]) if int(guidance_scale) > 0: info_about_prompt_lines_array.extend([ "Guidance Scale: " + str(guidance_scale) ]) nice_scheduler_name = scheduler_short_names_object[scheduler_used] if scheduler_value == "model_default": nice_scheduler_name += " (model default)" info_about_prompt_lines_array.extend([ "Steps: " + str(base_model_num_inference_steps), "Model: " + nice_model_name, "Scheduler/Sampler: " + nice_scheduler_name ]) if use_refiner == 1: # Default Configuration info_about_prompt_lines_array.extend(refiner_info_for_info_about_prompt_lines_array) if use_upscaler == 1: info_about_prompt_lines_array.extend([ "Upscaled (2x)? Yes", "Upscaler Steps: " + str(current_actual_total_upscaler_steps) ]) if log_generation_times == 1: end_time = time.time() generation_time_in_seconds = (end_time - start_time) ( generation_partial_hours, generation_partial_minutes, generation_partial_seconds ) = convert_seconds(generation_time_in_seconds) if generation_partial_hours > 0: hours_text = "hr" if generation_partial_hours > 1: hours_text = "hrs" nice_generation_time = str(int(generation_partial_hours)) + " " + hours_text + ". " + str(int(generation_partial_minutes)) + " min. " + str(round(generation_partial_seconds, 1)) + " sec." elif generation_partial_minutes > 0: nice_generation_time = str(int(generation_partial_minutes)) + " min. " + str(round(generation_partial_seconds, 1)) + " sec." else: nice_generation_time = str(round(generation_time_in_seconds, 2)) + " sec." info_about_prompt_lines_array.extend([ "Time: " + nice_generation_time ]) if len(prompt_text_not_used_substring) > 0: info_about_prompt_lines_array.extend([ "End of Prompt Truncated: " + prompt_text_not_used_substring ]) if len(negative_prompt_text_not_used_substring) > 0: info_about_prompt_lines_array.extend([ "End of Negative Prompt Truncated: " + negative_prompt_text_not_used_substring ]) if display_xformers_usage_in_prompt_info > 0: nice_xformers_usage = "No" if use_xformers == 1: nice_xformers_usage = "Yes" if include_transformers_version_in_prompt_info == 1: import transformers nice_xformers_usage += " (version " + str(transformers.__version__) + ")" info_about_prompt_lines_array.extend([ "xFormers Used?: " + nice_xformers_usage ]) if display_default_attn_processor_usage_in_prompt_info > 0: nice_default_attn_processor_usage = "No" if use_default_attn_processor == 1: nice_default_attn_processor_usage = "Yes" info_about_prompt_lines_array.extend([ "Default AttnProcessor Used? " + nice_default_attn_processor_usage ]) info_about_prompt = "\n".join(info_about_prompt_lines_array) output_text_field_update = info_about_prompt if add_generation_information_to_image == 1: # Add generation info to image info_to_save_in_image = "\n-----------\nImage generation information:\n" + info_about_prompt + "\n-----------\n" image_to_return.info = {"parameters": info_to_save_in_image} if auto_save_imagery == 1: if not os.path.exists(saved_images_dir): os.makedirs(saved_images_dir) yy_mm_dd_date_stamp = datetime.today().strftime('%Y-%m-%d') saved_images_date_dir = saved_images_dir + "/" + yy_mm_dd_date_stamp + "/" if not os.path.exists(saved_images_date_dir): os.makedirs(saved_images_date_dir) image_count = 1 file_name_without_extension = yy_mm_dd_date_stamp + "-" + ('%04d' % image_count) saved_image_path_and_file = saved_images_date_dir + file_name_without_extension + ".png" while os.path.exists(saved_image_path_and_file): file_name_without_extension = yy_mm_dd_date_stamp + "-" + ('%04d' % image_count) saved_image_path_and_file = saved_images_date_dir + file_name_without_extension + ".png" image_count += 1 if add_generation_information_to_image == 1: from PIL.PngImagePlugin import PngInfo saved_image_metadata = PngInfo() saved_image_metadata.add_text( "parameters", info_to_save_in_image ) image_to_return_file = image_to_return.save( saved_image_path_and_file, pnginfo = saved_image_metadata ) else: image_to_return_file = image_to_return.save( saved_image_path_and_file ) saved_text_file_path_and_file = saved_images_date_dir + file_name_without_extension + ".txt" prompt_info_file_handle = open(saved_text_file_path_and_file, "w") prompt_info_file_handle.writelines(output_text_field_update) prompt_info_file_handle.close() output_image_field_update = gr.Image( value = image_to_return ) image_gallery_array_state_value.insert(0, image_to_return) prompt_information_array_state_value.insert(0, output_text_field_update) output_image_gallery_field_update = gr.Gallery( value = image_gallery_array_state_value, selected_index = 0 ) image_gallery_array_state_update = image_gallery_array_state_value prompt_information_array_state_update = prompt_information_array_state_value if show_messages_in_command_prompt == 1: print ("Image created.") last_model_configuration_name_selected_state_update = last_model_configuration_name_selected_state_value last_refiner_name_selected_state_update = last_refiner_name_selected_state_value last_upscaler_name_selected_state_update = last_upscaler_name_selected_state_value return ( output_image_field_update, output_image_gallery_field_update, output_text_field_update, prompt_truncated_field_update, last_model_configuration_name_selected_state_update, last_refiner_name_selected_state_update, last_upscaler_name_selected_state_update, pipe, refiner, upscaler ) ##################### # # Cancel Image Processing # # When running on Windows, this is an attempt at closing the command # prompt from the web display. It's really not worth having this. You can # just close the prompt. I would like a nice way to cancel image # creation, but couldn't figure that out. # ##################### def cancel_image_processing(): # I simply don't know how to stop the image generation without closing # the command prompt. Doing that requires the code below twice for some # reason. # # Method: # https://stackoverflow.com/questions/67146623/how-to-close-the-command-prompt-from-python-script-directly gr.Warning("The command prompt window has been closed. Any image generation in progress has been stopped. To generate any other images, you will need to launch the command prompt again.") os.system('title kill_window') os.system(f'taskkill /f /fi "WINDOWTITLE eq kill_window"') os.system(f'taskkill /f /fi "WINDOWTITLE eq kill_window"') ##################### # # Download Data From HuggingFace # # This will download a lot of data at once rather than waiting until you # use each model. This is access by having this at the end of the URL: # # ?download_data=1 # # Like this: # http://127.0.0.1:7860/?download_data=1 # # A "1" will download only the default model configuration for each main # model, as well as refiner and upscaler data. A "2" will download all # model data needed in "model_configuration_links_object", including # online configurations you may not want. # ##################### def download_data_from_huggingface( download_data_option ): if ( (script_being_run_on_hugging_face == 0) and ("HF_HUB_OFFLINE" in os.environ) and (int(os.environ["HF_HUB_OFFLINE"]) == 0) ): data_to_get_partial_message = "the default model configuration defined in \"base_model_model_configuration_defaults_object\" for that model will be downloaded. It accesses data that is linked in \"model_configuration_links_object\"." if download_data_option == "2": data_to_get_partial_message = "all model data, for each model configuration, will be downloaded. This is defined in \"base_model_object_of_model_configuration_arrays\" and accesses data that is linked in \"model_configuration_links_object\"." download_data_message = "For each model in the model dropdown (\"base_model_array\"), " + data_to_get_partial_message + " That could easily be dozens of gigabytes of data or more that is about to be downloaded. If you want to stop the download, close the command prompt." print (download_data_message) data_links_downloaded_object = {} for this_base_model in base_model_array: base_model_name_value = this_base_model if download_data_option == "1": default_model_configuration_for_this_base_model = base_model_model_configuration_defaults_object[this_base_model] print ("Downloading/loading \"" + this_base_model + "\" model data for \"" + default_model_configuration_for_this_base_model + "\"...") model_configuration_name_value = default_model_configuration_for_this_base_model construct_pipe ( base_model_name_value, model_configuration_name_value ) else: for this_model_configuration in base_model_object_of_model_configuration_arrays[this_base_model]: if ( (this_model_configuration in model_configuration_names_object) and (this_model_configuration in model_configuration_links_object) ): model_configuration_name_value = this_model_configuration model_configuration_link_value = model_configuration_links_object[this_model_configuration] if model_configuration_link_value not in data_links_downloaded_object: print ("Downloading/loading \"" + this_base_model + "\" model data from \"" + model_configuration_link_value + "\"...") construct_pipe ( base_model_name_value, model_configuration_name_value ) data_links_downloaded_object[model_configuration_link_value] = 1 print ("Downloading/loading refiner data...") construct_refiner() print ("Downloading/loading upscaler data...") construct_upscaler() print ("The data has been downloaded.") else: error_function("In order to download model data, \"HF_HUB_OFFLINE\" must be set to \"0\" in the Windows .bat file that launched this script.") ##################### # # Get Query Params # # Get variables from the url of the page and update the display to # reflect them. # ##################### def get_query_params( request: gr.Request ): raw_url_params = str(request.query_params) import urllib.parse unprocessed_url_object = urllib.parse.parse_qs(raw_url_params) url_object = {} for url_param_key in unprocessed_url_object: url_param_value = unprocessed_url_object[url_param_key][0] if len(url_param_value) > 0: url_param_key = str(url_param_key) url_param_key_lowercase = url_param_key.lower() url_object[url_param_key_lowercase] = str(unprocessed_url_object[url_param_key][0]).lower() field_object = {} base_model_name_value = default_base_model base_model_field_key_in_url = "model" if base_model_field_key_in_url in url_object: base_model_field_in_url = url_object[base_model_field_key_in_url].lower() if base_model_valid(base_model_field_in_url): base_model_name_value = base_model_field_in_url base_model_nicely_named_value = base_model_names_object[base_model_name_value] field_object.update({base_model_field: base_model_nicely_named_value}) download_data_key_in_url = "download_data" if download_data_key_in_url in url_object: download_data_option_in_url = str(url_object[download_data_key_in_url]) if ( (download_data_option_in_url == "1") or (download_data_option_in_url == "2") ): download_data_from_huggingface(download_data_option_in_url) model_configuration_key_in_url = "model_config" model_configuration_in_url = "" if model_configuration_key_in_url in url_object: model_configuration_in_url = url_object[model_configuration_key_in_url].lower() for this_base_model in base_model_array: if base_model_name_value == this_base_model: model_configuration_name_value = base_model_model_configuration_defaults_object[this_base_model] if len(model_configuration_in_url) > 0: for this_model_configuration in base_model_object_of_model_configuration_arrays[this_base_model]: if model_configuration_valid(base_model_name_value, model_configuration_in_url): model_configuration_name_value = this_model_configuration field_object.update({initial_model_configuration_name_selected_state: model_configuration_in_url}) prompt_field_key_in_url = "prompt" if prompt_field_key_in_url in url_object: prompt_field_in_url = url_object[prompt_field_key_in_url].lower() if prompt_valid(prompt_field_in_url): field_object.update({prompt_field: prompt_field_in_url}) negative_prompt_field_key_in_url = "neg_prompt" if negative_prompt_field_key_in_url in url_object: negative_prompt_field_in_url = url_object[negative_prompt_field_key_in_url].lower() if prompt_valid(negative_prompt_valid): field_object.update({negative_prompt_field: negative_prompt_field_in_url}) scheduler_field_key_in_url = "scheduler" if scheduler_field_key_in_url in url_object: scheduler_field_in_url = url_object[scheduler_field_key_in_url].lower() if scheduler_valid(scheduler_field_in_url): scheduler_name_value = scheduler_field_in_url scheduler_nicely_named_value = scheduler_long_names_object[scheduler_name_value] field_object.update({scheduler_field: scheduler_nicely_named_value}) image_width_field_key_in_url = "width" if image_width_field_key_in_url in url_object: image_width_field_in_url = str(url_object[image_width_field_key_in_url]) if width_valid(image_width_field_in_url): field_object.update({image_width_field: image_width_field_in_url}) image_height_field_key_in_url = "height" if image_height_field_key_in_url in url_object: image_height_field_in_url = str(url_object[image_height_field_key_in_url]) if height_valid(image_height_field_in_url): field_object.update({image_height_field: image_height_field_in_url}) guidance_scale_field_key_in_url = "guidance" if guidance_scale_field_key_in_url in url_object: guidance_scale_field_in_url = str(url_object[guidance_scale_field_key_in_url]) if guidance_scale_valid(guidance_scale_field_in_url): field_object.update({guidance_scale_field: guidance_scale_field_in_url}) steps_key_in_url = "steps" if steps_key_in_url in url_object: steps_in_url = str(url_object[steps_key_in_url]) if steps_valid(steps_in_url, base_model_name_value): if base_model_name_value == "sdxl_turbo": field_object.update({base_model_steps_field_for_sdxl_turbo_field: steps_in_url}) else: field_object.update({base_model_steps_field: steps_in_url}) seed_field_key_in_url = "seed" if seed_field_key_in_url in url_object: seed_field_in_url = url_object[seed_field_key_in_url] if seed_valid(seed_field_in_url): field_object.update({seed_field: seed_field_in_url}) add_seed_key_in_url = "add_seed" if add_seed_key_in_url in url_object: add_seed_in_url = url_object[add_seed_key_in_url].lower() add_seed_to_generation = True if ( (add_seed_in_url == "0") or (add_seed_in_url == "n") or (add_seed_in_url == "no") or (add_seed_in_url == "false") ): add_seed_to_generation = False field_object.update({add_seed_into_pipe_field: add_seed_to_generation}) is_default_config_state = 1 if model_configuration_name_value in online_configurations_object: is_default_config_state = 0 refiner_key_in_url = "refiner" if refiner_key_in_url in url_object: refiner_in_url = url_object[refiner_key_in_url].lower() refiner_in_url_formatted = "No" if ( (refiner_in_url == "1") or (refiner_in_url == "y") or (refiner_in_url == "yes") or (refiner_in_url == "true") ): refiner_in_url_formatted = "Yes" field_object.update({refining_selection_default_config_field: refiner_in_url_formatted}) field_object.update({refining_selection_online_config_normal_field: refiner_in_url_formatted}) refiner_denoise_start_key_in_url = "denoise_start" if refiner_denoise_start_key_in_url in url_object: refiner_denoise_start_in_url = str(url_object[refiner_denoise_start_key_in_url]) if refiner_denoise_start_valid(refiner_denoise_start_in_url): field_object.update({refining_denoise_start_for_default_config_field: refiner_denoise_start_in_url}) field_object.update({refining_denoise_start_for_online_config_field: refiner_denoise_start_in_url}) refining_steps_key_in_url = "refiner_steps" if refining_steps_key_in_url in url_object: refining_steps_in_url = str(url_object[refining_steps_key_in_url]) if refining_steps_valid(refining_steps_in_url): field_object.update({refining_steps_for_sdxl_online_config_field: refining_steps_in_url}) use_denoising_start_in_base_model_when_using_refiner_key_in_url = "use_denoise_end" if use_denoising_start_in_base_model_when_using_refiner_key_in_url in url_object: use_denoising_start_in_base_model_when_using_refiner_in_url = url_object[use_denoising_start_in_base_model_when_using_refiner_key_in_url].lower() use_denoising_start_in_base_model_when_using_refiner_bool = True if ( (use_denoising_start_in_base_model_when_using_refiner_in_url == "0") or (use_denoising_start_in_base_model_when_using_refiner_in_url == "n") or (use_denoising_start_in_base_model_when_using_refiner_in_url == "no") or (use_denoising_start_in_base_model_when_using_refiner_in_url == "false") ): use_denoising_start_in_base_model_when_using_refiner_bool = False field_object.update({refining_use_denoising_start_in_base_model_when_using_refiner_field: use_denoising_start_in_base_model_when_using_refiner_bool}) base_model_output_to_refiner_is_in_latent_space_key_in_url = "latent_space_before_refiner" if base_model_output_to_refiner_is_in_latent_space_key_in_url in url_object: base_model_output_to_refiner_is_in_latent_space_in_url = url_object[base_model_output_to_refiner_is_in_latent_space_key_in_url].lower() base_model_output_to_refiner_is_in_latent_space_bool = True if ( (base_model_output_to_refiner_is_in_latent_space_in_url == "0") or (base_model_output_to_refiner_is_in_latent_space_in_url == "n") or (base_model_output_to_refiner_is_in_latent_space_in_url == "no") or (base_model_output_to_refiner_is_in_latent_space_in_url == "false") ): base_model_output_to_refiner_is_in_latent_space_bool = False field_object.update({refining_base_model_output_to_refiner_is_in_latent_space_field: base_model_output_to_refiner_is_in_latent_space_bool}) upscaler_key_in_url = "upscaler" if upscaler_key_in_url in url_object: upscaler_in_url = url_object[upscaler_key_in_url].lower() upscaler_in_url_formatted = "No" if ( (upscaler_in_url == "1") or (upscaler_in_url == "y") or (upscaler_in_url == "yes") or (upscaler_in_url == "true") ): upscaler_in_url_formatted = "Yes" field_object.update({upscaling_selection_field: upscaler_in_url_formatted}) upscaling_steps_key_in_url = "upscaler_steps" if upscaling_steps_key_in_url in url_object: upscaling_steps_in_url = str(url_object[upscaling_steps_key_in_url]) if upscaling_steps_valid(upscaling_steps_in_url): field_object.update({upscaling_num_inference_steps_field: upscaling_steps_in_url}) generate_image_btn_update = gr.Button( interactive = True ) field_object.update({generate_image_btn: generate_image_btn_update}) return field_object ##################### # # Set Base Model and Model Configuration from query_params # # We need to handle this separate because the model configuration needs # to return those field components in order. They are not named as they # are dynamic. # ##################### def set_base_model_and_model_configuration_from_query_params( base_model_field_index, initial_model_configuration_name_selected_state_value, *model_configuration_dropdown_fields_array ): base_model_name_value = base_model_array[base_model_field_index] model_configuration_name_value_for_selected_base_model = initial_model_configuration_name_selected_state_value model_configuration_dropdown_fields_array = [] for this_base_model in base_model_array: model_configuration_name_default_value_for_this_base_model = base_model_model_configuration_defaults_object[this_base_model] for this_model_configuration in base_model_object_of_model_configuration_arrays[this_base_model]: if ( (base_model_name_value == this_base_model) and (model_configuration_name_value_for_selected_base_model == this_model_configuration) ): model_configuration_name_default_value_for_this_base_model = model_configuration_name_value_for_selected_base_model this_configuration_field_default_value = model_configuration_names_object[model_configuration_name_default_value_for_this_base_model] this_configuration_field = gr.Dropdown( value = this_configuration_field_default_value ) model_configuration_dropdown_fields_array.append(this_configuration_field) base_model_and_model_configuration_return_outputs = [] for this_model_configuration_dropdown_field in model_configuration_dropdown_fields_array: base_model_and_model_configuration_return_outputs.append( this_model_configuration_dropdown_field ) return base_model_and_model_configuration_return_outputs ############################################################################### ############################################################################### # # # # # # # Create Web Display # # # # # # ############################################################################### ############################################################################### # Hide border when yield is used: # https://github.com/gradio-app/gradio/issues/5479 # .generating {border: none !important;} css_to_use = "footer{display:none !important}" with gr.Blocks( title = "Spaghetti AI", css = css_to_use, theme = gr.themes.Default( spacing_size = gr.themes.sizes.spacing_md, # spacing_size = gr.themes.sizes.spacing_sm, radius_size = gr.themes.sizes.radius_none ), analytics_enabled = False ) as sd_interface: # Variables to store for user session image_gallery_array_state = gr.State([]) prompt_information_array_state = gr.State([]) initial_model_configuration_name_selected_state = gr.State("") last_model_configuration_name_selected_state = gr.State("") last_refiner_name_selected_state = gr.State("") last_upscaler_name_selected_state = gr.State("") stored_pipe_state = gr.State({}) stored_refiner_state = gr.State({}) stored_upscaler_state = gr.State({}) gr.Markdown(opening_html) with gr.Row(): with gr.Column(scale = 1): generate_image_btn = gr.Button( value = "Generate", variant = "primary", interactive = False ) with gr.Group(): with gr.Row(): prompt_field = gr.Textbox( label = "Prompt (77 token limit):", value = default_prompt ) with gr.Row( elem_id = "negative_prompt_field_row_id", visible = default_negative_prompt_field_row_visibility ): negative_prompt_field = gr.Textbox( label = "Negative Prompt (77 token limit):", value = default_negative_prompt ) with gr.Row( elem_id = "negative_prompt_for_sdxl_turbo_field_row_id", visible = default_negative_prompt_for_sdxl_turbo_field_row_visibility ): negative_prompt_for_sdxl_turbo_field = gr.HTML( value = "
Negative prompt is not used for SDXL Turbo.
" ) with gr.Group( visible = refiner_group_visible ): refiner_label_text = "Refiner" if allow_online_configurations == 1: refiner_label_text = "Refiner (Default configuration)" with gr.Accordion( elem_id = "refiner_default_config_accordion_id", label = refiner_label_text, open = refiner_default_config_accordion_open, visible = refiner_default_config_accordion_visible ) as refiner_default_config_accordion: # # # # Refiner (Default configuration) # # # with gr.Row(): gr.Markdown("This can be used if the image has too much noise.") with gr.Row(): refining_selection_default_config_field = gr.Radio( choices = ["Yes", "No"], value = default_refine_option, show_label = False, container = False ) with gr.Row(): refining_denoise_start_for_default_config_field = gr.Slider( label = "Refiner denoise start %", minimum = minimum_refiner_denoise_start, maximum = maximum_refiner_denoise_start, value = default_refiner_denoise_start, step = refiner_denoise_start_input_slider_steps ) with gr.Row(): refining_use_denoising_start_in_base_model_when_using_refiner_field = gr.Checkbox( label = "Use \"denoising_start\" value as \"denoising_end\" value in base model generation when using refiner", value = default_use_denoising_start_in_base_model_when_using_refiner_is_selected, interactive = True, container = True ) with gr.Row(): refining_base_model_output_to_refiner_is_in_latent_space_field = gr.Checkbox( label = "Base model output in latent space instead of PIL image when using refiner", value = default_base_model_output_to_refiner_is_in_latent_space_is_selected, interactive = True, container = True ) with gr.Accordion( elem_id = "refiner_online_config_accordion_id", label = "Refiner (Online configuration)", open = refiner_online_config_accordion_open, visible = refiner_online_config_accordion_visible ) as refiner_online_config_accordion: # # # # Refiner (Online configuration) # # # refining_selection_automatically_selected_message_field_visible = False refining_selection_online_config_normal_field_visible = True refining_selection_online_config_automatically_selected_field_visible = False if model_configuration_requires_refiner == 1: refining_selection_automatically_selected_message_field_visible = True refining_selection_online_config_normal_field_visible = False refining_selection_online_config_automatically_selected_field_visible = True with gr.Row(): refining_selection_automatically_selected_message_field = gr.Markdown( elem_id = "refining_selection_automatically_selected_message_field_id", value = "The online configuration you selected automatically uses the refiner.", visible = refining_selection_automatically_selected_message_field_visible ) with gr.Row(): refining_selection_online_config_normal_field = gr.Radio( elem_id = "refining_selection_online_config_normal_field_id", choices = ["Yes", "No"], value = default_refine_option, show_label = False, container = False, visible = refining_selection_online_config_normal_field_visible ) with gr.Row(): refining_selection_online_config_automatically_selected_field = gr.Radio( elem_id = "refining_selection_online_config_automatically_selected_field_id", choices = ["Yes"], value = "Yes", show_label = False, container = False, visible = refining_selection_online_config_automatically_selected_field_visible ) with gr.Row(): refining_denoise_start_for_online_config_field = gr.Slider( label = "Refiner denoise start %", minimum = minimum_refiner_denoise_start, maximum = maximum_refiner_denoise_start, value = default_refiner_denoise_start, step = refiner_denoise_start_input_slider_steps ) refining_steps_for_sdxl_online_config_field_visible = False if default_model_configuration in model_configuration_include_refiner_number_of_steps_object: refining_steps_for_sdxl_online_config_field_visible = True with gr.Row( elem_id = "refining_steps_for_sdxl_online_config_field_row_id", visible = refining_steps_for_sdxl_online_config_field_visible ): refining_steps_for_sdxl_online_config_field = gr.Slider( label = "Refining steps:", minimum = 1, maximum = maximum_refining_steps_for_online_config_field, value = default_refining_steps_for_online_config_field, step = 1 ) with gr.Group( visible = upscaler_group_visible ): with gr.Accordion( label = "Upscaler", elem_id = "upscaler_accordion_id", open = upscaler_accordion_open, visible = upscaler_group_visible ): # # # # Upscaler # # # with gr.Row(): gr.Markdown("Upscale by 2x?") with gr.Row(): upscaling_selection_field = gr.Radio( choices = ["Yes", "No"], value = default_upscale_option, show_label = False, container = False ) default_upscaling_num_inference_steps_field_row_visibility = True if default_model_configuration in model_configuration_hide_upscaler_steps_object: default_upscaling_num_inference_steps_field_row_visibility = False with gr.Row( elem_id = "upscaling_num_inference_steps_field_row_id", visible = default_upscaling_num_inference_steps_field_row_visibility ): upscaling_num_inference_steps_field = gr.Slider( label = "Upscaler number of steps", minimum = 1, maximum = maximum_upscaler_steps, value = default_upscaler_steps, step = 1 ) if ( (enable_refiner == 1) or (enable_upscaler == 1) ): refiner_and_upscaler_text_field = gr.HTML( value = "
" + default_refiner_and_upscaler_status_text + "
" ) with gr.Column(scale = 1): with gr.Group(): with gr.Row(): base_model_field = gr.Dropdown( label = "Model:", choices = default_base_model_choices_array, value = default_base_model_nicely_named_value, type = "index", filterable = False, #min_width = 240, interactive = True ) model_configuration_dropdown_field_values_for_js = "" model_configuration_dropdown_fields_array = [] for this_base_model in base_model_array: this_model_configuration_choices_array = [] for this_model_configuration in base_model_object_of_model_configuration_arrays[this_base_model]: this_model_configuration_choices_array.append( model_configuration_names_object[this_model_configuration] ) this_configuration_field_row_visibility = False if ( (this_base_model == default_base_model) and (allow_online_configurations == 1) ): this_configuration_field_row_visibility = True this_configuration_field_default_value = model_configuration_names_object[base_model_model_configuration_defaults_object[this_base_model]] this_configuration_field_default_value_for_js = this_configuration_field_default_value this_configuration_field_default_value_for_js = this_configuration_field_default_value_for_js.replace("\"", "\\\"") model_configuration_dropdown_field_values_for_js += "\"" + this_base_model + "\": \"" + this_configuration_field_default_value_for_js + "\"," with gr.Row( elem_id = "model_configuration_field_" + this_base_model + "_row_id", visible = this_configuration_field_row_visibility ): this_configuration_field = gr.Dropdown( label = "Configuration Type:", choices = this_model_configuration_choices_array, value = this_configuration_field_default_value, type = "index", filterable = False, #min_width = 240, interactive = True ) model_configuration_dropdown_fields_array.append(this_configuration_field) with gr.Row(): scheduler_field = gr.Dropdown( elem_id = "scheduler_field_id", label = "Scheduler / Sampler:", choices = default_scheduler_choices_array, value = default_scheduler_nicely_named_value, type = "index", filterable = False, #min_width = 240, interactive = True ) with gr.Row(): image_width_field = gr.Slider( label = "Width:", minimum = minimum_width, maximum = maximum_width, value = default_width, step = width_and_height_input_slider_steps, interactive = True ) image_height_field = gr.Slider( label = "Height:", minimum = minimum_height, maximum = maximum_height, value = default_height, step = width_and_height_input_slider_steps, interactive = True ) with gr.Row( elem_id = "base_model_steps_field_row_id", visible = default_base_model_steps_field_row_visibility ): base_model_steps_field = gr.Slider( label = "Steps:", minimum = 1, maximum = maximum_base_model_steps, value = default_base_model_steps, step = 1, interactive = True ) with gr.Row( elem_id = "base_model_steps_field_for_sdxl_turbo_field_row_id", visible = default_base_model_steps_field_for_sdxl_turbo_field_row_visibility ): base_model_steps_field_for_sdxl_turbo_field = gr.Slider( label = "Steps:", info = "Try using only 1 or a couple of steps.", minimum = 1, maximum = maximum_base_model_steps_for_sdxl_turbo, value = default_base_model_steps_for_sdxl_turbo, step = 1, interactive = True ) with gr.Row( elem_id = "guidance_scale_field_row_id", visible = default_guidance_scale_field_row_visibility ): guidance_scale_field = gr.Slider( label = "Guidance Scale:", minimum = minimum_guidance_scale, maximum = maximum_guidance_scale, value = default_guidance_scale, step = guidance_scale_input_slider_steps, interactive = True ) with gr.Row( elem_id = "guidance_scale_for_sdxl_turbo_field_row_id", visible = default_guidance_scale_for_sdxl_turbo_field_row_visibility ): guidance_scale_for_sdxl_turbo_field = gr.HTML( value = "
Guidance scale is not used for SDXL Turbo.
" ) with gr.Row(): if default_seed_value == "random": default_seed_value = generate_random_seed() # If you use a slider or number field for the seed, some # seeds can't be duplicated using those fields. If you # enter a number greater than 9007199254740992, the seed # won't reliably be used. This is a technical limitation # as of writing this. See the bug report here: # https://github.com/gradio-app/gradio/issues/5354 # # Until this is fixed, we use a textbox if the max seed # allowed is greater than that number. Using the slider, # and not entering a number, might be the way to get # reliable numbers above that number, if you just don't # then use the up and down arrows in the field to go up # or down a number. # # For now, I do this, but I might eventually have a # setting on the page to allow the slider. if make_seed_selection_a_textbox == 1: seed_field = gr.Textbox( label = "Seed:", value = default_seed_value, interactive = True ) else: seed_field = gr.Slider( label = "Seed:", minimum = 0, maximum = maximum_seed, value = default_seed_value, step = 1, interactive = True ) with gr.Row( elem_id = "add_seed_into_pipe_field_row_id", visible = default_add_seed_into_pipe_field_row_visibility ): add_seed_into_pipe_field = gr.Checkbox( label = "Add seed to generation (to make it deterministic)", value = default_add_seed_into_pipe_is_selected, interactive = True, container = True ) with gr.Column(scale = 1): image_field_visibility = True image_gallery_field_visibility = False if use_image_gallery == 1: image_field_visibility = False image_gallery_field_visibility = True with gr.Row( visible = image_field_visibility ): output_image_field = gr.Image( label = "Generated Image", type = "pil" ) with gr.Row( visible = image_gallery_field_visibility ): show_download_button = False show_download_button = True output_image_gallery_field = gr.Gallery( elem_id = "image_gallery_id", label = "Generated Images", value = [], selected_index = 0, allow_preview = "True", preview = True, show_download_button = show_download_button ) with gr.Row(): output_text_field = gr.Textbox( label = "Prompt Information:", value = "After an image is generated, its generation information will appear here. All of this information is also embedded in the image itself. If you open the image in a text program, it will appear at the top." + additional_prompt_info_html, show_copy_button = True, lines = 10, max_lines = 20, container = True ) with gr.Row(): prompt_truncated_field = gr.HTML( value = "", visible = False ) if enable_close_command_prompt_button == 1: cancel_image_btn = gr.Button( value = "Close Command Prompt / Cancel", variant = "stop" ) gr.Markdown("Closing the command prompt will cancel any images in the process of being created. You will need to launch it again, and then likely refresh the page, to create more images.") if len(ending_html) > 0: with gr.Accordion( elem_id = "information_section_id", label = "Information", open = True ): gr.Markdown(ending_html) ##################### # # Update Refiner and Upscaler Status Function for Javascript # # When the refiner or upscaler is turned on or off, a text message is # printed on the page. That needs to be updated. # ##################### update_refiner_and_upscaler_status_function_js = """ async ( baseModelFieldFullNameValue, refiningSelectionDefaultConfigFieldValue, refiningSelectionOnlineConfigNormalFieldValue, refiningSelectionOnlineConfigAutomaticallySelectedFieldValue, upscalingSelectionFieldValue ) => {{ "use strict"; var baseModelNamesObject = {0}; var modelConfigurationNamesObject = {1}; var onlineConfigurationsObject = {2}; var modelConfigurationForceRefinerObject = {3}; var refinerOnText = "{4}"; var refinerOffText = "{5}"; var upscalerOnText = "{6}"; var upscalerOffText = "{7}"; var baseModelFullNamesToBaseModelIdConversion = {{}}; Object.keys(baseModelNamesObject).forEach(key => {{ baseModelFullNamesToBaseModelIdConversion[baseModelNamesObject[key]] = key; }}); var baseModelFieldValue = ""; if (baseModelFullNamesToBaseModelIdConversion.hasOwnProperty(baseModelFieldFullNameValue)) {{ baseModelFieldValue = baseModelFullNamesToBaseModelIdConversion[baseModelFieldFullNameValue]; }} var modelConfigurationFullNameValue = window.modelConfigurationDropdownFieldValuesObject[baseModelFieldValue]; var modelConfigurationFullNamesToModelConfigurationIdConversion = {{}}; Object.keys(modelConfigurationNamesObject).forEach(key => {{ modelConfigurationFullNamesToModelConfigurationIdConversion[modelConfigurationNamesObject[key]] = key; }}); var modelConfigurationNameValue = ""; if (modelConfigurationFullNamesToModelConfigurationIdConversion.hasOwnProperty(modelConfigurationFullNameValue)) {{ modelConfigurationNameValue = modelConfigurationFullNamesToModelConfigurationIdConversion[modelConfigurationFullNameValue]; }} var refinerAndUpscalerInfoMessageHtml = ""; if ( baseModelFieldValue && modelConfigurationNameValue ) {{ var isDefaultConfigState = 1; if (onlineConfigurationsObject[modelConfigurationNameValue]) {{ isDefaultConfigState = 0; }} if ( ( (isDefaultConfigState === 1) && (refiningSelectionDefaultConfigFieldValue === "Yes") ) || ( (isDefaultConfigState !== 1) && ( ( (!Object.keys(modelConfigurationForceRefinerObject).includes(modelConfigurationNameValue)) && (refiningSelectionOnlineConfigNormalFieldValue === "Yes") ) || ( (Object.keys(modelConfigurationForceRefinerObject).includes(modelConfigurationNameValue)) && (refiningSelectionOnlineConfigAutomaticallySelectedFieldValue === "Yes") ) ) ) ) {{ refinerAndUpscalerInfoMessageHtml += refinerOnText; }} else {{ refinerAndUpscalerInfoMessageHtml += refinerOffText; }} if (upscalingSelectionFieldValue === "Yes") {{ refinerAndUpscalerInfoMessageHtml += upscalerOnText; }} else {{ refinerAndUpscalerInfoMessageHtml += upscalerOffText; }} }} document.getElementById("refiner_and_upscaler_info_message_div_id").innerHTML = refinerAndUpscalerInfoMessageHtml; }} """.format( base_model_names_object, model_configuration_names_object, online_configurations_object, model_configuration_force_refiner_object, refiner_on_text, refiner_off_text, upscaler_on_text, upscaler_off_text ) ##################### # # Model Change Function for Javascript # # When the base model or model configuration is changed, we may need # to show and hide certain fields. # ##################### model_change_function_js = """ async ( baseModelFieldFullNameValue, possiblyModelConfigurationFullNameValue ) => {{ "use strict"; var baseModelNamesObject = {0}; var modelConfigurationNamesObject = {1}; var baseModelArray = {2}; var onlineConfigurationsObject = {3}; var modelConfigurationForceRefinerObject = {4}; var modelConfigurationIncludeRefinerNumberOfStepsObject = {5}; var modelConfigurationHideUpscalerStepsObject = {6}; var allowOnlineConfigurations = {7}; var baseModelFullNamesToBaseModelIdConversion = {{}}; Object.keys(baseModelNamesObject).forEach(key => {{ baseModelFullNamesToBaseModelIdConversion[baseModelNamesObject[key]] = key; }}); var baseModelFieldValue = ""; if (baseModelFullNamesToBaseModelIdConversion.hasOwnProperty(baseModelFieldFullNameValue)) {{ baseModelFieldValue = baseModelFullNamesToBaseModelIdConversion[baseModelFieldFullNameValue]; }} var modelConfigurationFullNameValue = "" var isBaseModelDropdownChange = 0 if (baseModelFieldFullNameValue === possiblyModelConfigurationFullNameValue) {{ isBaseModelDropdownChange = 1; modelConfigurationFullNameValue = window.modelConfigurationDropdownFieldValuesObject[baseModelFieldValue]; }} else {{ modelConfigurationFullNameValue = possiblyModelConfigurationFullNameValue; window.modelConfigurationDropdownFieldValuesObject[baseModelFieldValue] = modelConfigurationFullNameValue; }} var modelConfigurationFullNamesToModelConfigurationIdConversion = {{}}; Object.keys(modelConfigurationNamesObject).forEach(key => {{ modelConfigurationFullNamesToModelConfigurationIdConversion[modelConfigurationNamesObject[key]] = key; }}); var modelConfigurationNameValue = ""; if (modelConfigurationFullNamesToModelConfigurationIdConversion.hasOwnProperty(modelConfigurationFullNameValue)) {{ modelConfigurationNameValue = modelConfigurationFullNamesToModelConfigurationIdConversion[modelConfigurationFullNameValue]; }} for (var thisBaseModel of baseModelArray) {{ var thisModelConfigurationElementId = "model_configuration_field_" + thisBaseModel + "_row_id"; var thisModelConfigurationElementDisplay = "none"; if ( (thisBaseModel === baseModelFieldValue) && (allowOnlineConfigurations === 1) ) {{ thisModelConfigurationElementDisplay = "block"; }} document.getElementById(thisModelConfigurationElementId).style.display = thisModelConfigurationElementDisplay; }} var modelConfigurationFullNamesToModelConfigurationIdConversion = {{}}; Object.keys(modelConfigurationNamesObject).forEach(key => {{ modelConfigurationFullNamesToModelConfigurationIdConversion[modelConfigurationNamesObject[key]] = key; }}); var modelConfigurationNameValue = ""; if (modelConfigurationFullNamesToModelConfigurationIdConversion.hasOwnProperty(modelConfigurationFullNameValue)) {{ modelConfigurationNameValue = modelConfigurationFullNamesToModelConfigurationIdConversion[modelConfigurationFullNameValue]; }} if ( baseModelFieldValue && modelConfigurationNameValue ) {{ var isDefaultConfigState = 1; if (onlineConfigurationsObject[modelConfigurationNameValue]) {{ isDefaultConfigState = 0; }} var negativePromptFieldDisplay = "block"; var negativePromptForSdxlTurboFieldDisplay = "none"; var baseModelNumInferenceStepsFieldDisplay = "block"; var baseModelNumInferenceStepsFieldForSdxlTurboFieldDisplay = "none"; var guidanceScaleFieldDisplay = "block"; var guidanceScaleForSdxlTurboFieldDisplay = "none"; if (baseModelFieldValue === "sdxl_turbo") {{ negativePromptFieldDisplay = "none"; negativePromptForSdxlTurboFieldDisplay = "block"; baseModelNumInferenceStepsFieldDisplay = "none"; baseModelNumInferenceStepsFieldForSdxlTurboFieldDisplay = "block"; guidanceScaleFieldDisplay = "none"; guidanceScaleForSdxlTurboFieldDisplay = "block"; }} document.getElementById("negative_prompt_field_row_id").style.display = negativePromptFieldDisplay; document.getElementById("negative_prompt_for_sdxl_turbo_field_row_id").style.display = negativePromptForSdxlTurboFieldDisplay; document.getElementById("base_model_steps_field_row_id").style.display = baseModelNumInferenceStepsFieldDisplay; document.getElementById("base_model_steps_field_for_sdxl_turbo_field_row_id").style.display = baseModelNumInferenceStepsFieldForSdxlTurboFieldDisplay; document.getElementById("guidance_scale_field_row_id").style.display = guidanceScaleFieldDisplay; document.getElementById("guidance_scale_for_sdxl_turbo_field_row_id").style.display = guidanceScaleForSdxlTurboFieldDisplay; var refinerDefaultConfigAccordionDisplay = "none"; var refinerOnlineConfigAccordionDisplay = "block"; var addSeedIntoPipeFieldDisplay = "none"; if (isDefaultConfigState === 1) {{ refinerDefaultConfigAccordionDisplay = "block"; refinerOnlineConfigAccordionDisplay = "none"; addSeedIntoPipeFieldDisplay = "block"; }} document.getElementById("refiner_default_config_accordion_id").style.display = refinerDefaultConfigAccordionDisplay; document.getElementById("refiner_online_config_accordion_id").style.display = refinerOnlineConfigAccordionDisplay; document.getElementById("add_seed_into_pipe_field_row_id").style.display = addSeedIntoPipeFieldDisplay; var refiningSelectionAutomaticallySelectedMessageFieldDisplay = "none"; var refiningSelectionOnlineConfigNormalFieldDisplay = "block"; var refiningSelectionOnlineConfigAutomaticallySelectedFieldDisplay = "none"; if (Object.keys(modelConfigurationForceRefinerObject).includes(modelConfigurationNameValue)) {{ refiningSelectionAutomaticallySelectedMessageFieldDisplay = "block"; refiningSelectionOnlineConfigNormalFieldDisplay = "none"; refiningSelectionOnlineConfigAutomaticallySelectedFieldDisplay = "block"; }} var refiningNumberOfIterationsForOnlineConfigFieldDisplay = "none"; if (Object.keys(modelConfigurationIncludeRefinerNumberOfStepsObject).includes(modelConfigurationNameValue)) {{ refiningNumberOfIterationsForOnlineConfigFieldDisplay = "block"; }} var upscalingNumInferenceStepsFieldDisplay = "block"; if (Object.keys(modelConfigurationHideUpscalerStepsObject).includes(modelConfigurationNameValue)) {{ upscalingNumInferenceStepsFieldDisplay = "none"; }} document.getElementById("refining_selection_automatically_selected_message_field_id").style.display = refiningSelectionAutomaticallySelectedMessageFieldDisplay; document.getElementById("refining_selection_online_config_normal_field_id").style.display = refiningSelectionOnlineConfigNormalFieldDisplay; document.getElementById("refining_selection_online_config_automatically_selected_field_id").style.display = refiningSelectionOnlineConfigAutomaticallySelectedFieldDisplay; document.getElementById("refining_steps_for_sdxl_online_config_field_row_id").style.display = refiningNumberOfIterationsForOnlineConfigFieldDisplay; document.getElementById("upscaling_num_inference_steps_field_row_id").style.display = upscalingNumInferenceStepsFieldDisplay; }} }} """.format( base_model_names_object, model_configuration_names_object, base_model_array, online_configurations_object, model_configuration_force_refiner_object, model_configuration_include_refiner_number_of_steps_object, model_configuration_hide_upscaler_steps_object, allow_online_configurations ) base_model_field.change( fn = None, inputs = [ base_model_field ], outputs = None, js = model_change_function_js ) for this_model_configuration_dropdown_field in model_configuration_dropdown_fields_array: this_model_configuration_dropdown_field.change( fn = None, inputs = [ base_model_field, this_model_configuration_dropdown_field ], outputs = None, js = model_change_function_js ) output_image_gallery_field.select( fn = update_prompt_info_from_gallery, inputs = [ prompt_information_array_state ], outputs = [ output_image_gallery_field, output_text_field ], show_progress = "hidden" ) if ( (enable_refiner == 1) or (enable_upscaler == 1) ): triggers_array = [] if enable_refiner == 1: triggers_array.extend([ base_model_field.change, refining_selection_default_config_field.change, refining_selection_online_config_normal_field.change, refining_selection_online_config_automatically_selected_field.change ]) for this_model_configuration_dropdown_field in model_configuration_dropdown_fields_array: triggers_array.extend([ this_model_configuration_dropdown_field.change ]) if enable_upscaler == 1: triggers_array.extend([ upscaling_selection_field.change ]) gr.on( triggers = triggers_array, fn = None, inputs = [ base_model_field, refining_selection_default_config_field, refining_selection_online_config_normal_field, refining_selection_online_config_automatically_selected_field, upscaling_selection_field ], outputs = None, show_progress = "hidden", queue = False, js = update_refiner_and_upscaler_status_function_js ) create_image_function_inputs = [ base_model_field, prompt_field, negative_prompt_field, scheduler_field, image_width_field, image_height_field, guidance_scale_field, base_model_steps_field, base_model_steps_field_for_sdxl_turbo_field, seed_field, add_seed_into_pipe_field, refining_selection_default_config_field, refining_selection_online_config_normal_field, refining_selection_online_config_automatically_selected_field, refining_denoise_start_for_default_config_field, refining_use_denoising_start_in_base_model_when_using_refiner_field, refining_base_model_output_to_refiner_is_in_latent_space_field, refining_denoise_start_for_online_config_field, refining_steps_for_sdxl_online_config_field, upscaling_selection_field, upscaling_num_inference_steps_field, image_gallery_array_state, prompt_information_array_state, last_model_configuration_name_selected_state, last_refiner_name_selected_state, last_upscaler_name_selected_state, stored_pipe_state, stored_refiner_state, stored_upscaler_state ] for this_model_configuration_dropdown_field in model_configuration_dropdown_fields_array: create_image_function_inputs.append( this_model_configuration_dropdown_field ) generate_image_btn_click_event = generate_image_btn.click( fn = before_create_image_function, inputs = None, outputs = [ generate_image_btn, output_image_field, output_image_gallery_field, output_text_field ], show_progress = "hidden", queue = True ).then( fn = create_image_function, inputs = create_image_function_inputs, outputs = [ output_image_field, output_image_gallery_field, output_text_field, prompt_truncated_field, last_model_configuration_name_selected_state, last_refiner_name_selected_state, last_upscaler_name_selected_state, stored_pipe_state, stored_refiner_state, stored_upscaler_state ], show_progress = "full", queue = True ).then( fn = after_create_image_function, inputs = None, outputs = [ generate_image_btn, output_text_field ], show_progress = "hidden", queue = True ) verify_seed_field_textbox_function_js = """ async ( seedFieldTextboxValue ) => {{ "use strict"; var defaultSeedMaximum = parseInt({0}); seedFieldTextboxValue = parseInt(seedFieldTextboxValue); if (isNaN(seedFieldTextboxValue)) {{ seedFieldTextboxValue = ""; }} else if (seedFieldTextboxValue > defaultSeedMaximum) {{ seedFieldTextboxValue = defaultSeedMaximum; }} return [ seedFieldTextboxValue ]; }} """.format( maximum_seed ) if make_seed_selection_a_textbox == 1: seed_field.change( fn = None, inputs = [ seed_field ], outputs = [ seed_field ], show_progress = "hidden", queue = False, js = verify_seed_field_textbox_function_js ) if enable_close_command_prompt_button == 1: # https://github.com/gradio-app/gradio/pull/2433/files cancel_image_btn.click( fn = cancel_image_processing, inputs = None, outputs = None, cancels = [generate_image_btn_click_event], queue = True ) # Remove last comma model_configuration_dropdown_field_values_for_js = model_configuration_dropdown_field_values_for_js[:-1] script_on_load_js = """ async () => {{ "use strict"; window.modelConfigurationDropdownFieldValuesObject = {{{0}}}; }} """.format( model_configuration_dropdown_field_values_for_js ) model_base_model_and_model_configuration_inputs = [ base_model_field, initial_model_configuration_name_selected_state ] model_base_model_and_model_configuration_outputs = [] for this_model_configuration_dropdown_field in model_configuration_dropdown_fields_array: model_base_model_and_model_configuration_inputs.append( this_model_configuration_dropdown_field ) model_base_model_and_model_configuration_outputs.append( this_model_configuration_dropdown_field ) sd_interface_load_outputs = create_image_function_inputs + [ initial_model_configuration_name_selected_state, generate_image_btn ] sd_interface_continuous = sd_interface.load( fn = get_query_params, inputs = None, outputs = sd_interface_load_outputs, show_progress = "hidden", queue = False, scroll_to_output = False, js = script_on_load_js ).then( fn = set_base_model_and_model_configuration_from_query_params, inputs = model_base_model_and_model_configuration_inputs, outputs = model_base_model_and_model_configuration_outputs, show_progress = "hidden", queue = False ) sd_interface.queue( max_size = max_queue_size ) inbrowser = False if auto_open_browser == 1: inbrowser = True sd_interface.launch( inbrowser = inbrowser, share = None, show_api = False, quiet = True, show_error = True, state_session_capacity = 10000, max_threads = 40 )