# Spaces: Running on Zero
# (Hugging Face Spaces status banner captured during page export; kept as a comment.)
# Standard library
import os
import random
import re
import shutil
import urllib.parse
from typing import Optional

# Third-party
import spaces
import torch
from stablepy import Model_Diffusers
from stablepy.diffusers_vanilla.model import scheduler_names
from stablepy.diffusers_vanilla.style_prompt_config import STYLE_NAMES
from stablepy import (
    CONTROLNET_MODEL_IDS,
    VALID_TASKS,
    T2I_PREPROCESSOR_NAME,
    FLASH_LORA,
    SCHEDULER_CONFIG_MAP,
    scheduler_names,
    IP_ADAPTER_MODELS,
    IP_ADAPTERS_SD,
    IP_ADAPTERS_SDXL,
    REPO_IMAGE_ENCODER,
    ALL_PROMPT_WEIGHT_OPTIONS,
    SD15_TASKS,
    SDXL_TASKS,
)

# Local
from config import (
    MINIMUM_IMAGE_NUMBER,
    MAXIMUM_IMAGE_NUMBER,
    DEFAULT_NEGATIVE_PROMPT,
    DEFAULT_POSITIVE_PROMPT,
)
from examples.examples import example_prompts
from models.checkpoints import CHECKPOINT_LIST as download_model
from models.controlnet import preprocessor_controlnet
from models.embeds import download_embeds
from models.format_models import FORMAT_MODELS as load_diffusers_format_model
from models.loras import LORA_LIST as download_lora
from models.upscaler import upscaler_dict_gui
from models.vae import VAE_LIST as download_vae
from utils.download_utils import download_things
from utils.model_utils import get_model_list
# Maps the task labels shown in the GUI to the stablepy task identifiers
# they dispatch to. Declaration order is the order shown in the dropdown.
# The T2I-Adapter variants stay disabled: they have no step-callback
# parameter, so they do not work with diffusers 0.29.0.
task_stablepy: dict = dict([
    ('txt2img', 'txt2img'),
    ('img2img', 'img2img'),
    ('inpaint', 'inpaint'),
    # ('canny T2I Adapter', 'sdxl_canny_t2i'),
    # ('sketch T2I Adapter', 'sdxl_sketch_t2i'),
    # ('lineart T2I Adapter', 'sdxl_lineart_t2i'),
    # ('depth-midas T2I Adapter', 'sdxl_depth-midas_t2i'),
    # ('openpose T2I Adapter', 'sdxl_openpose_t2i'),
    ('openpose ControlNet', 'openpose'),
    ('canny ControlNet', 'canny'),
    ('mlsd ControlNet', 'mlsd'),
    ('scribble ControlNet', 'scribble'),
    ('softedge ControlNet', 'softedge'),
    ('segmentation ControlNet', 'segmentation'),
    ('depth ControlNet', 'depth'),
    ('normalbae ControlNet', 'normalbae'),
    ('lineart ControlNet', 'lineart'),
    # ('lineart_anime ControlNet', 'lineart_anime'),
    ('shuffle ControlNet', 'shuffle'),
    ('ip2p ControlNet', 'ip2p'),
    ('optical pattern ControlNet', 'pattern'),
    ('tile realistic', 'sdxl_tile_realistic'),
])
# LOAD ALL ENV TOKEN
# API credentials read from the environment. `os.environ.get` returns None
# when the variable is unset, so the previous `: str` annotations were wrong;
# downstream code (download_things) must tolerate None.
CIVITAI_API_KEY: Optional[str] = os.environ.get("CIVITAI_API_KEY")
hf_token: Optional[str] = os.environ.get("HF_TOKEN")
task_model_list = list(task_stablepy.keys()) | |
# Working directories for downloaded assets. NOTE: "embedings" (sic) is kept
# misspelled because the download loops below reference the path verbatim.
directory_models: str = 'models'
directory_loras: str = 'loras'
directory_vaes: str = 'vaes'
directory_embeds: str = 'embedings'

# One loop instead of four copy-pasted makedirs calls.
for _asset_dir in (directory_models, directory_loras, directory_vaes, directory_embeds):
    os.makedirs(_asset_dir, exist_ok=True)
# Download stuffs
def _split_urls(csv_urls: str) -> list:
    """Split a comma-separated URL string into stripped entries."""
    return [u.strip() for u in csv_urls.split(',')]


def _download_missing(directory: str, urls) -> None:
    """Download each URL into *directory* unless a file with the same basename exists.

    Replaces four copy-pasted loops; the existence check keys on the URL's
    final path component, matching the original per-type loops.
    """
    for url in urls:
        if not os.path.exists(f"./{directory}/{url.split('/')[-1]}"):
            download_things(directory, url, hf_token, CIVITAI_API_KEY)


_download_missing(directory_models, _split_urls(download_model))
_download_missing(directory_vaes, _split_urls(download_vae))
_download_missing(directory_loras, _split_urls(download_lora))
# download_embeds is already a list of URLs (no comma-splitting/stripping,
# same as the original loop).
_download_missing(directory_embeds, download_embeds)
# Build list models
# Selectable lists for the GUI; the "None" sentinel lets the user leave
# LoRA/VAE slots empty. Diffusers-format models are listed first.
embed_list = get_model_list(directory_embeds)
model_list = load_diffusers_format_model + get_model_list(directory_models)
lora_model_list = ["None"] + get_model_list(directory_loras)
vae_model_list = ["None"] + get_model_list(directory_vaes)
def get_my_lora(link_url) -> tuple:
    """Download LoRAs from a comma-separated URL string and refresh the GUI lists.

    Files already present in the loras directory are skipped. Returns five
    identical `gr.update` objects, one per LoRA dropdown — previously five
    copy-pasted calls.
    """
    for url in (u.strip() for u in link_url.split(',')):
        if not os.path.exists(f"./loras/{url.split('/')[-1]}"):
            download_things(directory_loras, url, hf_token, CIVITAI_API_KEY)
    refreshed_loras: list = ["None"] + get_model_list(directory_loras)
    return tuple(gr.update(choices=refreshed_loras) for _ in range(5))
print('\033[33m🏁 Download and listing of valid models completed.\033[0m') | |
####################### | |
# GUI | |
####################### | |
import spaces | |
import gradio as gr | |
from PIL import Image | |
import IPython.display | |
import time, json | |
from IPython.utils import capture | |
import logging | |
from utils.string_utils import extract_parameters | |
from stablepy import logger | |
# Quiet diffusers' logger before touching the library, then drop its
# verbosity to ERROR (40).
logging.getLogger("diffusers").setLevel(logging.ERROR)
import diffusers
diffusers.utils.logging.set_verbosity(40)
import warnings
from gui import GuiSD

# Silence the known-noisy warning categories from diffusers/transformers.
for _category, _module in (
    (FutureWarning, "diffusers"),
    (UserWarning, "diffusers"),
    (FutureWarning, "transformers"),
):
    warnings.filterwarnings(action="ignore", category=_category, module=_module)

logger.setLevel(logging.DEBUG)

# init GuiSD — the single generator instance shared by every GUI callback.
sd_gen = GuiSD(
    model_list=model_list,
    task_stablepy=task_stablepy,
    lora_model_list=lora_model_list,
)
# Stylesheet for the Blocks app.
with open("app.css", "r") as css_file:
    CSS: str = css_file.read()

# GUI task labels filtered by which backend task set supports them.
sdxl_task = [label for label, backend in task_stablepy.items() if backend in SDXL_TASKS]
sd_task = [label for label, backend in task_stablepy.items() if backend in SD15_TASKS]
def update_task_options(model_name, task_name):
    """Restrict the task dropdown to tasks valid for the selected model.

    Known models whose name contains "xl" get the SDXL task set, other known
    models the SD1.5 set; if the current task is not in the new set it falls
    back to "txt2img". Unknown (custom) models keep the full task list.
    """
    if model_name not in model_list:
        # Custom/unknown model: offer every task, selection unchanged.
        return gr.update(value=task_name, choices=task_model_list)

    valid_choices = sdxl_task if "xl" in model_name.lower() else sd_task
    if task_name not in valid_choices:
        task_name = "txt2img"
    return gr.update(value=task_name, choices=valid_choices)
# APP | |
with gr.Blocks(css=CSS) as app: | |
gr.Markdown("# 🧩 (Ivan) DiffuseCraft") | |
with gr.Tab("Generation"): | |
with gr.Row(): | |
with gr.Column(scale=2): | |
task_gui = gr.Dropdown( | |
label="Task", | |
choices=sdxl_task, | |
value=task_model_list[0], | |
) | |
model_name_gui = gr.Dropdown( | |
label="Model", | |
choices=model_list, | |
value="models/animaPencilXL_v500.safetensors" or model_list[0], | |
allow_custom_value=True | |
) | |
prompt_gui = gr.Textbox( | |
lines=5, | |
placeholder="Enter Positive prompt", | |
label="Positive Prompt", | |
value=DEFAULT_POSITIVE_PROMPT | |
) | |
neg_prompt_gui = gr.Textbox( | |
lines=3, | |
placeholder="Enter Negative prompt", | |
label="Negative prompt", | |
value=DEFAULT_NEGATIVE_PROMPT | |
) | |
with gr.Row(equal_height=False): | |
set_params_gui = gr.Button(value="↙️") | |
clear_prompt_gui = gr.Button(value="🗑️") | |
set_random_seed = gr.Button(value="🎲") | |
generate_button = gr.Button( | |
value="GENERATE", | |
variant="primary" | |
) | |
model_name_gui.change( | |
update_task_options, | |
[model_name_gui, task_gui], | |
[task_gui], | |
) | |
load_model_gui = gr.HTML() | |
result_images = gr.Gallery( | |
label="Generated images", | |
show_label=False, | |
elem_id="gallery", | |
columns=[2], | |
rows=[2], | |
object_fit="contain", | |
# height="auto", | |
interactive=False, | |
preview=False, | |
selected_index=50, | |
) | |
actual_task_info = gr.HTML() | |
with gr.Column(scale=1): | |
steps_gui = gr.Slider( | |
minimum=1, | |
maximum=100, | |
step=1, | |
value=43, | |
label="Steps" | |
) | |
cfg_gui = gr.Slider( | |
minimum=0, | |
maximum=30, | |
step=0.5, | |
value=7.5, | |
label="CFG" | |
) | |
sampler_gui = gr.Dropdown( | |
label="Sampler", | |
choices=scheduler_names, | |
value="DPM++ 2M Karras" | |
) | |
img_width_gui = gr.Slider( | |
minimum=64, | |
maximum=4096, | |
step=8, | |
value=1024, | |
label="Img Width" | |
) | |
img_height_gui = gr.Slider( | |
minimum=64, | |
maximum=4096, | |
step=8, | |
value=1024, | |
label="Img Height" | |
) | |
seed_gui = gr.Number( | |
minimum=-1, | |
maximum=9999999999, | |
value=-1, | |
label="Seed" | |
) | |
with gr.Row(): | |
clip_skip_gui = gr.Checkbox( | |
value=True, | |
label="Layer 2 Clip Skip" | |
) | |
free_u_gui = gr.Checkbox( | |
value=True, | |
label="FreeU" | |
) | |
with gr.Row(equal_height=False): | |
def run_set_params_gui(base_prompt):
    """Parse generation parameters pasted into the prompt box and apply them.

    Returns a list of gr.update objects in the fixed output order:
    prompt, neg_prompt, Steps, width, height, Seed, Sampler, scale (CFG),
    skip (clip-skip checkbox). Unrecognized keys are ignored; the Seed from
    pasted parameters is deliberately dropped so generation stays randomized.
    """
    valid_receptors: dict = {  # default values, keyed by parameter name
        "prompt": gr.update(value=base_prompt),
        "neg_prompt": gr.update(value=""),
        "Steps": gr.update(value=30),
        "width": gr.update(value=1024),
        "height": gr.update(value=1024),
        "Seed": gr.update(value=-1),
        "Sampler": gr.update(value="Euler a"),
        "scale": gr.update(value=7.5),  # cfg
        "skip": gr.update(value=True),
    }
    valid_keys = list(valid_receptors.keys())
    parameters: dict = extract_parameters(base_prompt)
    for key, val in parameters.items():
        if key not in valid_keys:
            continue
        if key == "Sampler" and val not in scheduler_names:
            continue  # unknown sampler: keep the default
        if key == "skip":
            # BUG FIX: previously only `>= 2` set val=True and anything else
            # passed the raw string through; "1" is truthy, so clip-skip 1
            # still showed the checkbox enabled. Coerce to a real bool.
            val = int(val) >= 2
        if key == "prompt" and ">" in val and "<" in val:
            # Strip LoRA tags like <lora:name:1> written inside the prompt.
            val = re.sub(r'<[^>]+>', '', val)
            print("Removed LoRA written in the prompt")
        if key in ["prompt", "neg_prompt"]:
            val = val.strip()
        if key in ["Steps", "width", "height"]:
            val = int(val)
        if key == "scale":
            val = float(val)
        if key == "Seed":
            continue  # pasted seed is ignored; field stays -1 (random)
        valid_receptors[key] = gr.update(value=val)
    return [value for value in valid_receptors.values()]
set_params_gui.click( | |
run_set_params_gui, [prompt_gui], [ | |
prompt_gui, | |
neg_prompt_gui, | |
steps_gui, | |
img_width_gui, | |
img_height_gui, | |
seed_gui, | |
sampler_gui, | |
cfg_gui, | |
clip_skip_gui, | |
], | |
) | |
def run_clear_prompt_gui():
    """Blank both prompt fields."""
    return gr.update(value=""), gr.update(value="")


clear_prompt_gui.click(run_clear_prompt_gui, [], [prompt_gui, neg_prompt_gui])


def run_set_random_seed():
    """Reset the seed field to -1, i.e. pick a random seed at generation time."""
    return -1


set_random_seed.click(run_set_random_seed, [], seed_gui)
num_images_gui = gr.Slider( | |
minimum=MINIMUM_IMAGE_NUMBER, | |
maximum=MAXIMUM_IMAGE_NUMBER, | |
step=1, | |
value=1, | |
label="Images" | |
) | |
prompt_s_options = [ | |
("Classic format: (word:weight)", "Classic"), | |
("Compel format: (word)weight", "Compel"), | |
("Classic-original format: (word:weight)", "Classic-original"), | |
("Classic-no_norm format: (word:weight)", "Classic-no_norm"), | |
("Classic-ignore", "Classic-ignore"), | |
("None", "None"), | |
] | |
prompt_syntax_gui = gr.Dropdown( | |
label="Prompt Syntax", | |
choices=prompt_s_options, | |
value=prompt_s_options[0][1] | |
) | |
vae_model_gui = gr.Dropdown( | |
label="VAE Model", | |
choices=vae_model_list, | |
value=vae_model_list[1] | |
) | |
with gr.Accordion( | |
"Hires fix", | |
open=False, | |
visible=True): | |
upscaler_keys = list(upscaler_dict_gui.keys()) | |
upscaler_model_path_gui = gr.Dropdown( | |
label="Upscaler", | |
choices=upscaler_keys, | |
value=upscaler_keys[0] | |
) | |
upscaler_increases_size_gui = gr.Slider( | |
minimum=1.1, | |
maximum=6., | |
step=0.1, | |
value=1.4, | |
label="Upscale by" | |
) | |
esrgan_tile_gui = gr.Slider( | |
minimum=0, | |
value=100, | |
maximum=500, | |
step=1, | |
label="ESRGAN Tile" | |
) | |
esrgan_tile_overlap_gui = gr.Slider( | |
minimum=1, | |
maximum=200, | |
step=1, | |
value=10, | |
label="ESRGAN Tile Overlap" | |
) | |
hires_steps_gui = gr.Slider( | |
minimum=0, | |
value=30, | |
maximum=100, | |
step=1, | |
label="Hires Steps" | |
) | |
hires_denoising_strength_gui = gr.Slider( | |
minimum=0.1, | |
maximum=1.0, | |
step=0.01, | |
value=0.55, | |
label="Hires Denoising Strength" | |
) | |
hires_sampler_gui = gr.Dropdown( | |
label="Hires Sampler", | |
choices=["Use same sampler"] + scheduler_names[:-1], | |
value="Use same sampler" | |
) | |
hires_prompt_gui = gr.Textbox( | |
label="Hires Prompt", | |
placeholder="Main prompt will be use", | |
lines=3 | |
) | |
hires_negative_prompt_gui = gr.Textbox( | |
label="Hires Negative Prompt", | |
placeholder="Main negative prompt will be use", | |
lines=3 | |
) | |
with gr.Accordion("LoRA", open=False, visible=True): | |
lora1_gui = gr.Dropdown( | |
label="Lora1", | |
choices=lora_model_list | |
) | |
lora_scale_1_gui = gr.Slider( | |
minimum=-2, | |
maximum=2, | |
step=0.01, | |
value=0.33, | |
label="Lora Scale 1" | |
) | |
lora2_gui = gr.Dropdown( | |
label="Lora2", | |
choices=lora_model_list | |
) | |
lora_scale_2_gui = gr.Slider( | |
minimum=-2, | |
maximum=2, | |
step=0.01, | |
value=0.33, | |
label="Lora Scale 2" | |
) | |
lora3_gui = gr.Dropdown( | |
label="Lora3", | |
choices=lora_model_list | |
) | |
lora_scale_3_gui = gr.Slider( | |
minimum=-2, | |
maximum=2, | |
step=0.01, | |
value=0.33, | |
label="Lora Scale 3" | |
) | |
lora4_gui = gr.Dropdown( | |
label="Lora4", | |
choices=lora_model_list | |
) | |
lora_scale_4_gui = gr.Slider( | |
minimum=-2, | |
maximum=2, | |
step=0.01, | |
value=0.33, | |
label="Lora Scale 4" | |
) | |
lora5_gui = gr.Dropdown( | |
label="Lora5", | |
choices=lora_model_list | |
) | |
lora_scale_5_gui = gr.Slider( | |
minimum=-2, | |
maximum=2, | |
step=0.01, | |
value=0.33, | |
label="Lora Scale 5" | |
) | |
with gr.Accordion( | |
"From URL", | |
open=False, | |
visible=True): | |
text_lora = gr.Textbox( | |
label="URL", | |
placeholder="http://...my_lora_url.safetensors", | |
lines=1 | |
) | |
button_lora = gr.Button("Get and update lists of LoRAs") | |
button_lora.click( | |
get_my_lora, | |
[text_lora], | |
[ | |
lora1_gui, | |
lora2_gui, | |
lora3_gui, | |
lora4_gui, | |
lora5_gui | |
] | |
) | |
with gr.Accordion("IP-Adapter", open=False, visible=True): # IP-Adapter | |
IP_MODELS = sorted( | |
list( | |
set( | |
IP_ADAPTERS_SD + IP_ADAPTERS_SDXL | |
) | |
) | |
) | |
MODE_IP_OPTIONS = [ | |
"original", | |
"style", | |
"layout", | |
"style+layout" | |
] | |
with gr.Accordion("IP-Adapter 1", open=False, visible=True): | |
image_ip1 = gr.Image( | |
label="IP Image", | |
type="filepath" | |
) | |
mask_ip1 = gr.Image( | |
label="IP Mask", | |
type="filepath" | |
) | |
model_ip1 = gr.Dropdown( | |
value="plus_face", | |
label="Model", | |
choices=IP_MODELS | |
) | |
mode_ip1 = gr.Dropdown( | |
value="original", | |
label="Mode", | |
choices=MODE_IP_OPTIONS | |
) | |
scale_ip1 = gr.Slider( | |
minimum=0., | |
maximum=2., | |
step=0.01, | |
value=0.7, | |
label="Scale" | |
) | |
with gr.Accordion("IP-Adapter 2", open=False, visible=True): | |
image_ip2 = gr.Image( | |
label="IP Image", | |
type="filepath" | |
) | |
mask_ip2 = gr.Image( | |
label="IP Mask (optional)", | |
type="filepath" | |
) | |
model_ip2 = gr.Dropdown( | |
value="base", | |
label="Model", | |
choices=IP_MODELS | |
) | |
mode_ip2 = gr.Dropdown( | |
value="style", | |
label="Mode", | |
choices=MODE_IP_OPTIONS | |
) | |
scale_ip2 = gr.Slider( | |
minimum=0., | |
maximum=2., | |
step=0.01, | |
value=0.7, | |
label="Scale" | |
) | |
with gr.Accordion( | |
"ControlNet / Img2img / Inpaint", | |
open=False, | |
visible=True): | |
image_control = gr.Image( | |
label="Image ControlNet/Inpaint/Img2img", | |
type="filepath" | |
) | |
image_mask_gui = gr.Image( | |
label="Image Mask", | |
type="filepath" | |
) | |
strength_gui = gr.Slider( | |
minimum=0.01, | |
maximum=1.0, | |
step=0.01, | |
value=0.55, | |
label="Strength", | |
info="This option adjusts the level of changes for img2img and inpainting." | |
) | |
image_resolution_gui = gr.Slider( | |
minimum=64, | |
maximum=2048, | |
step=64, value=1024, | |
label="Image Resolution" | |
) | |
preprocessor_name_gui = gr.Dropdown( | |
label="Preprocessor Name", | |
choices=preprocessor_controlnet["canny"] | |
) | |
def change_preprocessor_choices(task):
    """Swap the preprocessor dropdown options to match the selected task."""
    backend_task = task_stablepy[task]
    # Tasks without a dedicated preprocessor list fall back to canny's.
    choices_task = preprocessor_controlnet.get(
        backend_task, preprocessor_controlnet["canny"]
    )
    return gr.update(choices=choices_task, value=choices_task[0])


task_gui.change(
    change_preprocessor_choices,
    [task_gui],
    [preprocessor_name_gui],
)
preprocess_resolution_gui = gr.Slider( | |
minimum=64, | |
maximum=2048, | |
step=64, | |
value=512, | |
label="Preprocess Resolution" | |
) | |
low_threshold_gui = gr.Slider( | |
minimum=1, | |
maximum=255, | |
step=1, | |
value=100, | |
label="Canny low threshold" | |
) | |
high_threshold_gui = gr.Slider( | |
minimum=1, | |
maximum=255, | |
step=1, | |
value=200, | |
label="Canny high threshold" | |
) | |
value_threshold_gui = gr.Slider( | |
minimum=1, | |
maximum=2.0, | |
step=0.01, value=0.1, | |
label="Hough value threshold (MLSD)" | |
) | |
distance_threshold_gui = gr.Slider( | |
minimum=1, | |
maximum=20.0, | |
step=0.01, | |
value=0.1, | |
label="Hough distance threshold (MLSD)" | |
) | |
control_net_output_scaling_gui = gr.Slider( | |
minimum=0, | |
maximum=5.0, | |
step=0.1, | |
value=1, | |
label="ControlNet Output Scaling in UNet" | |
) | |
control_net_start_threshold_gui = gr.Slider( | |
minimum=0, | |
maximum=1, | |
step=0.01, | |
value=0, | |
label="ControlNet Start Threshold (%)" | |
) | |
control_net_stop_threshold_gui = gr.Slider( | |
minimum=0, | |
maximum=1, | |
step=0.01, | |
value=1, | |
label="ControlNet Stop Threshold (%)" | |
) | |
with gr.Accordion( | |
"T2I adapter", | |
open=False, | |
visible=True): | |
t2i_adapter_preprocessor_gui = gr.Checkbox( | |
value=True, | |
label="T2i Adapter Preprocessor" | |
) | |
adapter_conditioning_scale_gui = gr.Slider( | |
minimum=0, | |
maximum=5., | |
step=0.1, | |
value=1, | |
label="Adapter Conditioning Scale" | |
) | |
adapter_conditioning_factor_gui = gr.Slider( | |
minimum=0, | |
maximum=1., | |
step=0.01, | |
value=0.55, | |
label="Adapter Conditioning Factor (%)" | |
) | |
with gr.Accordion( | |
"Styles", | |
open=False, | |
visible=True): | |
# Prefer styles already loaded on the model; fall back to the bundled set.
try:
    style_names_found = sd_gen.model.STYLE_NAMES
except Exception:
    # Was a bare `except:`, which also swallowed SystemExit/KeyboardInterrupt.
    style_names_found = STYLE_NAMES
style_prompt_gui = gr.Dropdown( | |
style_names_found, | |
multiselect=True, | |
value=None, | |
label="Style Prompt", | |
interactive=True, | |
) | |
style_json_gui = gr.File(label="Style JSON File") | |
style_button = gr.Button("Load styles") | |
def load_json_style_file(json):
    """Load a style JSON file into the current model and refresh the dropdown.

    NOTE: the parameter name shadows the stdlib `json` module; kept as-is
    for interface stability.
    """
    if not sd_gen.model:
        # No pipeline loaded yet — nothing to attach styles to.
        gr.Info("First load the model")
        return gr.update(value=None, choices=STYLE_NAMES)

    sd_gen.model.load_style_file(json)
    gr.Info(f"{len(sd_gen.model.STYLE_NAMES)} styles loaded")
    return gr.update(value=None, choices=sd_gen.model.STYLE_NAMES)


style_button.click(load_json_style_file, [style_json_gui], [style_prompt_gui])
with gr.Accordion( | |
"Textual inversion", | |
open=False, | |
visible=False): | |
active_textual_inversion_gui = gr.Checkbox( | |
value=False, | |
label="Active Textual Inversion in prompt" | |
) | |
with gr.Accordion( | |
"Detailfix", | |
open=False, | |
visible=True): | |
# Adetailer Inpaint Only | |
adetailer_inpaint_only_gui = gr.Checkbox(label="Inpaint only", value=True) | |
# Adetailer Verbose | |
adetailer_verbose_gui = gr.Checkbox(label="Verbose", value=False) | |
# Adetailer Sampler | |
adetailer_sampler_options = ["Use same sampler"] + scheduler_names[:-1] | |
adetailer_sampler_gui = gr.Dropdown( | |
label="Adetailer sampler:", | |
choices=adetailer_sampler_options, | |
value="Use same sampler" | |
) | |
with gr.Accordion( | |
"Detailfix A", | |
open=False, | |
visible=True): | |
# Adetailer A | |
adetailer_active_a_gui = gr.Checkbox( | |
label="Enable Adetailer A", | |
value=False | |
) | |
prompt_ad_a_gui = gr.Textbox( | |
label="Main prompt", | |
placeholder="Main prompt will be use", | |
lines=3 | |
) | |
negative_prompt_ad_a_gui = gr.Textbox( | |
label="Negative prompt", | |
placeholder="Main negative prompt will be use", | |
lines=3 | |
) | |
strength_ad_a_gui = gr.Number( | |
label="Strength:", | |
value=0.35, step=0.01, | |
minimum=0.01, | |
maximum=1.0 | |
) | |
face_detector_ad_a_gui = gr.Checkbox( | |
label="Face detector", | |
value=True | |
) | |
person_detector_ad_a_gui = gr.Checkbox( | |
label="Person detector", | |
value=True | |
) | |
hand_detector_ad_a_gui = gr.Checkbox( | |
label="Hand detector", | |
value=False | |
) | |
mask_dilation_a_gui = gr.Number( | |
label="Mask dilation:", | |
value=4, | |
minimum=1 | |
) | |
mask_blur_a_gui = gr.Number( | |
label="Mask blur:", | |
value=4, | |
minimum=1 | |
) | |
mask_padding_a_gui = gr.Number( | |
label="Mask padding:", | |
value=32, | |
minimum=1 | |
) | |
with gr.Accordion( | |
"Detailfix B", | |
open=False, | |
visible=True): | |
# Adetailer B | |
adetailer_active_b_gui = gr.Checkbox(label="Enable Adetailer B", value=False) | |
prompt_ad_b_gui = gr.Textbox( | |
label="Main prompt", | |
placeholder="Main prompt will be use", | |
lines=3 | |
) | |
negative_prompt_ad_b_gui = gr.Textbox( | |
label="Negative prompt", | |
placeholder="Main negative prompt will be use", | |
lines=3 | |
) | |
strength_ad_b_gui = gr.Number( | |
label="Strength:", | |
value=0.35, | |
step=0.01, | |
minimum=0.01, | |
maximum=1.0 | |
) | |
face_detector_ad_b_gui = gr.Checkbox( | |
label="Face detector", | |
value=True | |
) | |
person_detector_ad_b_gui = gr.Checkbox( | |
label="Person detector", | |
value=True | |
) | |
hand_detector_ad_b_gui = gr.Checkbox( | |
label="Hand detector", | |
value=False | |
) | |
mask_dilation_b_gui = gr.Number( | |
label="Mask dilation:", | |
value=4, | |
minimum=1 | |
) | |
mask_blur_b_gui = gr.Number( | |
label="Mask blur:", | |
value=4, | |
minimum=1 | |
) | |
mask_padding_b_gui = gr.Number( | |
label="Mask padding:", | |
value=32, | |
minimum=1 | |
) | |
with gr.Accordion( | |
"Other settings", | |
open=False, | |
visible=True): | |
image_previews_gui = gr.Checkbox( | |
value=True, | |
label="Image Previews" | |
) | |
hires_before_adetailer_gui = gr.Checkbox( | |
value=False, | |
label="Hires Before Adetailer" | |
) | |
hires_after_adetailer_gui = gr.Checkbox( | |
value=True, | |
label="Hires After Adetailer" | |
) | |
generator_in_cpu_gui = gr.Checkbox( | |
value=False, | |
label="Generator in CPU" | |
) | |
with gr.Accordion( | |
"More settings", | |
open=False, | |
visible=False): | |
loop_generation_gui = gr.Slider( | |
minimum=1, | |
value=1, | |
label="Loop Generation" | |
) | |
retain_task_cache_gui = gr.Checkbox( | |
value=False, | |
label="Retain task model in cache" | |
) | |
leave_progress_bar_gui = gr.Checkbox( | |
value=True, | |
label="Leave Progress Bar" | |
) | |
disable_progress_bar_gui = gr.Checkbox( | |
value=False, | |
label="Disable Progress Bar" | |
) | |
display_images_gui = gr.Checkbox( | |
value=True, | |
label="Display Images" | |
) | |
save_generated_images_gui = gr.Checkbox( | |
value=False, | |
label="Save Generated Images" | |
) | |
image_storage_location_gui = gr.Textbox( | |
value="./images", | |
label="Image Storage Location" | |
) | |
retain_compel_previous_load_gui = gr.Checkbox( | |
value=False, | |
label="Retain Compel Previous Load" | |
) | |
retain_detailfix_model_previous_load_gui = gr.Checkbox( | |
value=False, | |
label="Retain Detailfix Model Previous Load" | |
) | |
retain_hires_model_previous_load_gui = gr.Checkbox( | |
value=False, | |
label="Retain Hires Model Previous Load" | |
) | |
xformers_memory_efficient_attention_gui = gr.Checkbox( | |
value=False, | |
label="Xformers Memory Efficient Attention" | |
) | |
# example and Help Section | |
with gr.Accordion("Examples and help", open=False, visible=True): | |
gr.Markdown( | |
"""### Help: | |
- The current space runs on a ZERO GPU which is assigned for approximately 60 seconds; Therefore, \ | |
if you submit expensive tasks, the operation may be canceled upon reaching the \ | |
maximum allowed time with 'GPU TASK ABORTED'. | |
- Distorted or strange images often result from high prompt weights, \ | |
so it's best to use low weights and scales, and consider using Classic variants like 'Classic-original'. | |
- For better results with Pony Diffusion, \ | |
try using sampler DPM++ 1s or DPM2 with Compel or Classic prompt weights. | |
""" | |
) | |
gr.Markdown( | |
"""### The following examples perform specific tasks: | |
1. Generation with SDXL and upscale | |
2. Generation with SDXL | |
3. ControlNet Canny SDXL | |
4. Optical pattern (Optical illusion) SDXL | |
5. Convert an image to a coloring drawing | |
6. ControlNet OpenPose SD 1.5 | |
- Different tasks can be performed, such as img2img or using the IP adapter, \ | |
to preserve a person's appearance or a specific style based on an image. | |
""" | |
) | |
gr.Examples( | |
examples=example_prompts, | |
fn=sd_gen.generate_pipeline, | |
inputs=[ | |
prompt_gui, | |
neg_prompt_gui, | |
num_images_gui, | |
steps_gui, | |
cfg_gui, | |
clip_skip_gui, | |
seed_gui, | |
lora1_gui, | |
lora_scale_1_gui, | |
lora2_gui, | |
lora_scale_2_gui, | |
lora3_gui, | |
lora_scale_3_gui, | |
lora4_gui, | |
lora_scale_4_gui, | |
lora5_gui, | |
lora_scale_5_gui, | |
sampler_gui, | |
img_height_gui, | |
img_width_gui, | |
model_name_gui, | |
vae_model_gui, | |
task_gui, | |
image_control, | |
preprocessor_name_gui, | |
preprocess_resolution_gui, | |
image_resolution_gui, | |
style_prompt_gui, | |
style_json_gui, | |
image_mask_gui, | |
strength_gui, | |
low_threshold_gui, | |
high_threshold_gui, | |
value_threshold_gui, | |
distance_threshold_gui, | |
control_net_output_scaling_gui, | |
control_net_start_threshold_gui, | |
control_net_stop_threshold_gui, | |
active_textual_inversion_gui, | |
prompt_syntax_gui, | |
upscaler_model_path_gui, | |
], | |
outputs=[result_images], | |
cache_examples=False, | |
) | |
with gr.Tab("Inpaint mask maker", render=True): | |
def create_mask_now(img, invert):
    """Build a binary RGB mask from the drawn layer of an ImageEditor payload.

    Parameters
    ----------
    img : dict
        gr.ImageEditor value; ``img["layers"][0]`` is the RGBA drawing layer
        and ``img["background"]`` the untouched source image.
    invert : bool
        When True the drawn area becomes black and the rest white.

    Returns
    -------
    tuple
        ``(background, rgb_mask)`` where ``rgb_mask`` is uint8 with values
        0 or 255 in all three channels.
    """
    import numpy as np

    # FIX: removed the original time.sleep(0.5). The payload is already fully
    # received when this server-side handler runs, so the sleep only added
    # half a second of latency per click.
    drawing_layer = img["layers"][0]
    # Anything the user painted has alpha > 1; threshold to a boolean mask.
    alpha_channel = np.array(drawing_layer)[:, :, 3]
    binary_mask = alpha_channel > 1
    if invert:
        print("Invert")
        # Drawn shape becomes black, the rest white.
        binary_mask = np.invert(binary_mask)
    # Stack to 3 channels and scale to 0/255 so it behaves like an RGB image.
    rgb_mask = np.stack((binary_mask,) * 3, axis=-1).astype(np.uint8) * 255
    return img["background"], rgb_mask
with gr.Row(): | |
with gr.Column(scale=2): | |
# image_base = gr.ImageEditor(label="Base image", show_label=True, brush=gr.Brush(colors=["#000000"])) | |
image_base = gr.ImageEditor( | |
sources=[ | |
"upload", | |
"clipboard" | |
], | |
# crop_size="1:1", | |
# enable crop (or disable it) | |
# transforms=["crop"], | |
brush=gr.Brush( | |
default_size="16", # or leave it as 'auto' | |
color_mode="fixed", # 'fixed' hides the user swatches and colorpicker, 'defaults' shows it | |
# default_color="black", # html names are supported | |
colors=[ | |
"rgba(0, 0, 0, 1)", # rgb(a) | |
"rgba(0, 0, 0, 0.1)", | |
"rgba(255, 255, 255, 0.1)", | |
# "hsl(360, 120, 120)" # in fact any valid colorstring | |
] | |
), | |
eraser=gr.Eraser(default_size="16") | |
) | |
invert_mask = gr.Checkbox( | |
value=False, | |
label="Invert mask" | |
) | |
btn = gr.Button("Create mask") | |
with gr.Column(scale=1): | |
img_source = gr.Image(interactive=False) | |
img_result = gr.Image( | |
label="Mask image", | |
show_label=True, | |
interactive=False | |
) | |
btn_send = gr.Button("Send to the first tab") | |
btn.click( | |
create_mask_now, | |
[image_base, invert_mask], | |
[img_source, img_result] | |
) | |
def send_img(img_source, img_result) -> tuple:
    """Pass the mask-maker outputs through unchanged for the generation tab."""
    return (img_source, img_result)
# Forward the source image and generated mask into the generation tab's
# ControlNet image and mask inputs.
btn_send.click(send_img, [img_source, img_result], [image_control, image_mask_gui])
generate_button.click( | |
fn=sd_gen.load_new_model, | |
inputs=[ | |
model_name_gui, | |
vae_model_gui, | |
task_gui | |
], | |
outputs=[load_model_gui], | |
queue=True, | |
show_progress="minimal", | |
).success( | |
fn=sd_gen.generate_pipeline, | |
inputs=[ | |
prompt_gui, | |
neg_prompt_gui, | |
num_images_gui, | |
steps_gui, | |
cfg_gui, | |
clip_skip_gui, | |
seed_gui, | |
lora1_gui, | |
lora_scale_1_gui, | |
lora2_gui, | |
lora_scale_2_gui, | |
lora3_gui, | |
lora_scale_3_gui, | |
lora4_gui, | |
lora_scale_4_gui, | |
lora5_gui, | |
lora_scale_5_gui, | |
sampler_gui, | |
img_height_gui, | |
img_width_gui, | |
model_name_gui, | |
vae_model_gui, | |
task_gui, | |
image_control, | |
preprocessor_name_gui, | |
preprocess_resolution_gui, | |
image_resolution_gui, | |
style_prompt_gui, | |
style_json_gui, | |
image_mask_gui, | |
strength_gui, | |
low_threshold_gui, | |
high_threshold_gui, | |
value_threshold_gui, | |
distance_threshold_gui, | |
control_net_output_scaling_gui, | |
control_net_start_threshold_gui, | |
control_net_stop_threshold_gui, | |
active_textual_inversion_gui, | |
prompt_syntax_gui, | |
upscaler_model_path_gui, | |
upscaler_increases_size_gui, | |
esrgan_tile_gui, | |
esrgan_tile_overlap_gui, | |
hires_steps_gui, | |
hires_denoising_strength_gui, | |
hires_sampler_gui, | |
hires_prompt_gui, | |
hires_negative_prompt_gui, | |
hires_before_adetailer_gui, | |
hires_after_adetailer_gui, | |
loop_generation_gui, | |
leave_progress_bar_gui, | |
disable_progress_bar_gui, | |
image_previews_gui, | |
display_images_gui, | |
save_generated_images_gui, | |
image_storage_location_gui, | |
retain_compel_previous_load_gui, | |
retain_detailfix_model_previous_load_gui, | |
retain_hires_model_previous_load_gui, | |
t2i_adapter_preprocessor_gui, | |
adapter_conditioning_scale_gui, | |
adapter_conditioning_factor_gui, | |
xformers_memory_efficient_attention_gui, | |
free_u_gui, | |
generator_in_cpu_gui, | |
adetailer_inpaint_only_gui, | |
adetailer_verbose_gui, | |
adetailer_sampler_gui, | |
adetailer_active_a_gui, | |
prompt_ad_a_gui, | |
negative_prompt_ad_a_gui, | |
strength_ad_a_gui, | |
face_detector_ad_a_gui, | |
person_detector_ad_a_gui, | |
hand_detector_ad_a_gui, | |
mask_dilation_a_gui, | |
mask_blur_a_gui, | |
mask_padding_a_gui, | |
adetailer_active_b_gui, | |
prompt_ad_b_gui, | |
negative_prompt_ad_b_gui, | |
strength_ad_b_gui, | |
face_detector_ad_b_gui, | |
person_detector_ad_b_gui, | |
hand_detector_ad_b_gui, | |
mask_dilation_b_gui, | |
mask_blur_b_gui, | |
mask_padding_b_gui, | |
retain_task_cache_gui, | |
image_ip1, | |
mask_ip1, | |
model_ip1, | |
mode_ip1, | |
scale_ip1, | |
image_ip2, | |
mask_ip2, | |
model_ip2, | |
mode_ip2, | |
scale_ip2, | |
], | |
outputs=[ | |
result_images, | |
actual_task_info | |
], | |
queue=True, | |
show_progress="minimal", | |
) | |
# Enable request queuing, then start the server with error reporting and
# debug logging on.
app.queue()
app.launch(show_error=True, debug=True)