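# Map of GUI task labels to stablepy internal task names.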
task_stablepy = {
'txt2img': 'txt2img',
'img2img': 'img2img',
'inpaint': 'inpaint',
'sdxl_canny T2I Adapter': 'sdxl_canny',
'sdxl_sketch T2I Adapter': 'sdxl_sketch',
'sdxl_lineart T2I Adapter': 'sdxl_lineart',
'sdxl_depth-midas T2I Adapter': 'sdxl_depth-midas',
'sdxl_openpose T2I Adapter': 'sdxl_openpose',
'sd_openpose ControlNet': 'openpose',
'sd_canny ControlNet': 'canny',
'sd_mlsd ControlNet': 'mlsd',
'sd_scribble ControlNet': 'scribble',
'sd_softedge ControlNet': 'softedge',
'sd_segmentation ControlNet': 'segmentation',
'sd_depth ControlNet': 'depth',
'sd_normalbae ControlNet': 'normalbae',
'sd_lineart ControlNet': 'lineart',
'sd_lineart_anime ControlNet': 'lineart_anime',
'sd_shuffle ControlNet': 'shuffle',
'sd_ip2p ControlNet': 'ip2p',
}
task_model_list = list(task_stablepy.keys())
#######################
# UTILS
#######################
import spaces
import os
from stablepy import Model_Diffusers
from stablepy.diffusers_vanilla.model import scheduler_names
from stablepy.diffusers_vanilla.style_prompt_config import STYLE_NAMES
import torch
import re
import shutil
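# Preprocessor choices offered in the GUI for each ControlNet task.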
preprocessor_controlnet = {
"openpose": [
"Openpose",
"None",
],
"scribble": [
"HED",
"Pidinet",
"None",
],
"softedge": [
"Pidinet",
"HED",
"HED safe",
"Pidinet safe",
"None",
],
"segmentation": [
"UPerNet",
"None",
],
"depth": [
"DPT",
"Midas",
"None",
],
"normalbae": [
"NormalBae",
"None",
],
"lineart": [
"Lineart",
"Lineart coarse",
"LineartAnime",
"None",
"None (anime)",
],
"shuffle": [
"ContentShuffle",
"None",
],
"canny": [
"Canny"
],
"mlsd": [
"MLSD"
],
"ip2p": [
"ip2p"
]
}
def download_things(directory, url, hf_token="", civitai_api_key=""):
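"""Download a file from Google Drive, Hugging Face, or Civitai into `directory`.
Hugging Face blob URLs are rewritten to resolve URLs; Civitai downloads require an API key."""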
url = url.strip()
if "drive.google.com" in url:
original_dir = os.getcwd()
os.chdir(directory)
os.system(f"gdown --fuzzy {url}")
os.chdir(original_dir)
elif "huggingface.co" in url:
url = url.replace("?download=true", "")
if "/blob/" in url:
url = url.replace("/blob/", "/resolve/")
user_header = f'"Authorization: Bearer {hf_token}"'
if hf_token:
os.system(f"aria2c --console-log-level=error --summary-interval=10 --header={user_header} -c -x 16 -k 1M -s 16 {url} -d {directory} -o {url.split('/')[-1]}")
else:
os.system (f"aria2c --optimize-concurrent-downloads --console-log-level=error --summary-interval=10 -c -x 16 -k 1M -s 16 {url} -d {directory} -o {url.split('/')[-1]}")
elif "civitai.com" in url:
if "?" in url:
url = url.split("?")[0]
if civitai_api_key:
url = url + f"?token={civitai_api_key}"
os.system(f"aria2c --console-log-level=error --summary-interval=10 -c -x 16 -k 1M -s 16 -d {directory} {url}")
else:
print("\033[91mYou need an API key to download Civitai models.\033[0m")
else:
os.system(f"aria2c --console-log-level=error --summary-interval=10 -c -x 16 -k 1M -s 16 -d {directory} {url}")
def get_model_list(directory_path):
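"""Return the paths of all files in `directory_path` with a valid model extension."""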
model_list = []
valid_extensions = {'.ckpt', '.pt', '.pth', '.safetensors', '.bin'}
for filename in os.listdir(directory_path):
if os.path.splitext(filename)[1] in valid_extensions:
file_path = os.path.join(directory_path, filename)
# model_list.append((name_without_extension, file_path))
model_list.append(file_path)
print('\033[34mFILE: ' + file_path + '\033[0m')
return model_list
def process_string(input_string):
"""Split a 'user/repo' string into ('repo', 'user/repo'); return None for any other format."""
parts = input_string.split('/')
if len(parts) == 2:
return (parts[1], input_string)
return None
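# Illustrative example: process_string("user/repo-name") -> ("repo-name", "user/repo-name")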
directory_models = 'models'
os.makedirs(directory_models, exist_ok=True)
directory_loras = 'loras'
os.makedirs(directory_loras, exist_ok=True)
directory_vaes = 'vaes'
os.makedirs(directory_vaes, exist_ok=True)
# - **Download SD 1.5 Models**
#download_model = "https://huggingface.co/frankjoshua/toonyou_beta6/resolve/main/toonyou_beta6.safetensors"
download_model = ""
# - **Download VAEs**
download_vae_list = [
'https://huggingface.co/madebyollin/sdxl-vae-fp16-fix/resolve/main/sdxl.vae.safetensors?download=true',
'https://huggingface.co/nubby/blessed-sdxl-vae-fp16-fix/resolve/main/sdxl_vae-fp16fix-c-1.1-b-0.5.safetensors?download=true',
'https://huggingface.co/nubby/blessed-sdxl-vae-fp16-fix/resolve/main/sdxl_vae-fp16fix-blessed.safetensors?download=true',
# 'https://civitai.com/api/download/models/476032',
# 'https://civitai.com/api/download/models/264491',
]
download_vae = ", ".join(download_vae_list)
# - **Download LoRAs**
download_lora_list = [
# 'https://civitai.com/api/download/models/421075',
# 'https://civitai.com/api/download/models/446461',
# 'https://civitai.com/api/download/models/392162',
]
download_lora = ", ".join(download_lora_list)
def get_model_id_list():
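"""Collect public model IDs from the votepurchase and John6666 Hub accounts,
listing John6666's anime-tagged models before the others."""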
from huggingface_hub import HfApi
api = HfApi()
model_ids = []
models_vp = api.list_models(author="votepurchase", cardData=True, sort="likes")
models_john = api.list_models(author="John6666", cardData=True, sort="last_modified")
for model in models_vp:
if not model.private:
model_ids.append(model.id)
anime_models = []
real_models = []
for model in models_john:
if not model.private:
if 'anime' in model.tags:
anime_models.append(model.id)
else:
real_models.append(model.id)
model_ids.extend(anime_models)
model_ids.extend(real_models)
return model_ids
load_diffusers_format_model = get_model_id_list()
CIVITAI_API_KEY = os.environ.get("CIVITAI_API_KEY")
hf_token = os.environ.get("HF_TOKEN")
# Download the configured models, VAEs, and LoRAs
for url in [url.strip() for url in download_model.split(',') if url.strip()]:
if not os.path.exists(f"./models/{url.split('/')[-1]}"):
download_things(directory_models, url, hf_token, CIVITAI_API_KEY)
for url in [url.strip() for url in download_vae.split(',') if url.strip()]:
if not os.path.exists(f"./vaes/{url.split('/')[-1]}"):
download_things(directory_vaes, url, hf_token, CIVITAI_API_KEY)
for url in [url.strip() for url in download_lora.split(',') if url.strip()]:
if not os.path.exists(f"./loras/{url.split('/')[-1]}"):
download_things(directory_loras, url, hf_token, CIVITAI_API_KEY)
# Download Embeddings
directory_embeds = 'embeddings'
os.makedirs(directory_embeds, exist_ok=True)
download_embeds = [
'https://huggingface.co/datasets/Nerfgun3/bad_prompt/resolve/main/bad_prompt.pt',
'https://huggingface.co/datasets/Nerfgun3/bad_prompt/blob/main/bad_prompt_version2.pt',
'https://huggingface.co/embed/EasyNegative/resolve/main/EasyNegative.safetensors',
'https://huggingface.co/embed/negative/resolve/main/EasyNegativeV2.safetensors',
'https://huggingface.co/embed/negative/resolve/main/bad-hands-5.pt',
'https://huggingface.co/embed/negative/resolve/main/bad-artist.pt',
'https://huggingface.co/embed/negative/resolve/main/ng_deepnegative_v1_75t.pt',
'https://huggingface.co/embed/negative/resolve/main/bad-artist-anime.pt',
'https://huggingface.co/embed/negative/resolve/main/bad-image-v2-39000.pt',
'https://huggingface.co/embed/negative/resolve/main/verybadimagenegative_v1.3.pt',
]
for url_embed in download_embeds:
if not os.path.exists(f"./embedings/{url_embed.split('/')[-1]}"):
download_things(directory_embeds, url_embed, hf_token, CIVITAI_API_KEY)
# Build the model lists for the GUI
embed_list = get_model_list(directory_embeds)
model_list = get_model_list(directory_models)
model_list = load_diffusers_format_model + model_list
lora_model_list = get_model_list(directory_loras)
lora_model_list.insert(0, "None")
vae_model_list = get_model_list(directory_vaes)
vae_model_list.insert(0, "None")
print('\033[33m🏁 Download and listing of valid models completed.\033[0m')
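# Upscaler display names mapped to weight URLs; None, "Lanczos", and "Nearest" are passed through by name.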
upscaler_dict_gui = {
None : None,
"Lanczos" : "Lanczos",
"Nearest" : "Nearest",
"RealESRGAN_x4plus" : "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.0/RealESRGAN_x4plus.pth",
"RealESRNet_x4plus" : "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.1/RealESRNet_x4plus.pth",
"RealESRGAN_x4plus_anime_6B": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.2.4/RealESRGAN_x4plus_anime_6B.pth",
"RealESRGAN_x2plus": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.1/RealESRGAN_x2plus.pth",
"realesr-animevideov3": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-animevideov3.pth",
"realesr-general-x4v3": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-x4v3.pth",
"realesr-general-wdn-x4v3" : "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-wdn-x4v3.pth",
"4x-UltraSharp" : "https://huggingface.co/Shandypur/ESRGAN-4x-UltraSharp/resolve/main/4x-UltraSharp.pth",
"4x_foolhardy_Remacri" : "https://huggingface.co/FacehugmanIII/4x_foolhardy_Remacri/resolve/main/4x_foolhardy_Remacri.pth",
"Remacri4xExtraSmoother" : "https://huggingface.co/hollowstrawberry/upscalers-backup/resolve/main/ESRGAN/Remacri%204x%20ExtraSmoother.pth",
"AnimeSharp4x" : "https://huggingface.co/hollowstrawberry/upscalers-backup/resolve/main/ESRGAN/AnimeSharp%204x.pth",
"lollypop" : "https://huggingface.co/hollowstrawberry/upscalers-backup/resolve/main/ESRGAN/lollypop.pth",
"RealisticRescaler4x" : "https://huggingface.co/hollowstrawberry/upscalers-backup/resolve/main/ESRGAN/RealisticRescaler%204x.pth",
"NickelbackFS4x" : "https://huggingface.co/hollowstrawberry/upscalers-backup/resolve/main/ESRGAN/NickelbackFS%204x.pth"
}
def extract_parameters(input_string):
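"""Parse an A1111-style infotext string into a dict with 'prompt', 'neg_prompt',
'width', 'height', and the remaining 'Key: value' pairs."""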
parameters = {}
input_string = input_string.replace("\n", "")
if not "Negative prompt:" in input_string:
print("Negative prompt not detected")
parameters["prompt"] = input_string
return parameters
parm = input_string.split("Negative prompt:")
parameters["prompt"] = parm[0]
if not "Steps:" in parm[1]:
print("Steps not detected")
parameters["neg_prompt"] = parm[1]
return parameters
parm = parm[1].split("Steps:")
parameters["neg_prompt"] = parm[0]
input_string = "Steps:" + parm[1]
# Extracting Steps
steps_match = re.search(r'Steps: (\d+)', input_string)
if steps_match:
parameters['Steps'] = int(steps_match.group(1))
# Extracting Size
size_match = re.search(r'Size: (\d+x\d+)', input_string)
if size_match:
parameters['Size'] = size_match.group(1)
width, height = map(int, parameters['Size'].split('x'))
parameters['width'] = width
parameters['height'] = height
# Extracting other parameters
other_parameters = re.findall(r'(\w+): (.*?)(?=, \w+|$)', input_string)
for param in other_parameters:
parameters[param[0]] = param[1].strip('"')
return parameters
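# Illustrative example (assumed input format):
# extract_parameters("masterpiece, 1girl Negative prompt: lowres Steps: 28, Sampler: Euler a, Size: 896x1152")
# -> {'prompt': 'masterpiece, 1girl ', 'neg_prompt': ' lowres ', 'Steps': '28', 'Size': '896x1152',
#     'width': 896, 'height': 1152, 'Sampler': 'Euler a'}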
#######################
# GUI
#######################
import spaces
import gradio as gr
from PIL import Image
import IPython.display
import time, json
from IPython.utils import capture
import logging
logging.getLogger("diffusers").setLevel(logging.ERROR)
import diffusers
diffusers.utils.logging.set_verbosity(40)
import warnings
warnings.filterwarnings(action="ignore", category=FutureWarning, module="diffusers")
warnings.filterwarnings(action="ignore", category=UserWarning, module="diffusers")
warnings.filterwarnings(action="ignore", category=FutureWarning, module="transformers")
from stablepy import logger
logger.setLevel(logging.DEBUG)
from v2 import (
V2UI,
parse_upsampling_output,
V2_ALL_MODELS,
)
from utils import (
gradio_copy_text,
COPY_ACTION_JS,
V2_ASPECT_RATIO_OPTIONS,
V2_RATING_OPTIONS,
V2_LENGTH_OPTIONS,
V2_IDENTITY_OPTIONS
)
from tagger import (
predict_tags,
convert_danbooru_to_e621_prompt,
remove_specific_prompt,
insert_recom_prompt,
compose_prompt_to_copy,
translate_prompt,
)
def description_ui():
gr.Markdown(
"""
## Danbooru Tags Transformer V2 Demo with WD Tagger
Image => Prompt => Upsampled longer prompt
- Mod of p1atdev's [Danbooru Tags Transformer V2 Demo](https://huggingface.co/spaces/p1atdev/danbooru-tags-transformer-v2) and [WD Tagger with 🤗 transformers](https://huggingface.co/spaces/p1atdev/wd-tagger-transformers).
- Models: p1atdev's [wd-swinv2-tagger-v3-hf](https://huggingface.co/p1atdev/wd-swinv2-tagger-v3-hf), [dart-v2-moe-sft](https://huggingface.co/p1atdev/dart-v2-moe-sft), [dart-v2-sft](https://huggingface.co/p1atdev/dart-v2-sft)
"""
)
class GuiSD:
def __init__(self):
self.model = None
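# The stablepy Model_Diffusers instance is created lazily on the first generation call.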
@spaces.GPU
def infer_short(self, model, pipe_params):
images, image_list = model(**pipe_params)
return images
@spaces.GPU(duration=120)
def infer(self, model, pipe_params):
images, image_list = model(**pipe_params)
return images
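# infer_short uses the default @spaces.GPU duration; infer reserves a longer 120 s GPU slot
# for heavy jobs (see the dispatch at the end of generate_pipeline).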
def generate_pipeline(
self,
prompt,
neg_prompt,
num_images,
steps,
cfg,
clip_skip,
seed,
lora1,
lora_scale1,
lora2,
lora_scale2,
lora3,
lora_scale3,
lora4,
lora_scale4,
lora5,
lora_scale5,
sampler,
img_height,
img_width,
model_name,
vae_model,
task,
image_control,
preprocessor_name,
preprocess_resolution,
image_resolution,
style_prompt, # list []
style_json_file,
image_mask,
strength,
low_threshold,
high_threshold,
value_threshold,
distance_threshold,
controlnet_output_scaling_in_unet,
controlnet_start_threshold,
controlnet_stop_threshold,
textual_inversion,
syntax_weights,
upscaler_model_path,
upscaler_increases_size,
esrgan_tile,
esrgan_tile_overlap,
hires_steps,
hires_denoising_strength,
hires_sampler,
hires_prompt,
hires_negative_prompt,
hires_before_adetailer,
hires_after_adetailer,
loop_generation,
leave_progress_bar,
disable_progress_bar,
image_previews,
display_images,
save_generated_images,
image_storage_location,
retain_compel_previous_load,
retain_detailfix_model_previous_load,
retain_hires_model_previous_load,
t2i_adapter_preprocessor,
t2i_adapter_conditioning_scale,
t2i_adapter_conditioning_factor,
xformers_memory_efficient_attention,
freeu,
generator_in_cpu,
adetailer_inpaint_only,
adetailer_verbose,
adetailer_sampler,
adetailer_active_a,
prompt_ad_a,
negative_prompt_ad_a,
strength_ad_a,
face_detector_ad_a,
person_detector_ad_a,
hand_detector_ad_a,
mask_dilation_a,
mask_blur_a,
mask_padding_a,
adetailer_active_b,
prompt_ad_b,
negative_prompt_ad_b,
strength_ad_b,
face_detector_ad_b,
person_detector_ad_b,
hand_detector_ad_b,
mask_dilation_b,
mask_blur_b,
mask_padding_b,
):
vae_model = vae_model if vae_model != "None" else None
loras_list = [lora1, lora2, lora3, lora4, lora5]
if model_name in model_list:
model_is_xl = "xl" in model_name.lower()
sdxl_in_vae = vae_model and "sdxl" in vae_model.lower()
model_type = "SDXL" if model_is_xl else "SD 1.5"
incompatible_vae = (model_is_xl and vae_model and not sdxl_in_vae) or (not model_is_xl and sdxl_in_vae)
if incompatible_vae:
gr.Info(
f"The selected VAE is for a { 'SD 1.5' if model_is_xl else 'SDXL' } model, but you"
f" are using a { model_type } model. The default VAE "
"will be used."
)
vae_model = None
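# Filename-based heuristic: warn when a LoRA named "animetarot" is used with an SDXL model,
# or any other LoRA with an SD 1.5 model.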
for la in loras_list:
if la is not None and la != "None":
lora_type = "animetarot" in la.lower()
if (model_is_xl and lora_type) or (not model_is_xl and not lora_type):
gr.Info(f"The LoRA {la} is for { 'SD 1.5' if model_is_xl else 'SDXL' }, but you are using { model_type }.")
task = task_stablepy[task]
# First load
model_precision = torch.float16
if not self.model:
print("Loading model...")
self.model = Model_Diffusers(
base_model_id=model_name,
task_name=task,
vae_model=vae_model,
type_model_precision=model_precision
)
if task != "txt2img" and not image_control:
raise ValueError("No control image found: To use this function, you have to upload an image in 'Image ControlNet/Inpaint/Img2img'")
if task == "inpaint" and not image_mask:
raise ValueError("No mask image found: Specify one in 'Image Mask'")
if upscaler_model_path in [None, "Lanczos", "Nearest"]:
upscaler_model = upscaler_model_path
else:
directory_upscalers = 'upscalers'
os.makedirs(directory_upscalers, exist_ok=True)
url_upscaler = upscaler_dict_gui[upscaler_model_path]
if not os.path.exists(f"./upscalers/{url_upscaler.split('/')[-1]}"):
download_things(directory_upscalers, url_upscaler, hf_token)
upscaler_model = f"./upscalers/{url_upscaler.split('/')[-1]}"
logging.getLogger("ultralytics").setLevel(logging.INFO if adetailer_verbose else logging.ERROR)
print("Config model:", model_name, vae_model, loras_list)
self.model.load_pipe(
model_name,
task_name=task,
vae_model=vae_model,
type_model_precision=model_precision
)
if textual_inversion and self.model.class_name == "StableDiffusionXLPipeline":
print("No Textual inversion for SDXL")
adetailer_params_A = {
"face_detector_ad" : face_detector_ad_a,
"person_detector_ad" : person_detector_ad_a,
"hand_detector_ad" : hand_detector_ad_a,
"prompt": prompt_ad_a,
"negative_prompt" : negative_prompt_ad_a,
"strength" : strength_ad_a,
# "image_list_task" : None,
"mask_dilation" : mask_dilation_a,
"mask_blur" : mask_blur_a,
"mask_padding" : mask_padding_a,
"inpaint_only" : adetailer_inpaint_only,
"sampler" : adetailer_sampler,
}
adetailer_params_B = {
"face_detector_ad" : face_detector_ad_b,
"person_detector_ad" : person_detector_ad_b,
"hand_detector_ad" : hand_detector_ad_b,
"prompt": prompt_ad_b,
"negative_prompt" : negative_prompt_ad_b,
"strength" : strength_ad_b,
# "image_list_task" : None,
"mask_dilation" : mask_dilation_b,
"mask_blur" : mask_blur_b,
"mask_padding" : mask_padding_b,
}
pipe_params = {
"prompt": prompt,
"negative_prompt": neg_prompt,
"img_height": img_height,
"img_width": img_width,
"num_images": num_images,
"num_steps": steps,
"guidance_scale": cfg,
"clip_skip": clip_skip,
"seed": seed,
"image": image_control,
"preprocessor_name": preprocessor_name,
"preprocess_resolution": preprocess_resolution,
"image_resolution": image_resolution,
"style_prompt": style_prompt if style_prompt else "",
"style_json_file": "",
"image_mask": image_mask, # only for Inpaint
"strength": strength, # only for Inpaint or ...
"low_threshold": low_threshold,
"high_threshold": high_threshold,
"value_threshold": value_threshold,
"distance_threshold": distance_threshold,
"lora_A": lora1 if lora1 != "None" else None,
"lora_scale_A": lora_scale1,
"lora_B": lora2 if lora2 != "None" else None,
"lora_scale_B": lora_scale2,
"lora_C": lora3 if lora3 != "None" else None,
"lora_scale_C": lora_scale3,
"lora_D": lora4 if lora4 != "None" else None,
"lora_scale_D": lora_scale4,
"lora_E": lora5 if lora5 != "None" else None,
"lora_scale_E": lora_scale5,
"textual_inversion": embed_list if textual_inversion and self.model.class_name != "StableDiffusionXLPipeline" else [],
"syntax_weights": syntax_weights, # "Classic"
"sampler": sampler,
"xformers_memory_efficient_attention": xformers_memory_efficient_attention,
"gui_active": True,
"loop_generation": loop_generation,
"controlnet_conditioning_scale": float(controlnet_output_scaling_in_unet),
"control_guidance_start": float(controlnet_start_threshold),
"control_guidance_end": float(controlnet_stop_threshold),
"generator_in_cpu": generator_in_cpu,
"FreeU": freeu,
"adetailer_A": adetailer_active_a,
"adetailer_A_params": adetailer_params_A,
"adetailer_B": adetailer_active_b,
"adetailer_B_params": adetailer_params_B,
"leave_progress_bar": leave_progress_bar,
"disable_progress_bar": disable_progress_bar,
"image_previews": image_previews,
"display_images": display_images,
"save_generated_images": save_generated_images,
"image_storage_location": image_storage_location,
"retain_compel_previous_load": retain_compel_previous_load,
"retain_detailfix_model_previous_load": retain_detailfix_model_previous_load,
"retain_hires_model_previous_load": retain_hires_model_previous_load,
"t2i_adapter_preprocessor": t2i_adapter_preprocessor,
"t2i_adapter_conditioning_scale": float(t2i_adapter_conditioning_scale),
"t2i_adapter_conditioning_factor": float(t2i_adapter_conditioning_factor),
"upscaler_model_path": upscaler_model,
"upscaler_increases_size": upscaler_increases_size,
"esrgan_tile": esrgan_tile,
"esrgan_tile_overlap": esrgan_tile_overlap,
"hires_steps": hires_steps,
"hires_denoising_strength": hires_denoising_strength,
"hires_prompt": hires_prompt,
"hires_negative_prompt": hires_negative_prompt,
"hires_sampler": hires_sampler,
"hires_before_adetailer": hires_before_adetailer,
"hires_after_adetailer": hires_after_adetailer
}
# print(pipe_params)
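# Route heavy requests (large sizes, batches, upscaling, double adetailer, many steps) to the long-duration GPU slot.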
if (
(img_height > 1700 and img_width > 1700)
or (num_images > 1 and img_height > 1048 and img_width > 1048)
or (num_images > 1 and upscaler_model)
or (num_images > 1 and (adetailer_active_a or adetailer_active_b))
or (num_images > 1 and steps > 50)
or (adetailer_active_a and adetailer_active_b)
or (upscaler_model and upscaler_increases_size > 1.7)
or (steps > 75)
or (image_resolution > 1048)
):
print("Inference 2")
return self.infer(self.model, pipe_params)
print("Inference 1")
return self.infer_short(self.model, pipe_params)
sd_gen = GuiSD()
CSS ="""
.contain { display: flex; flex-direction: column; }
#component-0 { height: 100%; }
#gallery { flex-grow: 1; }
"""
# Base tasks (txt2img / img2img / inpaint) plus the control tasks of each model family
sdxl_task = task_model_list[:3] + task_model_list[3:8]  # + SDXL T2I Adapters
sd_task = task_model_list[:3] + task_model_list[8:]  # + SD 1.5 ControlNets
def update_task_options(model_name, task_name):
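"""Restrict the task dropdown to the tasks supported by the selected model family."""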
if model_name in model_list:
if "xl" in model_name.lower():
new_choices = sdxl_task
else:
new_choices = sd_task
if task_name not in new_choices:
task_name = "txt2img"
return gr.update(value=task_name, choices=new_choices)
else:
return gr.update(value=task_name, choices=task_model_list)
with gr.Blocks(theme="NoCrypt/miku", css=CSS) as app:
gr.Markdown("# 🧩 DiffuseCraft Mod")
gr.Markdown(
f"""
This space is a modification of [r3gm's DiffuseCraft](https://huggingface.co/spaces/r3gm/DiffuseCraft).
"""
)
with gr.Row():
with gr.Tab("Generation"):
v2b = V2UI()
with gr.Column(scale=1):
with gr.Accordion("Model", open=False):
task_gui = gr.Dropdown(label="Task", choices=sdxl_task, value=task_model_list[0])
model_name_gui = gr.Dropdown(label="Model", choices=model_list, value="votepurchase/animagine-xl-3.1", allow_custom_value=True)
with gr.Group():
prompt_gui = gr.Textbox(lines=6, placeholder="1girl, solo, ...", label="Prompt", show_copy_button=True)
with gr.Accordion("Negative prompt, etc.", open=False):
neg_prompt_gui = gr.Textbox(lines=3, placeholder="lowres, (bad), ...", label="Negative prompt", show_copy_button=True)
insert_prompt_gui = gr.Radio(label="Insert recommended positive / negative prompt", choices=["None", "Animagine", "Pony"], value="None", interactive=True, scale=2)
prompt_type_gui = gr.Radio(label="Output tag conversion", choices=["danbooru", "e621"], value="e621", visible=False)
prompt_type_button = gr.Button(value="Convert prompt to Pony e621 style", size="sm", variant="secondary")
with gr.Row():
character_dbt = gr.Textbox(lines=1, placeholder="kafuu chino, ...", label="Character names", scale=2)
series_dbt = gr.Textbox(lines=1, placeholder="Is the order a rabbit?, ...", label="Series names", scale=2)
generate_db_random_button = gr.Button(value="Generate random prompt from character", size="sm", variant="secondary")
model_name_dbt = gr.Dropdown(label="Model", choices=list(V2_ALL_MODELS.keys()), value=list(V2_ALL_MODELS.keys())[0], visible=False)
rating_dbt = gr.Radio(label="Rating", choices=list(V2_RATING_OPTIONS), value="explicit", visible=False)
aspect_ratio_dbt = gr.Radio(label="Aspect ratio", choices=list(V2_ASPECT_RATIO_OPTIONS), value="square", visible=False)
length_dbt = gr.Radio(label="Length", choices=list(V2_LENGTH_OPTIONS), value="very_long", visible=False)
identity_dbt = gr.Radio(label="Keep identity", choices=list(V2_IDENTITY_OPTIONS), value="lax", visible=False)
ban_tags_dbt = gr.Textbox(label="Ban tags", placeholder="alternate costume, ...", value="futanari, censored, furry, furrification", visible=False)
elapsed_time_dbt = gr.Markdown(label="Elapsed time", value="", visible=False)
copy_button_dbt = gr.Button(value="Copy to clipboard", visible=False)
translate_prompt_button = gr.Button(value="Translate prompt to English", size="sm", variant="secondary")
generate_button = gr.Button(value="GENERATE IMAGE", size="lg", variant="primary")
model_name_gui.change(
update_task_options,
[model_name_gui, task_gui],
[task_gui],
)
result_images = gr.Gallery(
label="Generated images",
show_label=False,
elem_id="gallery",
columns=[2],
rows=[2],
object_fit="contain",
# height="auto",
interactive=False,
preview=False,
show_share_button=False,
show_download_button=True,
selected_index=50,
)
with gr.Accordion("Generation settings", open=False, visible=True):
steps_gui = gr.Slider(minimum=1, maximum=100, step=1, value=28, label="Steps")
cfg_gui = gr.Slider(minimum=0, maximum=30, step=0.5, value=7.0, label="CFG")
sampler_gui = gr.Dropdown(label="Sampler", choices=scheduler_names, value="Euler a")
img_width_gui = gr.Slider(minimum=64, maximum=4096, step=8, value=1024, label="Img Width")
img_height_gui = gr.Slider(minimum=64, maximum=4096, step=8, value=1024, label="Img Height")
clip_skip_gui = gr.Checkbox(value=False, label="Layer 2 Clip Skip")
free_u_gui = gr.Checkbox(value=False, label="FreeU")
seed_gui = gr.Number(minimum=-1, maximum=9999999999, value=-1, label="Seed")
num_images_gui = gr.Slider(minimum=1, maximum=4, step=1, value=1, label="Images")
prompt_s_options = [("Compel (default) format: (word)weight", "Compel"), ("Classic (sd1.5 long prompts) format: (word:weight)", "Classic")]
prompt_syntax_gui = gr.Dropdown(label="Prompt Syntax", choices=prompt_s_options, value=prompt_s_options[1][1])
vae_model_gui = gr.Dropdown(label="VAE Model", choices=vae_model_list)
with gr.Accordion("ControlNet / Img2img / Inpaint", open=False, visible=True):
image_control = gr.Image(label="Image ControlNet/Inpaint/Img2img", type="filepath")
image_mask_gui = gr.Image(label="Image Mask", type="filepath")
strength_gui = gr.Slider(
minimum=0.01, maximum=1.0, step=0.01, value=0.55, label="Strength",
info="This option adjusts the level of changes for img2img and inpainting."
)
image_resolution_gui = gr.Slider(minimum=64, maximum=2048, step=64, value=1024, label="Image Resolution")
preprocessor_name_gui = gr.Dropdown(label="Preprocessor Name", choices=preprocessor_controlnet["canny"])
def change_preprocessor_choices(task):
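"""Update the preprocessor dropdown to match the selected task."""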
task = task_stablepy[task]
if task in preprocessor_controlnet.keys():
choices_task = preprocessor_controlnet[task]
else:
choices_task = preprocessor_controlnet["canny"]
return gr.update(choices=choices_task, value=choices_task[0])
task_gui.change(
change_preprocessor_choices,
[task_gui],
[preprocessor_name_gui],
)
preprocess_resolution_gui = gr.Slider(minimum=64, maximum=2048, step=64, value=512, label="Preprocess Resolution")
low_threshold_gui = gr.Slider(minimum=1, maximum=255, step=1, value=100, label="Canny low threshold")
high_threshold_gui = gr.Slider(minimum=1, maximum=255, step=1, value=200, label="Canny high threshold")
value_threshold_gui = gr.Slider(minimum=1, maximum=2.0, step=0.01, value=0.1, label="Hough value threshold (MLSD)")
distance_threshold_gui = gr.Slider(minimum=1, maximum=20.0, step=0.01, value=0.1, label="Hough distance threshold (MLSD)")
control_net_output_scaling_gui = gr.Slider(minimum=0, maximum=5.0, step=0.1, value=1, label="ControlNet Output Scaling in UNet")
control_net_start_threshold_gui = gr.Slider(minimum=0, maximum=1, step=0.01, value=0, label="ControlNet Start Threshold (%)")
control_net_stop_threshold_gui = gr.Slider(minimum=0, maximum=1, step=0.01, value=1, label="ControlNet Stop Threshold (%)")
with gr.Accordion("T2I adapter", open=False, visible=True):
t2i_adapter_preprocessor_gui = gr.Checkbox(value=True, label="T2i Adapter Preprocessor")
adapter_conditioning_scale_gui = gr.Slider(minimum=0, maximum=5., step=0.1, value=1, label="Adapter Conditioning Scale")
adapter_conditioning_factor_gui = gr.Slider(minimum=0, maximum=1., step=0.01, value=0.55, label="Adapter Conditioning Factor (%)")
with gr.Accordion("LoRA", open=False, visible=True):
lora1_gui = gr.Dropdown(label="Lora1", choices=lora_model_list)
lora_scale_1_gui = gr.Slider(minimum=-2, maximum=2, step=0.01, value=0.33, label="Lora Scale 1")
lora2_gui = gr.Dropdown(label="Lora2", choices=lora_model_list)
lora_scale_2_gui = gr.Slider(minimum=-2, maximum=2, step=0.01, value=0.33, label="Lora Scale 2")
lora3_gui = gr.Dropdown(label="Lora3", choices=lora_model_list)
lora_scale_3_gui = gr.Slider(minimum=-2, maximum=2, step=0.01, value=0.33, label="Lora Scale 3")
lora4_gui = gr.Dropdown(label="Lora4", choices=lora_model_list)
lora_scale_4_gui = gr.Slider(minimum=-2, maximum=2, step=0.01, value=0.33, label="Lora Scale 4")
lora5_gui = gr.Dropdown(label="Lora5", choices=lora_model_list)
lora_scale_5_gui = gr.Slider(minimum=-2, maximum=2, step=0.01, value=0.33, label="Lora Scale 5")
with gr.Accordion("Styles", open=False, visible=True):
try:
style_names_found = sd_gen.model.STYLE_NAMES
except Exception:
style_names_found = STYLE_NAMES
style_prompt_gui = gr.Dropdown(
style_names_found,
multiselect=True,
value=None,
label="Style Prompt",
interactive=True,
)
style_json_gui = gr.File(label="Style JSON File")
style_button = gr.Button("Load styles")
def load_json_style_file(json):
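"""Load extra style prompts from a user-provided JSON file into the loaded model."""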
if not sd_gen.model:
gr.Info("First load the model")
return gr.update(value=None, choices=STYLE_NAMES)
sd_gen.model.load_style_file(json)
gr.Info(f"{len(sd_gen.model.STYLE_NAMES)} styles loaded")
return gr.update(value=None, choices=sd_gen.model.STYLE_NAMES)
style_button.click(load_json_style_file, [style_json_gui], [style_prompt_gui])
with gr.Accordion("Textual inversion", open=False, visible=False):
active_textual_inversion_gui = gr.Checkbox(value=False, label="Active Textual Inversion in prompt")
with gr.Accordion("Hires fix", open=False, visible=True):
upscaler_keys = list(upscaler_dict_gui.keys())
upscaler_model_path_gui = gr.Dropdown(label="Upscaler", choices=upscaler_keys, value=upscaler_keys[0])
upscaler_increases_size_gui = gr.Slider(minimum=1.1, maximum=6., step=0.1, value=1.5, label="Upscale by")
esrgan_tile_gui = gr.Slider(minimum=0, value=100, maximum=500, step=1, label="ESRGAN Tile")
esrgan_tile_overlap_gui = gr.Slider(minimum=1, maximum=200, step=1, value=10, label="ESRGAN Tile Overlap")
hires_steps_gui = gr.Slider(minimum=0, value=30, maximum=100, step=1, label="Hires Steps")
hires_denoising_strength_gui = gr.Slider(minimum=0.1, maximum=1.0, step=0.01, value=0.55, label="Hires Denoising Strength")
hires_sampler_gui = gr.Dropdown(label="Hires Sampler", choices=["Use same sampler"] + scheduler_names[:-1], value="Use same sampler")
hires_prompt_gui = gr.Textbox(label="Hires Prompt", placeholder="Main prompt will be used", lines=3)
hires_negative_prompt_gui = gr.Textbox(label="Hires Negative Prompt", placeholder="Main negative prompt will be used", lines=3)
with gr.Accordion("Detailfix", open=False, visible=True):
# Adetailer Inpaint Only
adetailer_inpaint_only_gui = gr.Checkbox(label="Inpaint only", value=True)
# Adetailer Verbose
adetailer_verbose_gui = gr.Checkbox(label="Verbose", value=False)
# Adetailer Sampler
adetailer_sampler_options = ["Use same sampler"] + scheduler_names[:-1]
adetailer_sampler_gui = gr.Dropdown(label="Adetailer sampler:", choices=adetailer_sampler_options, value="Use same sampler")
with gr.Accordion("Detailfix A", open=False, visible=True):
# Adetailer A
adetailer_active_a_gui = gr.Checkbox(label="Enable Adetailer A", value=False)
prompt_ad_a_gui = gr.Textbox(label="Main prompt", placeholder="Main prompt will be used", lines=3)
negative_prompt_ad_a_gui = gr.Textbox(label="Negative prompt", placeholder="Main negative prompt will be used", lines=3)
strength_ad_a_gui = gr.Number(label="Strength:", value=0.35, step=0.01, minimum=0.01, maximum=1.0)
face_detector_ad_a_gui = gr.Checkbox(label="Face detector", value=True)
person_detector_ad_a_gui = gr.Checkbox(label="Person detector", value=True)
hand_detector_ad_a_gui = gr.Checkbox(label="Hand detector", value=False)
mask_dilation_a_gui = gr.Number(label="Mask dilation:", value=4, minimum=1)
mask_blur_a_gui = gr.Number(label="Mask blur:", value=4, minimum=1)
mask_padding_a_gui = gr.Number(label="Mask padding:", value=32, minimum=1)
with gr.Accordion("Detailfix B", open=False, visible=True):
# Adetailer B
adetailer_active_b_gui = gr.Checkbox(label="Enable Adetailer B", value=False)
prompt_ad_b_gui = gr.Textbox(label="Main prompt", placeholder="Main prompt will be used", lines=3)
negative_prompt_ad_b_gui = gr.Textbox(label="Negative prompt", placeholder="Main negative prompt will be used", lines=3)
strength_ad_b_gui = gr.Number(label="Strength:", value=0.35, step=0.01, minimum=0.01, maximum=1.0)
face_detector_ad_b_gui = gr.Checkbox(label="Face detector", value=True)
person_detector_ad_b_gui = gr.Checkbox(label="Person detector", value=True)
hand_detector_ad_b_gui = gr.Checkbox(label="Hand detector", value=False)
mask_dilation_b_gui = gr.Number(label="Mask dilation:", value=4, minimum=1)
mask_blur_b_gui = gr.Number(label="Mask blur:", value=4, minimum=1)
mask_padding_b_gui = gr.Number(label="Mask padding:", value=32, minimum=1)
with gr.Accordion("Other settings", open=False, visible=True):
hires_before_adetailer_gui = gr.Checkbox(value=False, label="Hires Before Adetailer")
hires_after_adetailer_gui = gr.Checkbox(value=True, label="Hires After Adetailer")
generator_in_cpu_gui = gr.Checkbox(value=False, label="Generator in CPU")
with gr.Accordion("More settings", open=False, visible=False):
loop_generation_gui = gr.Slider(minimum=1, value=1, label="Loop Generation")
leave_progress_bar_gui = gr.Checkbox(value=True, label="Leave Progress Bar")
disable_progress_bar_gui = gr.Checkbox(value=False, label="Disable Progress Bar")
image_previews_gui = gr.Checkbox(value=False, label="Image Previews")
display_images_gui = gr.Checkbox(value=False, label="Display Images")
save_generated_images_gui = gr.Checkbox(value=False, label="Save Generated Images")
image_storage_location_gui = gr.Textbox(value="./images", label="Image Storage Location")
retain_compel_previous_load_gui = gr.Checkbox(value=False, label="Retain Compel Previous Load")
retain_detailfix_model_previous_load_gui = gr.Checkbox(value=False, label="Retain Detailfix Model Previous Load")
retain_hires_model_previous_load_gui = gr.Checkbox(value=False, label="Retain Hires Model Previous Load")
xformers_memory_efficient_attention_gui = gr.Checkbox(value=False, label="Xformers Memory Efficient Attention")
with gr.Tab("Inpaint mask maker", render=True):
def create_mask_now(img, invert):
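"""Build a binary RGB mask from the alpha channel of the editor's drawing layer."""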
import numpy as np
import time
time.sleep(0.5)
transparent_image = img["layers"][0]
# Extract the alpha channel
alpha_channel = np.array(transparent_image)[:, :, 3]
# Create a binary mask by thresholding the alpha channel
binary_mask = alpha_channel > 1
if invert:
print("Invert")
# Invert the binary mask so that the drawn shape is white and the rest is black
binary_mask = np.invert(binary_mask)
# Convert the binary mask to a 3-channel RGB mask
rgb_mask = np.stack((binary_mask,) * 3, axis=-1)
# Convert the mask to uint8
rgb_mask = rgb_mask.astype(np.uint8) * 255
return img["background"], rgb_mask
with gr.Row():
with gr.Column(scale=2):
# image_base = gr.ImageEditor(label="Base image", show_label=True, brush=gr.Brush(colors=["#000000"]))
image_base = gr.ImageEditor(
sources=["upload", "clipboard"],
# crop_size="1:1",
# enable crop (or disable it)
# transforms=["crop"],
brush=gr.Brush(
default_size="16", # or leave it as 'auto'
color_mode="fixed", # 'fixed' hides the user swatches and colorpicker, 'defaults' shows it
#default_color="black", # html names are supported
colors=[
"rgba(0, 0, 0, 1)", # rgb(a)
"rgba(0, 0, 0, 0.1)",
"rgba(255, 255, 255, 0.1)",
# "hsl(360, 120, 120)" # in fact any valid colorstring
]
),
eraser=gr.Eraser(default_size="16")
)
invert_mask = gr.Checkbox(value=False, label="Invert mask")
btn = gr.Button("Create mask")
with gr.Column(scale=1):
img_source = gr.Image(interactive=False)
img_result = gr.Image(label="Mask image", show_label=True, interactive=False)
btn_send = gr.Button("Send to the first tab")
btn.click(create_mask_now, [image_base, invert_mask], [img_source, img_result])
def send_img(img_source, img_result):
return img_source, img_result
btn_send.click(send_img, [img_source, img_result], [image_control, image_mask_gui])
v2b.input_components = [
model_name_dbt,
series_dbt,
character_dbt,
prompt_gui,
rating_dbt,
aspect_ratio_dbt,
length_dbt,
identity_dbt,
ban_tags_dbt,
]
insert_prompt_gui.change(
insert_recom_prompt,
inputs=[prompt_gui, neg_prompt_gui, insert_prompt_gui],
outputs=[prompt_gui, neg_prompt_gui],
)
prompt_type_button.click(
convert_danbooru_to_e621_prompt,
inputs=[prompt_gui, prompt_type_gui],
outputs=[prompt_gui],
)
generate_db_random_button.click(
parse_upsampling_output(v2b.on_generate),
inputs=[
*v2b.input_components,
],
outputs=[prompt_gui, elapsed_time_dbt, copy_button_dbt, copy_button_dbt],
)
translate_prompt_button.click(translate_prompt, inputs=[prompt_gui], outputs=[prompt_gui])
translate_prompt_button.click(translate_prompt, inputs=[character_dbt], outputs=[character_dbt])
translate_prompt_button.click(translate_prompt, inputs=[series_dbt], outputs=[series_dbt])
generate_button.click(
fn=sd_gen.generate_pipeline,
inputs=[
prompt_gui,
neg_prompt_gui,
num_images_gui,
steps_gui,
cfg_gui,
clip_skip_gui,
seed_gui,
lora1_gui,
lora_scale_1_gui,
lora2_gui,
lora_scale_2_gui,
lora3_gui,
lora_scale_3_gui,
lora4_gui,
lora_scale_4_gui,
lora5_gui,
lora_scale_5_gui,
sampler_gui,
img_height_gui,
img_width_gui,
model_name_gui,
vae_model_gui,
task_gui,
image_control,
preprocessor_name_gui,
preprocess_resolution_gui,
image_resolution_gui,
style_prompt_gui,
style_json_gui,
image_mask_gui,
strength_gui,
low_threshold_gui,
high_threshold_gui,
value_threshold_gui,
distance_threshold_gui,
control_net_output_scaling_gui,
control_net_start_threshold_gui,
control_net_stop_threshold_gui,
active_textual_inversion_gui,
prompt_syntax_gui,
upscaler_model_path_gui,
upscaler_increases_size_gui,
esrgan_tile_gui,
esrgan_tile_overlap_gui,
hires_steps_gui,
hires_denoising_strength_gui,
hires_sampler_gui,
hires_prompt_gui,
hires_negative_prompt_gui,
hires_before_adetailer_gui,
hires_after_adetailer_gui,
loop_generation_gui,
leave_progress_bar_gui,
disable_progress_bar_gui,
image_previews_gui,
display_images_gui,
save_generated_images_gui,
image_storage_location_gui,
retain_compel_previous_load_gui,
retain_detailfix_model_previous_load_gui,
retain_hires_model_previous_load_gui,
t2i_adapter_preprocessor_gui,
adapter_conditioning_scale_gui,
adapter_conditioning_factor_gui,
xformers_memory_efficient_attention_gui,
free_u_gui,
generator_in_cpu_gui,
adetailer_inpaint_only_gui,
adetailer_verbose_gui,
adetailer_sampler_gui,
adetailer_active_a_gui,
prompt_ad_a_gui,
negative_prompt_ad_a_gui,
strength_ad_a_gui,
face_detector_ad_a_gui,
person_detector_ad_a_gui,
hand_detector_ad_a_gui,
mask_dilation_a_gui,
mask_blur_a_gui,
mask_padding_a_gui,
adetailer_active_b_gui,
prompt_ad_b_gui,
negative_prompt_ad_b_gui,
strength_ad_b_gui,
face_detector_ad_b_gui,
person_detector_ad_b_gui,
hand_detector_ad_b_gui,
mask_dilation_b_gui,
mask_blur_b_gui,
mask_padding_b_gui,
],
outputs=[result_images],
queue=True,
)
with gr.Tab("Danbooru Tags Transformer with WD Tagger", render=True):
v2 = V2UI()
with gr.Column(scale=2):
with gr.Group():
input_image = gr.Image(label="Input image", type="pil", sources=["upload", "clipboard"])
with gr.Accordion(label="Advanced options", open=False):
general_threshold = gr.Slider(label="Threshold", minimum=0.0, maximum=1.0, value=0.3, step=0.01, interactive=True)
character_threshold = gr.Slider(label="Character threshold", minimum=0.0, maximum=1.0, value=0.8, step=0.01, interactive=True)
keep_tags = gr.Radio(label="Remove tags leaving only the following", choices=["body", "dress", "all"], value="body")
generate_from_image_btn = gr.Button(value="GENERATE TAGS FROM IMAGE", size="lg", variant="primary")
with gr.Group():
input_character = gr.Textbox(label="Character tags", placeholder="hatsune miku")
input_copyright = gr.Textbox(label="Copyright tags", placeholder="vocaloid")
input_general = gr.TextArea(label="General tags", lines=4, placeholder="1girl, ...", value="")
input_tags_to_copy = gr.Textbox(value="", visible=False)
copy_input_btn = gr.Button(value="Copy to clipboard", size="sm", interactive=False)
translate_input_prompt_button = gr.Button(value="Translate prompt to English", size="sm", variant="secondary")
tag_type = gr.Radio(label="Output tag conversion", info="danbooru for Animagine, e621 for Pony.", choices=["danbooru", "e621"], value="e621", visible=False)
input_rating = gr.Radio(label="Rating", choices=list(V2_RATING_OPTIONS), value="explicit")
with gr.Accordion(label="Advanced options", open=False):
input_aspect_ratio = gr.Radio(label="Aspect ratio", info="The aspect ratio of the image.", choices=list(V2_ASPECT_RATIO_OPTIONS), value="square")
input_length = gr.Radio(label="Length", info="The total length of the tags.", choices=list(V2_LENGTH_OPTIONS), value="very_long")
input_identity = gr.Radio(label="Keep identity", info="How strictly to keep the identity of the character or subject. If you specify details of the subject in the prompt, choose `strict`; otherwise choose `none` or `lax`. `none` is very creative but sometimes ignores the input prompt.", choices=list(V2_IDENTITY_OPTIONS), value="lax")
input_ban_tags = gr.Textbox(label="Ban tags", info="Tags to ban from the output.", placeholder="alternate costume, ...", value="censored")
model_name = gr.Dropdown(label="Model", choices=list(V2_ALL_MODELS.keys()), value=list(V2_ALL_MODELS.keys())[0])
dummy_np = gr.Textbox(label="Negative prompt", value="", visible=False)
recom_animagine = gr.Textbox(label="Animagine recommended prompt", value="Animagine", visible=False)
recom_pony = gr.Textbox(label="Pony recommended prompt", value="Pony", visible=False)
generate_btn = gr.Button(value="GENERATE TAGS", size="lg", variant="primary")
with gr.Group():
output_text = gr.TextArea(label="Output tags", interactive=False, show_copy_button=True)
copy_btn = gr.Button(value="Copy to clipboard", size="sm", interactive=False)
elapsed_time_md = gr.Markdown(label="Elapsed time", value="", visible=False)
with gr.Group():
output_text_pony = gr.TextArea(label="Output tags (Pony e621 style)", interactive=False, show_copy_button=True)
copy_btn_pony = gr.Button(value="Copy to clipboard", size="sm", interactive=False)
description_ui()
v2.input_components = [
model_name,
input_copyright,
input_character,
input_general,
input_rating,
input_aspect_ratio,
input_length,
input_identity,
input_ban_tags,
]
translate_input_prompt_button.click(translate_prompt, inputs=[input_general], outputs=[input_general])
translate_input_prompt_button.click(translate_prompt, inputs=[input_character], outputs=[input_character])
translate_input_prompt_button.click(translate_prompt, inputs=[input_copyright], outputs=[input_copyright])
generate_from_image_btn.click(
predict_tags,
inputs=[input_image, general_threshold, character_threshold],
outputs=[
input_copyright,
input_character,
input_general,
copy_input_btn,
],
).then(remove_specific_prompt, inputs=[input_general, keep_tags], outputs=[input_general])
copy_input_btn.click(compose_prompt_to_copy, inputs=[input_character, input_copyright, input_general], outputs=[input_tags_to_copy]).then(
gradio_copy_text, inputs=[input_tags_to_copy], js=COPY_ACTION_JS,
)
generate_btn.click(
parse_upsampling_output(v2.on_generate),
inputs=[
*v2.input_components,
],
outputs=[output_text, elapsed_time_md, copy_btn, copy_btn_pony],
).then(
convert_danbooru_to_e621_prompt, inputs=[output_text, tag_type], outputs=[output_text_pony],
).then(
insert_recom_prompt, inputs=[output_text, dummy_np, recom_animagine], outputs=[output_text, dummy_np],
).then(
insert_recom_prompt, inputs=[output_text_pony, dummy_np, recom_pony], outputs=[output_text_pony, dummy_np],
)
copy_btn.click(gradio_copy_text, inputs=[output_text], js=COPY_ACTION_JS)
copy_btn_pony.click(gradio_copy_text, inputs=[output_text_pony], js=COPY_ACTION_JS)
with gr.Accordion("Examples", open=True, visible=True):
gr.Examples(
examples=[
[
"1girl, souryuu asuka langley, neon genesis evangelion, plugsuit, pilot suit, red bodysuit, sitting, crossing legs, black eye patch, cat hat, throne, symmetrical, looking down, from bottom, looking at viewer, outdoors, masterpiece, best quality, very aesthetic, absurdres",
"nsfw, lowres, (bad), text, error, fewer, extra, missing, worst quality, jpeg artifacts, low quality, watermark, unfinished, displeasing, oldest, early, chromatic aberration, signature, extra digits, artistic error, username, scan, [abstract]",
1,
30,
7.5,
True,
-1,
None,
1.0,
None,
1.0,
None,
1.0,
None,
1.0,
None,
1.0,
"Euler a",
1152,
896,
"votepurchase/animagine-xl-3.1",
None, # vae
"txt2img",
None, # img control
"Canny", # preprocessor
512, # preproc resolution
1024, # img resolution
None, # Style prompt
None, # Style json
None, # img Mask
0.35, # strength
100, # low th canny
200, # high th canny
0.1, # value th MLSD
0.1, # distance th MLSD
1.0, # cn scale
0., # cn start
1., # cn end
False, # ti
"Classic",
"Nearest",
],
[
"solo, princess Zelda OOT, score_9, score_8_up, score_8, medium breasts, cute, eyelashes, cute small face, long hair, crown braid, hairclip, pointy ears, soft curvy body, looking at viewer, smile, blush, white dress, medium body, (((holding the Master Sword))), standing, deep forest in the background",
"score_6, score_5, score_4, busty, ugly face, mutated hands, low res, blurry face, black and white,",
1,
30,
5.,
True,
-1,
None,
1.0,
None,
1.0,
None,
1.0,
None,
1.0,
None,
1.0,
"Euler a",
1024,
1024,
"votepurchase/ponyDiffusionV6XL",
None, # vae
"txt2img",
None, # img control
"Canny", # preprocessor
512, # preproc resolution
1024, # img resolution
None, # Style prompt
None, # Style json
None, # img Mask
0.35, # strength
100, # low th canny
200, # high th canny
0.1, # value th MLSD
0.1, # distance th MLSD
1.0, # cn scale
0., # cn start
1., # cn end
False, # ti
"Classic",
"Nearest",
],
],
fn=sd_gen.generate_pipeline,
inputs=[
prompt_gui,
neg_prompt_gui,
num_images_gui,
steps_gui,
cfg_gui,
clip_skip_gui,
seed_gui,
lora1_gui,
lora_scale_1_gui,
lora2_gui,
lora_scale_2_gui,
lora3_gui,
lora_scale_3_gui,
lora4_gui,
lora_scale_4_gui,
lora5_gui,
lora_scale_5_gui,
sampler_gui,
img_height_gui,
img_width_gui,
model_name_gui,
vae_model_gui,
task_gui,
image_control,
preprocessor_name_gui,
preprocess_resolution_gui,
image_resolution_gui,
style_prompt_gui,
style_json_gui,
image_mask_gui,
strength_gui,
low_threshold_gui,
high_threshold_gui,
value_threshold_gui,
distance_threshold_gui,
control_net_output_scaling_gui,
control_net_start_threshold_gui,
control_net_stop_threshold_gui,
active_textual_inversion_gui,
prompt_syntax_gui,
upscaler_model_path_gui,
],
outputs=[result_images],
cache_examples=False,
)
app.queue()
app.launch(
# show_error=True,
# debug=True,
)