Upload 2 files
Browse files
dc.py
CHANGED
@@ -351,7 +351,6 @@ class GuiSD:
|
|
351 |
|
352 |
## BEGIN MOD
|
353 |
loras_list = [s if s else "None" for s in loras_list]
|
354 |
-
prompt, neg_prompt = insert_model_recom_prompt(prompt, neg_prompt, model_name)
|
355 |
global lora_model_list
|
356 |
lora_model_list = get_lora_model_list()
|
357 |
## END MOD
|
@@ -706,7 +705,7 @@ def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance
|
|
706 |
model_name = load_diffusers_format_model[0], lora1 = None, lora1_wt = 1.0, lora2 = None, lora2_wt = 1.0,
|
707 |
lora3 = None, lora3_wt = 1.0, lora4 = None, lora4_wt = 1.0, lora5 = None, lora5_wt = 1.0,
|
708 |
sampler = "Euler", vae = None, translate=True, schedule_type=SCHEDULE_TYPE_OPTIONS[0], schedule_prediction_type=SCHEDULE_PREDICTION_TYPE_OPTIONS[0],
|
709 |
-
progress=gr.Progress(track_tqdm=True)):
|
710 |
MAX_SEED = np.iinfo(np.int32).max
|
711 |
|
712 |
image_previews = True
|
@@ -727,7 +726,7 @@ def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance
|
|
727 |
prompt = translate_to_en(prompt)
# BUG FIX: the original read `translate_to_en(prompt)` here, which
# overwrote the user's negative prompt with the translated *positive*
# prompt. Translate the negative prompt itself instead.
negative_prompt = translate_to_en(negative_prompt)

# Prepend the model-specific recommended prompt/negative-prompt tags.
prompt, negative_prompt = insert_model_recom_prompt(prompt, negative_prompt, model_name)
progress(0.5, desc="Preparing...")
# Resolve any <lora:...> tags embedded in the prompt into the five
# lora slot/weight pairs expected downstream.
lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt = \
    set_prompt_loras(prompt, model_name, lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt)
|
@@ -767,8 +766,8 @@ def _infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidanc
|
|
767 |
model_name = load_diffusers_format_model[0], lora1 = None, lora1_wt = 1.0, lora2 = None, lora2_wt = 1.0,
|
768 |
lora3 = None, lora3_wt = 1.0, lora4 = None, lora4_wt = 1.0, lora5 = None, lora5_wt = 1.0,
|
769 |
sampler = "Euler", vae = None, translate = True, schedule_type=SCHEDULE_TYPE_OPTIONS[0], schedule_prediction_type=SCHEDULE_PREDICTION_TYPE_OPTIONS[0],
|
770 |
-
progress=gr.Progress(track_tqdm=True)):
|
771 |
-
return gr.update(
|
772 |
|
773 |
|
774 |
infer.zerogpu = True
|
@@ -823,7 +822,6 @@ def load_model_prompt_dict():
|
|
823 |
model_prompt_dict = load_model_prompt_dict()
|
824 |
|
825 |
|
826 |
-
model_recom_prompt_enabled = True
|
827 |
animagine_ps = to_list("masterpiece, best quality, very aesthetic, absurdres")
|
828 |
animagine_nps = to_list("lowres, (bad), text, error, fewer, extra, missing, worst quality, jpeg artifacts, low quality, watermark, unfinished, displeasing, oldest, early, chromatic aberration, signature, extra digits, artistic error, username, scan, [abstract]")
|
829 |
pony_ps = to_list("score_9, score_8_up, score_7_up, masterpiece, best quality, very aesthetic, absurdres")
|
@@ -832,7 +830,7 @@ other_ps = to_list("anime artwork, anime style, studio anime, highly detailed, c
|
|
832 |
other_nps = to_list("photo, deformed, black and white, realism, disfigured, low contrast, drawing, painting, crayon, sketch, graphite, impressionist, noisy, blurry, soft, deformed, ugly")
|
833 |
default_ps = to_list("highly detailed, masterpiece, best quality, very aesthetic, absurdres")
|
834 |
default_nps = to_list("score_6, score_5, score_4, lowres, (bad), text, error, fewer, extra, missing, worst quality, jpeg artifacts, low quality, watermark, unfinished, displeasing, oldest, early, chromatic aberration, signature, extra digits, artistic error, username, scan, [abstract]")
|
835 |
-
def insert_model_recom_prompt(prompt: str = "", neg_prompt: str = "", model_name: str = "None"):
|
836 |
if not model_recom_prompt_enabled or not model_name: return prompt, neg_prompt
|
837 |
prompts = to_list(prompt)
|
838 |
neg_prompts = to_list(neg_prompt)
|
@@ -855,12 +853,6 @@ def insert_model_recom_prompt(prompt: str = "", neg_prompt: str = "", model_name
|
|
855 |
return prompt, neg_prompt
|
856 |
|
857 |
|
858 |
-
def enable_model_recom_prompt(is_enable: bool = True):
    """Switch the module-wide recommended-prompt insertion flag.

    Stores *is_enable* in the global ``model_recom_prompt_enabled`` and
    returns the value that was stored, so the result can be wired
    straight back into a UI component.
    """
    global model_recom_prompt_enabled
    model_recom_prompt_enabled = is_enable
    return model_recom_prompt_enabled
|
862 |
-
|
863 |
-
|
864 |
private_lora_dict = {}
|
865 |
try:
|
866 |
with open('lora_dict.json', encoding='utf-8') as f:
|
|
|
351 |
|
352 |
## BEGIN MOD
|
353 |
loras_list = [s if s else "None" for s in loras_list]
|
|
|
354 |
global lora_model_list
|
355 |
lora_model_list = get_lora_model_list()
|
356 |
## END MOD
|
|
|
705 |
model_name = load_diffusers_format_model[0], lora1 = None, lora1_wt = 1.0, lora2 = None, lora2_wt = 1.0,
|
706 |
lora3 = None, lora3_wt = 1.0, lora4 = None, lora4_wt = 1.0, lora5 = None, lora5_wt = 1.0,
|
707 |
sampler = "Euler", vae = None, translate=True, schedule_type=SCHEDULE_TYPE_OPTIONS[0], schedule_prediction_type=SCHEDULE_PREDICTION_TYPE_OPTIONS[0],
|
708 |
+
recom_prompt = True, progress=gr.Progress(track_tqdm=True)):
|
709 |
MAX_SEED = np.iinfo(np.int32).max
|
710 |
|
711 |
image_previews = True
|
|
|
726 |
prompt = translate_to_en(prompt)
# BUG FIX: the original read `translate_to_en(prompt)` here, which
# overwrote the user's negative prompt with the translated *positive*
# prompt. Translate the negative prompt itself instead.
negative_prompt = translate_to_en(negative_prompt)

# Prepend the model-specific recommended tags, gated by the new
# `recom_prompt` parameter introduced in this revision.
prompt, negative_prompt = insert_model_recom_prompt(prompt, negative_prompt, model_name, recom_prompt)
progress(0.5, desc="Preparing...")
# Resolve any <lora:...> tags embedded in the prompt into the five
# lora slot/weight pairs expected downstream.
lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt = \
    set_prompt_loras(prompt, model_name, lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt)
|
|
|
766 |
model_name = load_diffusers_format_model[0], lora1 = None, lora1_wt = 1.0, lora2 = None, lora2_wt = 1.0,
|
767 |
lora3 = None, lora3_wt = 1.0, lora4 = None, lora4_wt = 1.0, lora5 = None, lora5_wt = 1.0,
|
768 |
sampler = "Euler", vae = None, translate = True, schedule_type=SCHEDULE_TYPE_OPTIONS[0], schedule_prediction_type=SCHEDULE_PREDICTION_TYPE_OPTIONS[0],
|
769 |
+
recom_prompt = True, progress=gr.Progress(track_tqdm=True)):
|
770 |
+
return gr.update()
|
771 |
|
772 |
|
773 |
infer.zerogpu = True
|
|
|
822 |
model_prompt_dict = load_model_prompt_dict()
|
823 |
|
824 |
|
|
|
825 |
animagine_ps = to_list("masterpiece, best quality, very aesthetic, absurdres")
|
826 |
animagine_nps = to_list("lowres, (bad), text, error, fewer, extra, missing, worst quality, jpeg artifacts, low quality, watermark, unfinished, displeasing, oldest, early, chromatic aberration, signature, extra digits, artistic error, username, scan, [abstract]")
|
827 |
pony_ps = to_list("score_9, score_8_up, score_7_up, masterpiece, best quality, very aesthetic, absurdres")
|
|
|
830 |
other_nps = to_list("photo, deformed, black and white, realism, disfigured, low contrast, drawing, painting, crayon, sketch, graphite, impressionist, noisy, blurry, soft, deformed, ugly")
|
831 |
default_ps = to_list("highly detailed, masterpiece, best quality, very aesthetic, absurdres")
|
832 |
default_nps = to_list("score_6, score_5, score_4, lowres, (bad), text, error, fewer, extra, missing, worst quality, jpeg artifacts, low quality, watermark, unfinished, displeasing, oldest, early, chromatic aberration, signature, extra digits, artistic error, username, scan, [abstract]")
|
833 |
+
def insert_model_recom_prompt(prompt: str = "", neg_prompt: str = "", model_name: str = "None", model_recom_prompt_enabled = True):
|
834 |
if not model_recom_prompt_enabled or not model_name: return prompt, neg_prompt
|
835 |
prompts = to_list(prompt)
|
836 |
neg_prompts = to_list(neg_prompt)
|
|
|
853 |
return prompt, neg_prompt
|
854 |
|
855 |
|
|
|
|
|
|
|
|
|
|
|
|
|
856 |
private_lora_dict = {}
|
857 |
try:
|
858 |
with open('lora_dict.json', encoding='utf-8') as f:
|