Spaces: Running on Zero
Upload 4 files
- app.py +5 -4
- modutils.py +102 -124
- tagger.py +22 -30
app.py
CHANGED
@@ -486,10 +486,7 @@ class GuiSD:
             retain_task_model_in_cache=False,
         )
 
-    @spaces.GPU
     def infer_short(self, model, pipe_params, progress=gr.Progress(track_tqdm=True)):
-        # Maybe fix lora issue: 'Cannot copy out of meta tensor; no data!''
-        model.pipe.to("cuda:0" if torch.cuda.is_available() else "cpu")
         progress(0, desc="Start inference...")
         images, image_list = model(**pipe_params)
         progress(1, desc="Inference completed.")
@@ -524,6 +521,7 @@ class GuiSD:
         )
         yield f"Model loaded: {model_name}"
 
+    @spaces.GPU
     def generate_pipeline(
         self,
         prompt,
@@ -843,7 +841,10 @@ class GuiSD:
             "ip_adapter_scale": params_ip_scale,
         }
 
-
+        # Maybe fix lora issue: 'Cannot copy out of meta tensor; no data!''
+        self.model.pipe.to("cuda:0" if torch.cuda.is_available() else "cpu")
+
+        progress(0, desc="Preparation completed. Starting inference...")
         info_state = f"PROCESSING "
         info_state += ">"
         info_state = f"COMPLETED. Seeds: {str(seed)}"
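The substantive change here is where `@spaces.GPU` sits. On ZeroGPU Spaces the decorator requests a GPU only for the duration of the decorated call, so the commit moves it from the inner `infer_short` helper to the outer `generate_pipeline`, and performs the `.pipe.to(...)` device move inside that call, where a real CUDA device is attached; moving weights outside a GPU context is what can leave them as data-less "meta" tensors. A minimal sketch of the resulting pattern, not the Space's actual code (the model id and the `generate` wrapper are illustrative):

```python
import torch
import spaces  # ZeroGPU helper package available on Hugging Face Spaces
from diffusers import DiffusionPipeline

# Load on CPU at startup; no GPU exists yet on a ZeroGPU Space.
pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0")

@spaces.GPU  # a GPU is attached only while this function runs
def generate(prompt: str):
    # Move weights to CUDA inside the decorated call; outside it the
    # parameters may still be "meta" tensors with no data to copy.
    pipe.to("cuda:0" if torch.cuda.is_available() else "cpu")
    return pipe(prompt).images[0]
```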
modutils.py
CHANGED
@@ -4,15 +4,15 @@ import gradio as gr
 from PIL import Image
 from huggingface_hub import HfApi
 from requests import HTTPError, Timeout
-
+from pathlib import Path
 
 HF_LORA_PRIVATE_REPOS1 = ['John6666/loratest1', 'John6666/loratest3', 'John6666/loratest4', 'John6666/loratest6']
-HF_LORA_PRIVATE_REPOS2 = ['John6666/loratest10', 'John6666/loratest']
+HF_LORA_PRIVATE_REPOS2 = ['John6666/loratest10', 'John6666/loratest'] # to be sorted as 1 repo
 HF_LORA_PRIVATE_REPOS = HF_LORA_PRIVATE_REPOS1 + HF_LORA_PRIVATE_REPOS2
-HF_LORA_ESSENTIAL_PRIVATE_REPO = 'John6666/loratest1'
+HF_LORA_ESSENTIAL_PRIVATE_REPO = 'John6666/loratest1' # to be downloaded on run app
 directory_loras = 'loras'
 CIVITAI_API_KEY = os.environ.get("CIVITAI_API_KEY")
-
+hf_read_token = os.environ.get('HF_READ_TOKEN')
 
 def get_user_agent():
     return 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:127.0) Gecko/20100101 Firefox/127.0'
@@ -38,7 +38,6 @@ def change_interface_mode(mode: str):
 
 
 def get_local_model_list(dir_path):
-    from pathlib import Path
     model_list = []
     valid_extensions = ('.ckpt', '.pt', '.pth', '.safetensors', '.bin')
     for file in Path(dir_path).glob("*"):
@@ -71,43 +70,37 @@ def escape_lora_basename(basename: str):
 
 def download_private_repo(repo_id, dir_path, is_replace):
     from huggingface_hub import snapshot_download
-    hf_read_token = os.environ.get('HF_READ_TOKEN')
     if not hf_read_token: return
     try:
         snapshot_download(repo_id=repo_id, local_dir=dir_path, allow_patterns=['*.ckpt', '*.pt', '*.pth', '*.safetensors', '*.bin'], use_auth_token=hf_read_token)
     except Exception as e:
         return
-    …
-                newpath = Path(f'{file.parent.name}/{escape_lora_basename(file.stem)}{file.suffix}')
-                file.resolve().rename(newpath.resolve())
+    if is_replace:
+        for file in Path(dir_path).glob("*"):
+            if file.exists() and "." in file.stem or " " in file.stem and file.suffix in ['.ckpt', '.pt', '.pth', '.safetensors', '.bin']:
+                newpath = Path(f'{file.parent.name}/{escape_lora_basename(file.stem)}{file.suffix}')
+                file.resolve().rename(newpath.resolve())
 
 
-private_model_path_repo_dict = {}
+private_model_path_repo_dict = {} # {"local filepath": "huggingface repo_id", ...}
 
 
 def get_private_model_list(repo_id, dir_path):
     global private_model_path_repo_dict
     api = HfApi()
-    hf_read_token = os.environ.get('HF_READ_TOKEN')
     if not hf_read_token: return []
     try:
         files = api.list_repo_files(repo_id, token=hf_read_token)
     except Exception as e:
         return []
-    …
-        private_model_path_repo_dict[model] = repo_id
-    return model_list
+    model_list = []
+    for file in files:
+        path = Path(f"{dir_path}/{file}")
+        if path.suffix in ['.ckpt', '.pt', '.pth', '.safetensors', '.bin']:
+            model_list.append(str(path))
+    for model in model_list:
+        private_model_path_repo_dict[model] = repo_id
+    return model_list
 
 
 def get_private_lora_model_lists():
@@ -123,10 +116,8 @@ def get_private_lora_model_lists():
 
 def download_private_file(repo_id, path, is_replace):
     from huggingface_hub import hf_hub_download
-    from pathlib import Path
     file = Path(path)
     newpath = Path(f'{file.parent.name}/{escape_lora_basename(file.stem)}{file.suffix}') if is_replace else file
-    hf_read_token = os.environ.get('HF_READ_TOKEN')
     if not hf_read_token or newpath.exists(): return
     filename = file.name
     dirname = file.parent.name
@@ -134,9 +125,8 @@ def download_private_file(repo_id, path, is_replace):
         hf_hub_download(repo_id=repo_id, filename=filename, local_dir=dirname, use_auth_token=hf_read_token)
     except Exception as e:
         return
-    …
-        file.resolve().rename(newpath.resolve())
+    if is_replace:
+        file.resolve().rename(newpath.resolve())
 
 
 def download_private_file_from_somewhere(path, is_replace):
@@ -153,17 +143,16 @@ def get_model_id_list():
         models_john = api.list_models(author="John6666", cardData=True, sort="last_modified")
     except Exception as e:
         return model_ids
-    …
-    return model_ids
+    for model in models_vp:
+        model_ids.append(model.id) if not model.private else ""
+    anime_models = []
+    real_models = []
+    for model in models_john:
+        if not model.private:
+            anime_models.append(model.id) if 'anime' in model.tags else real_models.append(model.id)
+    model_ids.extend(anime_models)
+    model_ids.extend(real_models)
+    return model_ids
 
 
 def get_t2i_model_info(repo_id: str):
@@ -171,25 +160,24 @@ def get_t2i_model_info(repo_id: str):
     try:
         if " " in repo_id or not api.repo_exists(repo_id): return ""
         model = api.model_info(repo_id=repo_id)
-    except …
+    except Exception as e:
         return ""
-    …
-    return gr.update(value=md)
+    if model.private or model.gated: return ""
+    tags = model.tags
+    info = []
+    url = f"https://huggingface.co/{repo_id}/"
+    if not 'diffusers' in tags: return ""
+    if 'diffusers:StableDiffusionXLPipeline' in tags:
+        info.append("SDXL")
+    elif 'diffusers:StableDiffusionPipeline' in tags:
+        info.append("SD1.5")
+    if model.card_data and model.card_data.tags:
+        info.extend(list_sub(model.card_data.tags, ['text-to-image', 'stable-diffusion', 'stable-diffusion-api', 'safetensors', 'stable-diffusion-xl']))
+    info.append(f"DLs: {model.downloads}")
+    info.append(f"likes: {model.likes}")
+    info.append(model.last_modified.strftime("lastmod: %Y-%m-%d"))
+    md = f"Model Info: {', '.join(info)}, [Model Repo]({url})"
+    return gr.update(value=md)
 
 
 def get_tupled_model_list(model_list):
@@ -202,28 +190,28 @@ def get_tupled_model_list(model_list):
             model = api.model_info(repo_id=repo_id)
         except Exception as e:
             continue
+        if model.private or model.gated: continue
+        tags = model.tags
+        info = []
+        if not 'diffusers' in tags: continue
+        if 'diffusers:StableDiffusionXLPipeline' in tags:
+            info.append("SDXL")
+        elif 'diffusers:StableDiffusionPipeline' in tags:
+            info.append("SD1.5")
+        if model.card_data and model.card_data.tags:
+            info.extend(list_sub(model.card_data.tags, ['text-to-image', 'stable-diffusion', 'stable-diffusion-api', 'safetensors', 'stable-diffusion-xl']))
+        if "pony" in info:
+            info.remove("pony")
+            name = f"{repo_id} (Pony🐴, {', '.join(info)})"
         else:
-            …
-            info = []
-            if not 'diffusers' in tags: continue
-            if 'diffusers:StableDiffusionXLPipeline' in tags:
-                info.append("SDXL")
-            elif 'diffusers:StableDiffusionPipeline' in tags:
-                info.append("SD1.5")
-            if model.card_data and model.card_data.tags:
-                info.extend(list_sub(model.card_data.tags, ['text-to-image', 'stable-diffusion', 'stable-diffusion-api', 'safetensors', 'stable-diffusion-xl']))
-            if "pony" in info:
-                info.remove("pony")
-                name = f"{repo_id} (Pony🐴, {', '.join(info)})"
-            else:
-                name = f"{repo_id} ({', '.join(info)})"
-            tupled_list.append((name, repo_id))
+            name = f"{repo_id} ({', '.join(info)})"
+        tupled_list.append((name, repo_id))
     return tupled_list
 
 
-def save_gallery_images(images):
+def save_gallery_images(images, progress=gr.Progress(track_tqdm=True)):
     from datetime import datetime, timezone, timedelta
+    progress(0, desc="Updating gallery...")
     dt_now = datetime.now(timezone(timedelta(hours=9)))
     basename = dt_now.strftime('%Y%m%d_%H%M%S_')
     i = 1
@@ -231,13 +219,13 @@ def save_gallery_images(images):
     output_images = []
     output_paths = []
     for image in images:
-        from pathlib import Path
        filename = basename + str(i) + ".png"
        oldpath = Path(image[0]) # Path(image[0])
        newpath = oldpath.resolve().rename(Path(filename).resolve())
        output_paths.append(str(newpath))
        output_images.append((str(newpath), str(filename)))
        i += 1
+    progress(1, desc="Gallery updated.")
     return gr.update(value=output_images), gr.update(value=output_paths), gr.update(visible=True),
 
 
@@ -330,7 +318,6 @@ def get_civitai_info(path):
     from urllib3.util import Retry
     from requests.adapters import HTTPAdapter
     if path in set(civitai_not_exists_list): return ["", "", "", "", ""]
-    from pathlib import Path
     if not Path(path).exists(): return None
     user_agent = get_user_agent()
     headers = {'User-Agent': user_agent, 'content-type': 'application/json'}
@@ -348,24 +335,22 @@ def get_civitai_info(path):
         r = session.get(url, params=params, headers=headers, stream=True, timeout=(3.0, 15))
     except (HTTPError, Timeout) as e:
         return ["", "", "", "", ""]
-    …
-    return items
+    if not r.ok: return None
+    json = r.json()
+    if not 'baseModel' in json:
+        civitai_not_exists_list.append(path)
+        return ["", "", "", "", ""]
+    items = []
+    items.append(" / ".join(json['trainedWords']))
+    items.append(json['baseModel'])
+    items.append(json['model']['name'])
+    items.append(f"https://civitai.com/models/{json['modelId']}")
+    items.append(json['images'][0]['url'])
+    return items
 
 
 def update_lora_dict(path):
     global lora_trigger_dict
-    from pathlib import Path
     key = escape_lora_basename(Path(path).stem)
     if key in lora_trigger_dict.keys(): return
     items = get_civitai_info(path)
@@ -375,7 +360,6 @@ def update_lora_dict(path):
 
 def get_lora_tupled_list(lora_model_list):
     global lora_trigger_dict
-    from pathlib import Path
     if not lora_model_list: return []
     tupled_list = []
     local_models = set(get_local_model_list(directory_loras))
@@ -402,7 +386,6 @@ def get_lora_tupled_list(lora_model_list):
 
 
 def set_lora_trigger(lora_gui: str):
-    from pathlib import Path
     if not lora_gui or lora_gui == "None": return gr.update(value="", visible=False), gr.update(visible=False),\
         gr.update(value="", visible=False), gr.update(value="None", visible=True)
     path = Path(lora_gui)
@@ -468,7 +451,6 @@ def upload_file_lora(files, progress=gr.Progress(track_tqdm=True)):
 
 def move_file_lora(filepaths):
     import shutil
-    from pathlib import Path
     for file in filepaths:
         path = Path(shutil.move(Path(file).resolve(), Path(f"./{directory_loras}").resolve()))
         newpath = Path(f'{path.parent.name}/{escape_lora_basename(path.stem)}{path.suffix}')
@@ -508,24 +490,23 @@ def search_lora_on_civitai(query: str, allow_model: list[str]):
         r = session.get(base_url, params=params, headers=headers, stream=True, timeout=(3.0, 30))
     except (HTTPError, Timeout) as e:
         return None
-    …
-    return items
+    if not r.ok: return None
+    json = r.json()
+    if not 'items' in json: return None
+    items = []
+    for j in json['items']:
+        for model in j['modelVersions']:
+            item = {}
+            if not model['baseModel'] in set(allow_model): continue
+            item['name'] = j['name']
+            item['creator'] = j['creator']['username']
+            item['tags'] = j['tags']
+            item['model_name'] = model['name']
+            item['base_model'] = model['baseModel']
+            item['dl_url'] = model['downloadUrl']
+            item['md'] = f'<img src="{model["images"][0]["url"]}" alt="thumbnail" width="150" height="240"><br>[LoRA Model URL](https://civitai.com/models/{j["id"]})'
+            items.append(item)
+    return items
 
 
 civitai_lora_last_results = {}
@@ -808,7 +789,6 @@ textual_inversion_file_token_list = []
 
 
 def get_tupled_embed_list(embed_list):
-    from pathlib import Path
     global textual_inversion_file_list
     tupled_list = []
     for file in embed_list:
@@ -837,7 +817,6 @@ def set_textual_inversion_prompt(textual_inversion_gui, prompt_gui, neg_prompt_g
     ti_prompts = []
     ti_neg_prompts = []
     for ti in textual_inversion_gui:
-        from pathlib import Path
         tokens = textual_inversion_dict.get(Path(ti).name, [Path(ti).stem.replace(",",""), False])
         is_positive = tokens[1] == True or "positive" in Path(ti).parent.name
         if is_positive: # positive prompt
@@ -861,14 +840,13 @@ def get_model_pipeline(repo_id: str):
         model = api.model_info(repo_id=repo_id)
     except Exception as e:
         return default
+    if model.private or model.gated: return default
+    tags = model.tags
+    if not 'diffusers' in tags: return default
+    if 'diffusers:StableDiffusionXLPipeline' in tags:
+        return "StableDiffusionXLPipeline"
+    elif 'diffusers:StableDiffusionPipeline' in tags:
+        return "StableDiffusionPipeline"
     else:
-        …
-        tags = model.tags
-        if not 'diffusers' in tags: return default
-        if 'diffusers:StableDiffusionXLPipeline' in tags:
-            return "StableDiffusionXLPipeline"
-        elif 'diffusers:StableDiffusionPipeline' in tags:
-            return "StableDiffusionPipeline"
-        else:
-            return default
+        return default
 
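Most of the modutils.py churn is one refactor applied throughout: the per-function `from pathlib import Path` and `hf_read_token = os.environ.get('HF_READ_TOKEN')` lines move to module scope, so every helper shares a single import and a single token read. A small sketch of the shape this leaves behind (the two function names here are illustrative, not from the file):

```python
import os
from pathlib import Path

# Read the secret once at import time; None when it is unset.
hf_read_token = os.environ.get('HF_READ_TOKEN')

def list_weight_files(dir_path: str) -> list[str]:
    # Path now comes from the single module-level import.
    exts = ('.ckpt', '.pt', '.pth', '.safetensors', '.bin')
    return [str(p) for p in Path(dir_path).glob("*") if p.suffix in exts]

def has_read_token() -> bool:
    # Every private-repo helper starts with the same cheap guard.
    return hf_read_token is not None
```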
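The rebuilt bodies of `get_civitai_info` and `search_lora_on_civitai` also share a defensive pattern: even after the `HTTPError`/`Timeout` handler, the code checks `r.ok` and the presence of the expected key before indexing into the JSON payload. A reduced sketch of that flow, assuming the public `https://civitai.com/api/v1/models` endpoint and omitting the retry-adapter session setup the file actually uses:

```python
import requests

def fetch_civitai_items(query: str):
    try:
        r = requests.get("https://civitai.com/api/v1/models",
                         params={"query": query}, timeout=(3.0, 30))
    except (requests.HTTPError, requests.Timeout):
        return None
    if not r.ok: return None             # non-2xx response: treat as no result
    data = r.json()
    if 'items' not in data: return None  # guard before indexing the payload
    return data['items']
```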
tagger.py
CHANGED
@@ -49,6 +49,18 @@ DANBOORU_TO_E621_RATING_MAP = {
 }
 
 
+def to_list(s):
+    return [x.strip() for x in s.split(",") if not s == ""]
+
+
+def list_sub(a, b):
+    return [e for e in a if e not in b]
+
+
+def list_uniq(l):
+    return sorted(set(l), key=l.index)
+
+
 def load_dict_from_csv(filename):
     with open(filename, 'r', encoding="utf-8") as f:
         lines = f.readlines()
@@ -230,21 +242,16 @@ def convert_tags_to_ja(input_prompt: str = ""):
 enable_auto_recom_prompt = True
 
 
+animagine_ps = to_list("anime artwork, anime style, key visual, vibrant, studio anime, highly detailed, masterpiece, best quality, very aesthetic, absurdres")
+animagine_nps = to_list("lowres, (bad), text, error, fewer, extra, missing, worst quality, jpeg artifacts, low quality, watermark, unfinished, displeasing, oldest, early, chromatic aberration, signature, extra digits, artistic error, username, scan, [abstract]")
+pony_ps = to_list("source_anime, score_9, score_8_up, score_7_up, masterpiece, best quality, very aesthetic, absurdres")
+pony_nps = to_list("source_pony, source_furry, source_cartoon, score_6, score_5, score_4, busty, ugly face, mutated hands, low res, blurry face, black and white, the simpsons, overwatch, apex legends")
+other_ps = to_list("anime artwork, anime style, key visual, vibrant, studio anime, highly detailed, cinematic photo, 35mm photograph, film, bokeh, professional, 4k, highly detailed")
+other_nps = to_list("photo, deformed, black and white, realism, disfigured, low contrast, drawing, painting, crayon, sketch, graphite, impressionist, noisy, blurry, soft, deformed, ugly")
+default_ps = to_list("score_9, score_8_up, score_7_up, highly detailed, masterpiece, best quality, very aesthetic, absurdres")
+default_nps = to_list("score_6, score_5, score_4, lowres, (bad), text, error, fewer, extra, missing, worst quality, jpeg artifacts, low quality, watermark, unfinished, displeasing, oldest, early, chromatic aberration, signature, extra digits, artistic error, username, scan, [abstract]")
 def insert_recom_prompt(prompt: str = "", neg_prompt: str = "", type: str = "None"):
-    def to_list(s):
-        return [x.strip() for x in s.split(",") if not s == ""]
-
-    def list_sub(a, b):
-        return [e for e in a if e not in b]
-
-    def list_uniq(l):
-        return sorted(set(l), key=l.index)
-
     global enable_auto_recom_prompt
-    animagine_ps = to_list("anime artwork, anime style, key visual, vibrant, studio anime, highly detailed, masterpiece, best quality, very aesthetic, absurdres")
-    animagine_nps = to_list("lowres, (bad), text, error, fewer, extra, missing, worst quality, jpeg artifacts, low quality, watermark, unfinished, displeasing, oldest, early, chromatic aberration, signature, extra digits, artistic error, username, scan, [abstract]")
-    pony_ps = to_list("source_anime, score_9, score_8_up, score_7_up, masterpiece, best quality, very aesthetic, absurdres")
-    pony_nps = to_list("source_pony, source_furry, source_cartoon, score_6, score_5, score_4, busty, ugly face, mutated hands, low res, blurry face, black and white, the simpsons, overwatch, apex legends")
     prompts = to_list(prompt)
     neg_prompts = to_list(neg_prompt)
 
@@ -283,22 +290,7 @@ model_prompt_dict = load_model_prompt_dict()
 
 
 def insert_model_recom_prompt(prompt: str = "", neg_prompt: str = "", model_name: str = "None"):
-    def to_list(s):
-        return [x.strip() for x in s.split(",") if not s == ""]
-
-    def list_sub(a, b):
-        return [e for e in a if e not in b]
-
-    def list_uniq(l):
-        return sorted(set(l), key=l.index)
-
     if not model_name or not enable_auto_recom_prompt: return prompt, neg_prompt
-    animagine_ps = to_list("anime artwork, anime style, key visual, vibrant, studio anime, highly detailed, masterpiece, best quality, very aesthetic, absurdres")
-    animagine_nps = to_list("lowres, (bad), text, error, fewer, extra, missing, worst quality, jpeg artifacts, low quality, watermark, unfinished, displeasing, oldest, early, chromatic aberration, signature, extra digits, artistic error, username, scan, [abstract]")
-    pony_ps = to_list("source_anime, score_9, score_8_up, score_7_up, masterpiece, best quality, very aesthetic, absurdres")
-    pony_nps = to_list("source_pony, source_furry, source_cartoon, score_6, score_5, score_4, busty, ugly face, mutated hands, low res, blurry face, black and white, the simpsons, overwatch, apex legends")
-    other_ps = to_list("anime artwork, anime style, key visual, vibrant, studio anime, highly detailed, cinematic photo, 35mm photograph, film, bokeh, professional, 4k, highly detailed")
-    other_nps = to_list("photo, deformed, black and white, realism, disfigured, low contrast, drawing, painting, crayon, sketch, graphite, impressionist, noisy, blurry, soft, deformed, ugly")
     prompts = to_list(prompt)
     neg_prompts = to_list(neg_prompt)
     prompts = list_sub(prompts, animagine_ps + pony_ps + other_ps)
@@ -311,8 +303,8 @@ def insert_model_recom_prompt(prompt: str = "", neg_prompt: str = "", model_name
         ps = to_list(model_prompt_dict[model_name]["prompt"])
         nps = to_list(model_prompt_dict[model_name]["negative_prompt"])
     else:
-        ps = …
-        nps = …
+        ps = default_ps
+        nps = default_nps
     prompts = prompts + ps
     neg_prompts = neg_prompts + nps
     prompt = ", ".join(list_uniq(prompts) + last_empty_p)
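The three list helpers that used to be redefined inside both `insert_recom_prompt` and `insert_model_recom_prompt` now live once at module level, alongside the shared tag constants. For reference, their behavior (the tag values below are made up):

```python
def to_list(s):
    # Split a comma-separated prompt into stripped tags; "" yields [].
    # Note the filter tests the whole string s, not each element.
    return [x.strip() for x in s.split(",") if not s == ""]

def list_sub(a, b):
    # Remove every element of b from a, preserving order and duplicates.
    return [e for e in a if e not in b]

def list_uniq(l):
    # Deduplicate while preserving first-seen order.
    return sorted(set(l), key=l.index)

tags = to_list("masterpiece, best quality, masterpiece")
print(list_uniq(tags))                   # ['masterpiece', 'best quality']
print(list_sub(tags, ["best quality"]))  # ['masterpiece', 'masterpiece']
```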
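With the constants hoisted, the `else:` branch of `insert_model_recom_prompt` gains a concrete fallback: models without an entry in `model_prompt_dict` now receive the `default_ps`/`default_nps` quality tags. A compressed sketch of the merge logic under stated assumptions: `recom_prompt` is a simplified stand-in (the real function also strips old recommendation tags with `list_sub` and preserves trailing empty entries), the dictionary is a stub, and the default tag lists are abbreviated from the diff:

```python
def to_list(s):
    return [x.strip() for x in s.split(",") if not s == ""]

def list_uniq(l):
    return sorted(set(l), key=l.index)

model_prompt_dict = {}  # stub; the real dict comes from load_model_prompt_dict()
default_ps = to_list("score_9, score_8_up, score_7_up, masterpiece, best quality")
default_nps = to_list("score_6, score_5, score_4, lowres, worst quality")

def recom_prompt(prompt="", neg_prompt="", model_name="None"):
    entry = model_prompt_dict.get(model_name)
    if entry:
        ps, nps = to_list(entry["prompt"]), to_list(entry["negative_prompt"])
    else:
        ps, nps = default_ps, default_nps  # the fallback this commit adds
    # list_uniq keeps the first occurrence, so user tags win over defaults.
    return (", ".join(list_uniq(to_list(prompt) + ps)),
            ", ".join(list_uniq(to_list(neg_prompt) + nps)))

print(recom_prompt("1girl, masterpiece", "lowres", "unknown-model"))
# ('1girl, masterpiece, score_9, score_8_up, score_7_up, best quality',
#  'lowres, score_6, score_5, score_4, worst quality')
```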