Upload 2 files
app.py
CHANGED
@@ -20,7 +20,6 @@ from flux import (search_civitai_lora, select_civitai_lora, search_civitai_lora_
                   download_my_lora, get_all_lora_tupled_list, apply_lora_prompt,
                   update_loras)
 from tagger.tagger import predict_tags_wd, compose_prompt_to_copy
-from tagger.fl2cog import predict_tags_fl2_cog
 from tagger.fl2flux import predict_tags_fl2_flux
 
 # Initialize the base model
@@ -45,7 +44,9 @@ def change_base_model(repo_id: str, cn_on: bool, progress=gr.Progress(track_tqdm
             controlnet_union = FluxControlNetModel.from_pretrained(controlnet_model_union_repo, torch_dtype=torch.bfloat16)
             controlnet = FluxMultiControlNetModel([controlnet_union])
             pipe = FluxControlNetPipeline.from_pretrained(repo_id, controlnet=controlnet, torch_dtype=torch.bfloat16)
+            pipe.enable_model_cpu_offload()
             last_model = repo_id
+            last_cn_on = cn_on
             progress(1, desc=f"Model loaded: {repo_id} / ControlNet Loaded: {controlnet_model_union_repo}")
             print(f"Model loaded: {repo_id} / ControlNet Loaded: {controlnet_model_union_repo}")
         else:
@@ -53,7 +54,9 @@ def change_base_model(repo_id: str, cn_on: bool, progress=gr.Progress(track_tqdm
             print(f"Loading model: {repo_id}")
             clear_cache()
             pipe = DiffusionPipeline.from_pretrained(repo_id, torch_dtype=torch.bfloat16)
+            pipe.enable_model_cpu_offload()
             last_model = repo_id
+            last_cn_on = cn_on
             progress(1, desc=f"Model loaded: {repo_id}")
             print(f"Model loaded: {repo_id}")
     except Exception as e:
@@ -296,7 +299,7 @@ with gr.Blocks(theme='Nymbo/Nymbo_Theme', fill_width=True, css=css) as app:
                     v2_character = gr.Textbox(label="Character", placeholder="hatsune miku", scale=2, visible=False)
                     v2_series = gr.Textbox(label="Series", placeholder="vocaloid", scale=2, visible=False)
                     v2_copy = gr.Button(value="Copy to clipboard", size="sm", interactive=False, visible=False)
-                tagger_algorithms = gr.CheckboxGroup(["Use WD Tagger", "Use …
+                tagger_algorithms = gr.CheckboxGroup(["Use WD Tagger", "Use Florence-2-Flux"], label="Algorithms", value=["Use WD Tagger"])
                 tagger_generate_from_image = gr.Button(value="Generate Prompt from Image")
             prompt = gr.Textbox(label="Prompt", lines=1, max_lines=8, placeholder="Type a prompt")
             prompt_enhance = gr.Button(value="Enhance your prompt", variant="secondary")
@@ -481,8 +484,6 @@ with gr.Blocks(theme='Nymbo/Nymbo_Theme', fill_width=True, css=css) as app:
         show_api=False,
     ).success(
         predict_tags_fl2_flux, [tagger_image, prompt, tagger_algorithms], [prompt], show_api=False,
-    ).success(
-        predict_tags_fl2_cog, [tagger_image, prompt, tagger_algorithms], [prompt], show_api=False,
     ).success(
         compose_prompt_to_copy, [v2_character, v2_series, prompt], [prompt], queue=False, show_api=False,
     )
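Both branches of change_base_model now call pipe.enable_model_cpu_offload() after building the pipeline and record last_cn_on next to last_model, presumably so a later call can tell whether the ControlNet toggle (and not just the repo) changed since the last load. A minimal sketch of the offload pattern, assuming only a repo_id string and the stock diffusers/accelerate APIs used in the diff; the surrounding globals and the ControlNet branch are omitted:

import torch
from diffusers import DiffusionPipeline

def load_base_model(repo_id: str) -> DiffusionPipeline:
    # Load weights in bfloat16, matching the diff above.
    pipe = DiffusionPipeline.from_pretrained(repo_id, torch_dtype=torch.bfloat16)
    # enable_model_cpu_offload() (requires the accelerate package) keeps each
    # sub-model on the CPU and moves it to the GPU only for its own forward
    # pass, trading some latency for a much smaller peak VRAM footprint.
    pipe.enable_model_cpu_offload()
    return pipe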
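The tagger wiring drops the predict_tags_fl2_cog step, leaving a chain of .success() handlers that each read and rewrite the shared prompt textbox. In Gradio, .success() runs the next handler only if the previous one finished without raising, unlike .then(), which always runs. A small illustrative sketch of that chaining pattern, using placeholder functions rather than the Space's real taggers:

import gradio as gr

# Hypothetical stand-ins for predict_tags_wd / predict_tags_fl2_flux.
def step_one(text):
    return text + " first-pass tags"

def step_two(text):
    return text + ", second-pass tags"

with gr.Blocks() as demo:
    prompt = gr.Textbox(label="Prompt")
    run = gr.Button("Generate Prompt from Image")
    # Each .success() fires only if the preceding handler raised no error,
    # so a failing tagger stops the rest of the chain.
    run.click(step_one, [prompt], [prompt]).success(step_two, [prompt], [prompt])

demo.launch()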
mod.py
CHANGED
@@ -114,6 +114,7 @@ def resize_image(image, target_width, target_height, crop=True):
 
 
 # https://huggingface.co/spaces/jiuface/FLUX.1-dev-Controlnet-Union/blob/main/app.py
+# https://huggingface.co/InstantX/FLUX.1-dev-Controlnet-Union
 controlnet_union_modes = {
     "None": -1,
     #"scribble_hed": 0,
@@ -128,14 +129,16 @@ controlnet_union_modes = {
 }
 
 
+# https://github.com/pytorch/pytorch/issues/123834
 def get_control_params():
+    from diffusers.utils import load_image
     modes = []
     images = []
     scales = []
     for i, mode in enumerate(control_modes):
         if mode == -1 or control_images[i] is None: continue
         modes.append(control_modes[i])
-        images.append(control_images[i])
+        images.append(load_image(control_images[i]))
         scales.append(control_scales[i])
     return modes, images, scales
 
@@ -274,7 +277,7 @@ def description_ui():
     gr.Markdown(
         """
         - Mod of [multimodalart/flux-lora-the-explorer](https://huggingface.co/spaces/multimodalart/flux-lora-the-explorer),
-        [jiuface/FLUX.1-dev-Controlnet-Union](https://huggingface.co/spaces/jiuface/),
+        [jiuface/FLUX.1-dev-Controlnet-Union](https://huggingface.co/spaces/jiuface/FLUX.1-dev-Controlnet-Union),
         [DamarJati/FLUX.1-DEV-Canny](https://huggingface.co/spaces/DamarJati/FLUX.1-DEV-Canny),
         [gokaygokay/FLUX-Prompt-Generator](https://huggingface.co/spaces/gokaygokay/FLUX-Prompt-Generator).
         """
@@ -305,4 +308,5 @@ def enhance_prompt(input_prompt):
 
 
 load_prompt_enhancer.zerogpu = True
-fuse_loras.zerogpu = True
+fuse_loras.zerogpu = True
+
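In get_control_params, each control image is now passed through diffusers.utils.load_image before it reaches the pipeline. load_image accepts a local path, a URL, or an existing PIL.Image and always returns a PIL image converted to RGB, so every ControlNet input arrives in a uniform format regardless of what the Gradio component handed back. A minimal sketch of that normalization, with raw_images as a hypothetical mixed list:

from diffusers.utils import load_image

def normalize_control_images(raw_images):
    # raw_images is a hypothetical list that may mix file paths, URLs, and
    # PIL.Image objects; None entries are skipped, mirroring the
    # "control_images[i] is None" check in the diff above.
    # load_image returns a PIL.Image converted to RGB in every case.
    return [load_image(img) for img in raw_images if img is not None]

The trailing fuse_loras.zerogpu = True mirrors the existing marker on load_prompt_enhancer, presumably so both functions receive the same treatment from the Space's ZeroGPU handling.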
|