Spaces: Running on Zero

Update app.py
app.py CHANGED

@@ -118,25 +118,30 @@ def download_file(url, directory=None):
         file.write(response.content)

     return filepath

 def update_selection(evt: gr.SelectData, selected_indices, loras_state, width, height):
     selected_index = evt.index
     selected_indices = selected_indices or []
     if selected_index in selected_indices:
         selected_indices.remove(selected_index)
     else:
-        if len(selected_indices) < 2:
+        if len(selected_indices) < 3:  # changed: 2 -> 3
             selected_indices.append(selected_index)
         else:
-            gr.Warning("You can select up to 2 LoRAs, remove one to select a new one.")
-            return gr.update(), gr.update(), gr.update(), selected_indices, gr.update(), gr.update(), width, height, gr.update(), gr.update()
+            gr.Warning("You can select up to 3 LoRAs, remove one to select a new one.")
+            return gr.update(), gr.update(), gr.update(), gr.update(), selected_indices, gr.update(), gr.update(), gr.update(), width, height, gr.update(), gr.update(), gr.update()

     selected_info_1 = "Select a LoRA 1"
     selected_info_2 = "Select a LoRA 2"
+    selected_info_3 = "Select a LoRA 3"
+
     lora_scale_1 = 1.15
     lora_scale_2 = 1.15
+    lora_scale_3 = 1.15
     lora_image_1 = None
     lora_image_2 = None
+    lora_image_3 = None
+
     if len(selected_indices) >= 1:
         lora1 = loras_state[selected_indices[0]]
         selected_info_1 = f"### LoRA 1 Selected: [{lora1['title']}](https://huggingface.co/{lora1['repo']}) ✨"
@@ -145,14 +150,19 @@ def update_selection(evt: gr.SelectData, selected_indices, loras_state, width, height):
         lora2 = loras_state[selected_indices[1]]
         selected_info_2 = f"### LoRA 2 Selected: [{lora2['title']}](https://huggingface.co/{lora2['repo']}) ✨"
         lora_image_2 = lora2['image']
-
+    if len(selected_indices) >= 3:
+        lora3 = loras_state[selected_indices[2]]
+        selected_info_3 = f"### LoRA 3 Selected: [{lora3['title']}](https://huggingface.co/{lora3['repo']}) ✨"
+        lora_image_3 = lora3['image']
+
     if selected_indices:
         last_selected_lora = loras_state[selected_indices[-1]]
         new_placeholder = f"Type a prompt for {last_selected_lora['title']}"
     else:
         new_placeholder = "Type a prompt after selecting a LoRA"

-    return gr.update(placeholder=new_placeholder), selected_info_1, selected_info_2, selected_indices, lora_scale_1, lora_scale_2, width, height, lora_image_1, lora_image_2
+    return gr.update(placeholder=new_placeholder), selected_info_1, selected_info_2, selected_info_3, selected_indices, lora_scale_1, lora_scale_2, lora_scale_3, width, height, lora_image_1, lora_image_2, lora_image_3
+

 def remove_lora_1(selected_indices, loras_state):
     if len(selected_indices) >= 1:
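The selection handler now tolerates up to three picks and returns one extra info string, scale value, and image slot for the third LoRA. A minimal, framework-free sketch of the toggle-with-cap behaviour; toggle_selection is a hypothetical name used only for illustration, not part of app.py:

def toggle_selection(selected, index, max_slots=3):
    """Toggle `index` in the selection list, capped at `max_slots` entries."""
    selected = list(selected or [])
    if index in selected:
        selected.remove(index)          # clicking a selected LoRA deselects it
    elif len(selected) < max_slots:
        selected.append(index)          # room left: select it
    # else: at capacity, leave the selection unchanged (app.py raises gr.Warning here)
    return selected

print(toggle_selection([0, 4], 4))      # [0]        - deselect
print(toggle_selection([0, 4], 7))      # [0, 4, 7]  - third slot now allowed
print(toggle_selection([0, 4, 7], 9))   # [0, 4, 7]  - capacity reached, unchanged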
@@ -350,7 +360,7 @@ def generate_image_to_image(prompt_mash, image_input_path, image_strength, steps
     ).images[0]
     return final_image

-def run_lora(prompt, image_input, image_strength, cfg_scale, steps, selected_indices, lora_scale_1, lora_scale_2, randomize_seed, seed, width, height, loras_state, progress=gr.Progress(track_tqdm=True)):
+def run_lora(prompt, image_input, image_strength, cfg_scale, steps, selected_indices, lora_scale_1, lora_scale_2, lora_scale_3, randomize_seed, seed, width, height, loras_state, progress=gr.Progress(track_tqdm=True)):
     try:
         # Detect Korean and translate
         if any('\u3131' <= char <= '\u318E' or '\uAC00' <= char <= '\uD7A3' for char in prompt):
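run_lora begins by checking whether the prompt contains Korean before translating it. The character test covers the Hangul Compatibility Jamo (U+3131 to U+318E) and Hangul Syllables (U+AC00 to U+D7A3) blocks; a standalone sketch of the same check (contains_hangul is a hypothetical name for illustration):

def contains_hangul(text: str) -> bool:
    # Same Unicode ranges the app checks before translating a prompt:
    # Hangul Compatibility Jamo (U+3131-U+318E) and Hangul Syllables (U+AC00-U+D7A3).
    return any('\u3131' <= ch <= '\u318E' or '\uAC00' <= ch <= '\uD7A3' for ch in text)

print(contains_hangul("달리는 고양이"))   # True  - would be translated first
print(contains_hangul("a running cat"))  # False - used as-is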
@@ -390,7 +400,7 @@ def run_lora(prompt, image_input, image_strength, cfg_scale, steps, selected_ind
         for idx, lora in enumerate(selected_loras):
             lora_name = f"lora_{idx}"
             lora_names.append(lora_name)
-            lora_weights.append(lora_scale_1 if idx == 0 else lora_scale_2)
+            lora_weights.append(lora_scale_1 if idx == 0 else lora_scale_2 if idx == 1 else lora_scale_3)
             lora_path = lora['repo']
             weight_name = lora.get("weights")
             print(f"Lora Path: {lora_path}")
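With three scale sliders, the chained conditional maps adapter index 0/1/2 to lora_scale_1/2/3. An equivalent formulation indexes into a list, which scales more gracefully if further slots are ever added; pick_scale is a hypothetical helper shown only as a sketch:

def pick_scale(idx, lora_scale_1, lora_scale_2, lora_scale_3):
    """Return the scale for adapter `idx`, mirroring the chained conditional in run_lora."""
    scales = [lora_scale_1, lora_scale_2, lora_scale_3]
    return scales[idx]

# Same result as: lora_scale_1 if idx == 0 else lora_scale_2 if idx == 1 else lora_scale_3
print([pick_scale(i, 1.15, 0.9, 0.6) for i in range(3)])  # [1.15, 0.9, 0.6]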
@@ -435,12 +445,12 @@ def run_lora(prompt, image_input, image_strength, cfg_scale, steps, selected_ind
             raise Exception("Failed to generate image")

         return final_image, seed, gr.update(visible=False)
+
+
     except Exception as e:
         print(f"Error in run_lora: {str(e)}")
         return None, seed, gr.update(visible=False)

-
-
 run_lora.zerogpu = True

 def get_huggingface_safetensors(link):
@@ -636,6 +646,9 @@ with gr.Blocks(theme="Nymbo/Nymbo_Theme", css=css, delete_cache=(60, 3600)) as a
             prompt = gr.Textbox(label="Prompt", lines=1, placeholder="Type a prompt after selecting a LoRA")
         with gr.Column(scale=1):
             generate_button = gr.Button("Generate", variant="primary", elem_classes=["button_total"])
+
+
+

     with gr.Row(elem_id="loaded_loras"):
         with gr.Column(scale=1, min_width=25):
@@ -650,16 +663,18 @@ with gr.Blocks(theme="Nymbo/Nymbo_Theme", css=css, delete_cache=(60, 3600)) as a
                     lora_scale_1 = gr.Slider(label="LoRA 1 Scale", minimum=0, maximum=3, step=0.01, value=1.15)
                 with gr.Row():
                     remove_button_1 = gr.Button("Remove", size="sm")
+
         with gr.Column(scale=8):
             with gr.Row():
                 with gr.Column(scale=0, min_width=50):
-
+                    lora_image_3 = gr.Image(label="LoRA 3 Image", interactive=False, min_width=50, width=50, show_label=False, show_share_button=False, show_download_button=False, show_fullscreen_button=False, height=50)
                 with gr.Column(scale=3, min_width=100):
-
+                    selected_info_3 = gr.Markdown("Select a LoRA 3")
                 with gr.Column(scale=5, min_width=50):
-
+                    lora_scale_3 = gr.Slider(label="LoRA 3 Scale", minimum=0, maximum=3, step=0.01, value=1.15)
             with gr.Row():
-
+                remove_button_3 = gr.Button("Remove", size="sm")
+

     with gr.Row():
         with gr.Column():
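The LoRA 3 widgets repeat the thumbnail/markdown/slider/remove pattern already used for slots 1 and 2. A hedged sketch of how the repeated block could be factored into a helper, assuming only the Gradio components and keyword arguments already present in app.py; lora_slot is a hypothetical function and is not part of this commit:

import gradio as gr

def lora_slot(n: int):
    """Build one LoRA slot: thumbnail, info text, scale slider, and remove button."""
    with gr.Column(scale=8):
        with gr.Row():
            with gr.Column(scale=0, min_width=50):
                image = gr.Image(label=f"LoRA {n} Image", interactive=False, min_width=50,
                                 width=50, height=50, show_label=False,
                                 show_share_button=False, show_download_button=False,
                                 show_fullscreen_button=False)
            with gr.Column(scale=3, min_width=100):
                info = gr.Markdown(f"Select a LoRA {n}")
            with gr.Column(scale=5, min_width=50):
                scale = gr.Slider(label=f"LoRA {n} Scale", minimum=0, maximum=3,
                                  step=0.01, value=1.15)
        with gr.Row():
            remove = gr.Button("Remove", size="sm")
    return image, info, scale, remove

with gr.Blocks() as demo:
    lora_image_3, selected_info_3, lora_scale_3, remove_button_3 = lora_slot(3)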
@@ -776,7 +791,7 @@ with gr.Blocks(theme="Nymbo/Nymbo_Theme", css=css, delete_cache=(60, 3600)) as a
     gr.on(
         triggers=[generate_button.click, prompt.submit],
         fn=run_lora,
-        inputs=[prompt, input_image, image_strength, cfg_scale, steps, selected_indices, lora_scale_1, lora_scale_2, randomize_seed, seed, width, height, loras_state],
+        inputs=[prompt, input_image, image_strength, cfg_scale, steps, selected_indices, lora_scale_1, lora_scale_2, lora_scale_3, randomize_seed, seed, width, height, loras_state],
         outputs=[result, seed, progress_bar]
     ).then(
         fn=lambda x, history: update_history(x, history) if x is not None else history,
@@ -784,6 +799,8 @@ with gr.Blocks(theme="Nymbo/Nymbo_Theme", css=css, delete_cache=(60, 3600)) as a
         outputs=history_gallery,
     )

+
+
     upscale_input.upload(
         lambda x: gr.update(interactive=x is not None),
         inputs=[upscale_input],
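Both the run_lora signature and the gr.on inputs list had to grow by the same entry (lora_scale_3); forgetting one side is an easy way to break the event wiring. A purely illustrative guard that compares the two at startup; check_wiring is a hypothetical helper, not part of this commit:

import inspect

def check_wiring(fn, inputs, injected=("progress",)):
    """Raise early if the Gradio `inputs` list doesn't match `fn`'s parameter count.

    `injected` names parameters Gradio supplies itself (e.g. gr.Progress) and skips.
    """
    expected = [name for name in inspect.signature(fn).parameters if name not in injected]
    if len(expected) != len(inputs):
        raise ValueError(
            f"{fn.__name__} takes {len(expected)} inputs, but {len(inputs)} components are wired"
        )

# Usage sketch (inside the Blocks context, before gr.on is registered):
# check_wiring(run_lora, [prompt, input_image, image_strength, cfg_scale, steps,
#                         selected_indices, lora_scale_1, lora_scale_2, lora_scale_3,
#                         randomize_seed, seed, width, height, loras_state])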
|