Update app.py

app.py CHANGED
@@ -355,79 +355,79 @@ def generate_image_to_image(prompt_mash, image_input_path, image_strength, steps
     ).images[0]
     return final_image
 
-# Revised run_lora function
 def run_lora(prompt, image_input, image_strength, cfg_scale, steps, selected_indices, lora_scale_1, lora_scale_2, randomize_seed, seed, width, height, loras_state, progress=gr.Progress(track_tqdm=True)):
     try:
+        # Detect and translate Korean text
+        if any('\u3131' <= char <= '\u318E' or '\uAC00' <= char <= '\uD7A3' for char in prompt):
+            translated = translator(prompt, max_length=512)[0]['translation_text']
+            print(f"Original prompt: {prompt}")
+            print(f"Translated prompt: {translated}")
+            prompt = translated
+
+        if not selected_indices:
+            raise gr.Error("You must select at least one LoRA before proceeding.")
+
+        selected_loras = [loras_state[idx] for idx in selected_indices]
+
+        # Build the prompt with trigger words
+        prepends = []
+        appends = []
+        for lora in selected_loras:
+            trigger_word = lora.get('trigger_word', '')
+            if trigger_word:
+                if lora.get("trigger_position") == "prepend":
+                    prepends.append(trigger_word)
                 else:
+                    appends.append(trigger_word)
+        prompt_mash = " ".join(prepends + [prompt] + appends)
+        print("Prompt Mash: ", prompt_mash)
+
+        # Unload previous LoRA weights
+        with calculateDuration("Unloading LoRA"):
+            pipe.unload_lora_weights()
+            pipe_i2i.unload_lora_weights()
+
+        print(pipe.get_active_adapters())
+        # Load LoRA weights with respective scales
+        lora_names = []
+        lora_weights = []
+        with calculateDuration("Loading LoRA weights"):
+            for idx, lora in enumerate(selected_loras):
+                lora_name = f"lora_{idx}"
+                lora_names.append(lora_name)
+                lora_weights.append(lora_scale_1 if idx == 0 else lora_scale_2)
+                lora_path = lora['repo']
+                weight_name = lora.get("weights")
+                print(f"Lora Path: {lora_path}")
+                if image_input is not None:
+                    if weight_name:
+                        pipe_i2i.load_lora_weights(lora_path, weight_name=weight_name, low_cpu_mem_usage=True, adapter_name=lora_name)
+                    else:
+                        pipe_i2i.load_lora_weights(lora_path, low_cpu_mem_usage=True, adapter_name=lora_name)
                 else:
+                    if weight_name:
+                        pipe.load_lora_weights(lora_path, weight_name=weight_name, low_cpu_mem_usage=True, adapter_name=lora_name)
+                    else:
+                        pipe.load_lora_weights(lora_path, low_cpu_mem_usage=True, adapter_name=lora_name)
+        print("Loaded LoRAs:", lora_names)
+        print("Adapter weights:", lora_weights)
+        if image_input is not None:
+            pipe_i2i.set_adapters(lora_names, adapter_weights=lora_weights)
+        else:
+            pipe.set_adapters(lora_names, adapter_weights=lora_weights)
+        print(pipe.get_active_adapters())
+        # Set random seed for reproducibility
+        with calculateDuration("Randomizing seed"):
+            if randomize_seed:
+                seed = random.randint(0, MAX_SEED)
 
+        # Generate image
         if image_input is not None:
             final_image = generate_image_to_image(prompt_mash, image_input, image_strength, steps, cfg_scale, width, height, seed)
             return final_image, seed, gr.update(visible=False)
         else:
             image_generator = generate_image(prompt_mash, steps, seed, cfg_scale, width, height, progress)
+            # Consume the generator to get the final image
             final_image = None
             step_counter = 0
             for image in image_generator:
@@ -444,8 +444,6 @@ def run_lora(prompt, image_input, image_strength, cfg_scale, steps, selected_ind
         print(f"Error in run_lora: {str(e)}")
         return None, seed, gr.update(visible=False)
 
-
-
 run_lora.zerogpu = True
 
 def get_huggingface_safetensors(link):
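Note: the key addition is the Hangul check at the top of run_lora. The prompt is scanned for characters in the Hangul Compatibility Jamo block (U+3131 to U+318E) or the Hangul Syllables block (U+AC00 to U+D7A3) and, if any are found, translated to English before the trigger words are assembled. A minimal standalone sketch of that step, assuming `translator` is a transformers translation pipeline; the Space defines its translator elsewhere in app.py, so the Ko-En checkpoint below is a stand-in, not confirmed by this diff:

from transformers import pipeline

# Stand-in Ko->En model; the actual `translator` used by the Space is not shown in this diff.
translator = pipeline("translation", model="Helsinki-NLP/opus-mt-ko-en")

def contains_korean(text: str) -> bool:
    # Same ranges as run_lora: Hangul Compatibility Jamo and Hangul Syllables.
    return any('\u3131' <= ch <= '\u318E' or '\uAC00' <= ch <= '\uD7A3' for ch in text)

prompt = "우주복을 입은 고양이"  # "a cat in a spacesuit"
if contains_korean(prompt):
    prompt = translator(prompt, max_length=512)[0]['translation_text']
print(prompt)  # English text, ready to be joined into prompt_mash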
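The LoRA handling itself follows the standard diffusers multi-adapter pattern: each selected LoRA is loaded under its own adapter_name, and set_adapters then activates them together with per-adapter strengths. A condensed sketch of that flow, assuming `pipe` is an already-constructed diffusers pipeline with LoRA support; the repo ids and weight filename below are hypothetical placeholders:

# `pipe` is assumed to be a diffusers pipeline that supports LoRA adapters.
lora_configs = [
    {"repo": "user/lora-style-a", "weights": "style_a.safetensors"},  # hypothetical
    {"repo": "user/lora-style-b", "weights": None},                   # hypothetical
]
scales = [0.9, 0.6]  # per-adapter strengths, i.e. lora_scale_1 / lora_scale_2

pipe.unload_lora_weights()  # drop adapters left over from a previous run
names = []
for idx, cfg in enumerate(lora_configs):
    name = f"lora_{idx}"
    names.append(name)
    if cfg["weights"]:
        pipe.load_lora_weights(cfg["repo"], weight_name=cfg["weights"],
                               low_cpu_mem_usage=True, adapter_name=name)
    else:
        pipe.load_lora_weights(cfg["repo"], low_cpu_mem_usage=True, adapter_name=name)

# Activate all loaded adapters at once, each at its own strength.
pipe.set_adapters(names, adapter_weights=scales)
print(pipe.get_active_adapters())  # e.g. ['lora_0', 'lora_1']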