Update app.py
Added the ability to select the PussInBoots LoRA
app.py
CHANGED
@@ -61,7 +61,7 @@ device = "cuda" if torch.cuda.is_available() else "cpu"
 model_id_default = "sd-legacy/stable-diffusion-v1-5"
 model_dropdown = ['stabilityai/sdxl-turbo', 'CompVis/stable-diffusion-v1-4', 'sd-legacy/stable-diffusion-v1-5' ]
 
-model_lora_default = "
+model_lora_default = "lora_pussinboots_logos"
 model_lora_dropdown = ['lora_lady_and_cats_logos', 'lora_pussinboots_logos' ]
 
 if torch.cuda.is_available():
@@ -109,8 +109,8 @@ def infer(
         prompt_embeds, negative_prompt_embeds = align_embeddings(prompt_embeds, negative_prompt_embeds)
     else:
         # add LoRA
-        pipe = get_lora_sd_pipeline(ckpt_dir='./lora_lady_and_cats_logos', base_model_name_or_path=model_id_default, dtype=torch_dtype).to(device)
-
+        #pipe = get_lora_sd_pipeline(ckpt_dir='./lora_lady_and_cats_logos', base_model_name_or_path=model_id_default, dtype=torch_dtype).to(device)
+        pipe = get_lora_sd_pipeline(ckpt_dir='./'+model_lora_id, base_model_name_or_path=model_id_default, dtype=torch_dtype).to(device)
         prompt_embeds = process_prompt(prompt, pipe.tokenizer, pipe.text_encoder)
         negative_prompt_embeds = process_prompt(negative_prompt, pipe.tokenizer, pipe.text_encoder)
         prompt_embeds, negative_prompt_embeds = align_embeddings(prompt_embeds, negative_prompt_embeds)
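For context, a minimal sketch of how the dropdown selection could reach infer() as model_lora_id, the variable this change introduces. This is not the actual app.py: the Gradio components and the body of infer() below are assumptions, and get_lora_sd_pipeline is the app's own helper referenced in the diff, stubbed out here as a comment.

# A minimal sketch (assumed wiring, not the real app.py): the selected LoRA
# name from the dropdown is passed into infer() and used to build ckpt_dir,
# mirroring the './' + model_lora_id change in the diff.
import gradio as gr

model_lora_default = "lora_pussinboots_logos"
model_lora_dropdown = ['lora_lady_and_cats_logos', 'lora_pussinboots_logos']

def infer(prompt, negative_prompt, model_lora_id):
    # In the real app this would call
    # get_lora_sd_pipeline(ckpt_dir='./' + model_lora_id, ...), as in the diff.
    ckpt_dir = './' + model_lora_id
    return f"would load LoRA weights from {ckpt_dir} for prompt: {prompt}"

with gr.Blocks() as demo:
    prompt = gr.Text(label="Prompt")
    negative_prompt = gr.Text(label="Negative prompt")
    # Dropdown mirrors model_lora_dropdown from app.py; its value becomes
    # model_lora_id inside infer().
    lora = gr.Dropdown(choices=model_lora_dropdown, value=model_lora_default, label="LoRA")
    out = gr.Text(label="Result")
    run = gr.Button("Run")
    run.click(infer, inputs=[prompt, negative_prompt, lora], outputs=out)

# demo.launch()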