Commit 2f40f84
Parent(s): 7efd9a0
Update app.py
app.py CHANGED

@@ -31,6 +31,9 @@ saved_names = [
     hf_hub_download(item["repo"], item["weights"]) for item in sdxl_loras
 ]
 
+for item, saved_name in zip(sdxl_loras, saved_names):
+    item["saved_name"] = saved_name
+
 css = '''
 #title{text-align:center}
 #plus_column{align-self: center}
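The first hunk gives each LoRA entry a handle on its downloaded file: hf_hub_download(repo_id, filename) returns the local cache path of the file it fetched, so zipping saved_names back onto sdxl_loras stores a loadable path under item["saved_name"]. A minimal sketch of the pattern, with an illustrative placeholder entry standing in for the real sdxl_loras list defined earlier in app.py:

from huggingface_hub import hf_hub_download

# Illustrative placeholder entry; the real sdxl_loras list lives earlier in app.py.
sdxl_loras = [{"repo": "some-user/some-sdxl-lora", "weights": "lora.safetensors"}]

# hf_hub_download returns the local filesystem path of the cached file.
saved_names = [hf_hub_download(item["repo"], item["weights"]) for item in sdxl_loras]

# Attach each path to its entry so merge_and_run can later read item["saved_name"].
for item, saved_name in zip(sdxl_loras, saved_names):
    item["saved_name"] = saved_name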
@@ -41,19 +44,18 @@ css = '''
     border-top-left-radius: 0px;}
 '''
 
-
-
 #@spaces.GPU
 def merge_and_run(prompt, negative_prompt, shuffled_items, lora_1_scale=0.5, lora_2_scale=0.5, progress=gr.Progress(track_tqdm=True)):
     pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16)
+    pipe.to(torch_dtype=torch.float16)
+    pipe.to("cuda")
     print("Loading LoRAs")
-    pipe.load_lora_weights(shuffled_items[0]['
+    pipe.load_lora_weights(shuffled_items[0]['saved_name'])
     pipe.fuse_lora(lora_1_scale)
-    pipe.load_lora_weights(shuffled_items[1]['
+    pipe.load_lora_weights(shuffled_items[1]['saved_name'])
     pipe.fuse_lora(lora_2_scale)
 
-
-    pipe.to("cuda")
+
     if negative_prompt == "":
         negative_prompt = False
     print("Running inference")