Spaces: Running on A100

Commit • be2828d
Parent(s): 09baf03

Update app.py

app.py CHANGED
@@ -93,49 +93,54 @@ def check_selected(selected_state):
     if not selected_state:
         raise gr.Error("You must select a LoRA")

-def …
-    … (removed lines whose content is not rendered in this view)
-        pipe.load_lora_weights(full_path_lora)
-    else:
-        load_incompatible_lora(pipe, full_path_lora, lora_scale)

-    …
-    if ";" in weights_file:
-        weights_file, multiplier = weights_file.split(";")
-        multiplier = float(multiplier)
-    else:
-        multiplier = lora_scale

-    …
-        pipe.…
-    …

-    …
-    return pipe(
         prompt=prompt,
         negative_prompt=negative,
         width=768,
@@ -144,26 +149,6 @@ def generate_image(pipe, prompt, negative, cross_attention_kwargs):
         guidance_scale=7.5,
         cross_attention_kwargs=cross_attention_kwargs,
     ).images[0]
-
-def run_lora(prompt, negative, lora_scale, selected_state):
-    global last_lora, last_merged, pipe
-
-    if not selected_state:
-        raise gr.Error("You must select a LoRA")
-
-    if negative == "":
-        negative = None
-
-    repo_name = sdxl_loras[selected_state.index]["repo"]
-    full_path_lora = saved_names[selected_state.index]
-
-    cross_attention_kwargs = get_cross_attention_kwargs(
-        lora_scale, repo_name, sdxl_loras[selected_state.index]["is_compatible"])
-
-    load_lora_model(pipe, repo_name, full_path_lora, lora_scale, selected_state)
-
-    image = generate_image(pipe, prompt, negative, cross_attention_kwargs)
-
     last_lora = repo_name
     return image, gr.update(visible=True)

New version (lines 93–154):

     if not selected_state:
         raise gr.Error("You must select a LoRA")

+def merge_incompatible_lora(full_path_lora, lora_scale):
+    for weights_file in [full_path_lora]:
+        if ";" in weights_file:
+            weights_file, multiplier = weights_file.split(";")
+            multiplier = float(multiplier)
+        else:
+            multiplier = lora_scale

+        lora_model, weights_sd = lora.create_network_from_weights(
+            multiplier,
+            full_path_lora,
+            pipe.vae,
+            pipe.text_encoder,
+            pipe.unet,
+            for_inference=True,
+        )
+        lora_model.merge_to(
+            pipe.text_encoder, pipe.unet, weights_sd, torch.float16, "cuda"
+        )

+
def run_lora(prompt, negative, lora_scale, selected_state):
|
117 |
+
global last_lora, last_merged, pipe
|
|
|
|
|
|
|
118 |
|
119 |
+
if negative == "":
|
120 |
+
negative = None
|
|
|
|
|
|
|
|
|
|
|
121 |
|
122 |
+
if not selected_state:
|
123 |
+
raise gr.Error("You must select a LoRA")
|
124 |
+
repo_name = sdxl_loras[selected_state.index]["repo"]
|
125 |
+
weight_name = sdxl_loras[selected_state.index]["weights"]
|
126 |
+
full_path_lora = saved_names[selected_state.index]
|
127 |
+
cross_attention_kwargs = None
|
128 |
+
if last_lora != repo_name:
|
129 |
+
if last_merged:
|
130 |
+
pipe = copy.deepcopy(original_pipe)
|
131 |
+
pipe.to(device)
|
132 |
+
else:
|
133 |
+
pipe.unload_lora_weights()
|
134 |
+
is_compatible = sdxl_loras[selected_state.index]["is_compatible"]
|
135 |
+
|
136 |
+
if is_compatible:
|
137 |
+
pipe.load_lora_weights(full_path_lora)
|
138 |
+
cross_attention_kwargs = {"scale": lora_scale}
|
139 |
+
else:
|
140 |
+
merge_incompatible_lora(full_path_lora, lora_scale)
|
141 |
+
last_merged = True
|
142 |
|
143 |
+
image = pipe(
|
|
|
144 |
prompt=prompt,
|
145 |
negative_prompt=negative,
|
146 |
width=768,
|
|
|
149 |
guidance_scale=7.5,
|
150 |
cross_attention_kwargs=cross_attention_kwargs,
|
151 |
).images[0]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
152 |
last_lora = repo_name
|
153 |
return image, gr.update(visible=True)
|
154 |
|
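For the "compatible" branch, here is a minimal, self-contained sketch (not part of the commit) of the diffusers calls run_lora now relies on: load_lora_weights() plus a per-call cross_attention_kwargs={"scale": ...}, unload_lora_weights() when switching LoRAs, and a deepcopy of the untouched pipeline to recover from a merged one. The model ID, LoRA path, and prompt are placeholders, and the real app tracks last_lora / last_merged as globals instead of taking a merged_before flag.

import copy
import torch
from diffusers import StableDiffusionXLPipeline

device = "cuda"

# Placeholder base model; the Space builds its SDXL pipeline elsewhere in app.py.
pipe = StableDiffusionXLPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
)
original_pipe = copy.deepcopy(pipe)  # pristine copy kept to undo merged LoRAs
pipe.to(device)


def generate(prompt, lora_path, lora_scale=0.9, merged_before=False):
    global pipe
    if merged_before:
        # A merged LoRA cannot be detached, so restore the untouched pipeline first.
        pipe = copy.deepcopy(original_pipe)
        pipe.to(device)
    else:
        # Diffusers-native LoRAs can simply be unloaded.
        pipe.unload_lora_weights()

    pipe.load_lora_weights(lora_path)  # "compatible" path only
    return pipe(
        prompt=prompt,
        width=768,
        height=768,
        guidance_scale=7.5,
        cross_attention_kwargs={"scale": lora_scale},  # per-call LoRA strength
    ).images[0]


# Hypothetical usage:
# image = generate("a watercolor fox", "loras/watercolor_xl.safetensors")

Keeping original_pipe on the CPU before moving pipe to the GPU mirrors the deepcopy-then-to(device) pattern at new lines 130–131 and, in this sketch, avoids holding two full SDXL copies in GPU memory at once.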