Update app.py
app.py (CHANGED)
```diff
@@ -206,7 +206,7 @@ loras = [
 #--------------------------------------------------Model Initialization-----------------------------------------------------------------------------------------#
 
 dtype = torch.bfloat16
-device = "cuda"
+device = "cuda"
 base_model = "black-forest-labs/FLUX.1-dev"
 
 #TAEF1 is very tiny autoencoder which uses the same "latent API" as FLUX.1's VAE. FLUX.1 is useful for real-time previewing of the FLUX.1 generation process.#
```
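For context, the hunk above only touches the `device` line, but it sits inside the model-initialization block that the TAEF1 comment describes: TAEF1 is a very small autoencoder that shares FLUX.1's latent space, which is what makes cheap real-time previews of the generation possible, while the full-size VAE is still used for the final decode. Below is a minimal sketch of that kind of setup; the variable names and exact loading calls are assumptions for illustration, not copied from this Space's app.py.

```python
# Sketch only: load FLUX.1-dev with the tiny TAEF1 autoencoder swapped in as
# the pipeline VAE so intermediate latents can be decoded cheaply for previews.
# Repo IDs are real; variable names and wiring are illustrative assumptions.
import torch
from diffusers import AutoencoderKL, AutoencoderTiny, DiffusionPipeline

dtype = torch.bfloat16
device = "cuda"
base_model = "black-forest-labs/FLUX.1-dev"

# TAEF1: a distilled, very small autoencoder that shares FLUX.1's latent space.
taef1 = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=dtype)

# Full-size VAE, kept around for the final high-quality decode.
good_vae = AutoencoderKL.from_pretrained(base_model, subfolder="vae", torch_dtype=dtype)

# With taef1 as the pipeline VAE, per-step preview decodes are fast enough for live updates.
pipe = DiffusionPipeline.from_pretrained(base_model, torch_dtype=dtype, vae=taef1).to(device)
```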
```diff
@@ -302,7 +302,7 @@ def generate_image_to_image(prompt_mash, image_input_path, image_strength, steps
     ).images[0]
     return final_image
 
-@spaces.GPU(
+@spaces.GPU()
 def run_lora(image_input, image_strength, cfg_scale, steps, selected_index, randomize_seed, seed, width, height, lora_scale, prompt = "", progress=gr.Progress(track_tqdm=True)):
     if selected_index is None:
         raise gr.Error("You must select a LoRA before proceeding.🧨")
```
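The substantive fix in this commit is the decorator line: the previous revision left `@spaces.GPU(` unclosed, a Python syntax error that would keep the Space from loading, and it is completed here as `@spaces.GPU()`. On ZeroGPU Spaces this decorator requests a GPU only for the duration of the decorated call. A hypothetical usage sketch follows; the demo function is illustrative and not part of app.py.

```python
# Hypothetical ZeroGPU decorator usage (not taken from app.py):
# @spaces.GPU() marks a function so the Space attaches a GPU only while it runs.
import spaces
import torch

@spaces.GPU()  # default time budget; a GPU is allocated only for this call
def gpu_matmul_demo(n: int = 1024) -> float:
    # Any CUDA work inside the decorated function runs on the allocated GPU.
    x = torch.randn(n, n, device="cuda")
    return (x @ x).mean().item()

# For longer-running calls, a time budget in seconds can be requested:
# @spaces.GPU(duration=120)
# def run_lora(...):
#     ...
```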