ariG23498 HF staff committed on
Commit
1bc459c
·
verified ·
1 Parent(s): 2eef325

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +13 -9
app.py CHANGED
@@ -5,13 +5,17 @@ import spaces
5
  from diffusers import FluxControlPipeline, FluxTransformer2DModel
6
 
7
  ####################################
8
- # Load the model(s) on CPU #
9
  ####################################
10
- path = "sayakpaul/FLUX.1-dev-edit-v0"
11
  edit_transformer = FluxTransformer2DModel.from_pretrained(path, torch_dtype=torch.bfloat16)
12
  pipeline = FluxControlPipeline.from_pretrained(
13
  "black-forest-labs/FLUX.1-dev", transformer=edit_transformer, torch_dtype=torch.bfloat16
14
  ).to("cuda")
 
 
 
 
15
 
16
  #####################################
17
  # The function for our Gradio app #
@@ -24,14 +28,14 @@ def generate(prompt, input_image):
24
  """
25
  # Perform inference
26
  output_image = pipeline(
27
- control_image=input_image,
28
  prompt=prompt,
29
- guidance_scale=30.0,
30
- num_inference_steps=50,
31
  max_sequence_length=512,
32
- height=input_image.height,
33
- width=input_image.width,
34
- generator=torch.manual_seed(0),
35
  ).images[0]
36
 
37
  return output_image
@@ -41,7 +45,7 @@ def launch_app():
41
  with gr.Blocks() as demo:
42
  gr.Markdown(
43
  """
44
- # Flux Control Editing
45
 
46
  This demo uses the [FLUX.1-dev](https://huggingface.co/black-forest-labs/FLUX.1-dev)
47
  pipeline with an edit transformer from [Sayak Paul](https://huggingface.co/sayakpaul).
 
5
  from diffusers import FluxControlPipeline, FluxTransformer2DModel
6
 
7
  ####################################
8
+ # Load the model(s) on GPU #
9
  ####################################
10
+ path = "sayakpaul/FLUX.1-dev-edit-v0"
11
  edit_transformer = FluxTransformer2DModel.from_pretrained(path, torch_dtype=torch.bfloat16)
12
  pipeline = FluxControlPipeline.from_pretrained(
13
  "black-forest-labs/FLUX.1-dev", transformer=edit_transformer, torch_dtype=torch.bfloat16
14
  ).to("cuda")
15
+ pipeline.load_lora_weights(
16
+ hf_hub_download("ByteDance/Hyper-SD", "Hyper-FLUX.1-dev-8steps-lora.safetensors"), adapter_name="hyper-sd"
17
+ )
18
+ pipeline.set_adapters(["hyper-sd"], adapter_weights=[0.125])
19
 
20
  #####################################
21
  # The function for our Gradio app #
 
28
  """
29
  # Perform inference
30
  output_image = pipeline(
31
+ control_image=image,
32
  prompt=prompt,
33
+ guidance_scale=30.,
34
+ num_inference_steps=8,
35
  max_sequence_length=512,
36
+ height=image.height,
37
+ width=image.width,
38
+ generator=torch.manual_seed(0)
39
  ).images[0]
40
 
41
  return output_image
 
45
  with gr.Blocks() as demo:
46
  gr.Markdown(
47
  """
48
+ # Flux Control Editing 🖌️
49
 
50
  This demo uses the [FLUX.1-dev](https://huggingface.co/black-forest-labs/FLUX.1-dev)
51
  pipeline with an edit transformer from [Sayak Paul](https://huggingface.co/sayakpaul).