fantaxy committed on
Commit
c2d0882
·
verified ·
1 Parent(s): c2a5579

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +82 -55
app.py CHANGED
@@ -19,37 +19,53 @@ css = """
19
  }
20
  """
21
 
 
22
  if torch.cuda.is_available():
23
  power_device = "GPU"
24
- device = "cuda"
 
25
  else:
26
  power_device = "CPU"
27
  device = "cpu"
28
-
29
 
30
  huggingface_token = os.getenv("HUGGINFACE_TOKEN")
31
 
32
  model_path = snapshot_download(
33
- repo_id="black-forest-labs/FLUX.1-dev",
34
- repo_type="model",
35
  ignore_patterns=["*.md", "*..gitattributes"],
36
- local_dir="FLUX.1-dev",
37
- token=huggingface_token, # type a new token-id.
38
  )
39
 
40
-
41
- # Load pipeline
42
  controlnet = FluxControlNetModel.from_pretrained(
43
- "jasperai/Flux.1-dev-Controlnet-Upscaler", torch_dtype=torch.bfloat16
 
44
  ).to(device)
 
45
  pipe = FluxControlNetPipeline.from_pretrained(
46
- model_path, controlnet=controlnet, torch_dtype=torch.bfloat16
 
 
47
  )
48
  pipe.to(device)
49
 
 
 
 
 
50
  MAX_SEED = 1000000
51
- MAX_PIXEL_BUDGET = 1024 * 1024
52
 
 
 
 
 
 
 
 
53
 
54
  def process_input(input_image, upscale_factor, **kwargs):
55
  w, h = input_image.size
@@ -80,8 +96,7 @@ def process_input(input_image, upscale_factor, **kwargs):
80
 
81
  return input_image.resize((w, h)), w_original, h_original, was_resized
82
 
83
-
84
- @spaces.GPU#(duration=42)
85
  def infer(
86
  seed,
87
  randomize_seed,
@@ -91,46 +106,60 @@ def infer(
91
  controlnet_conditioning_scale,
92
  progress=gr.Progress(track_tqdm=True),
93
  ):
94
- if randomize_seed:
95
- seed = random.randint(0, MAX_SEED)
96
- true_input_image = input_image
97
- input_image, w_original, h_original, was_resized = process_input(
98
- input_image, upscale_factor
99
- )
100
-
101
- # rescale with upscale factor
102
- w, h = input_image.size
103
- control_image = input_image.resize((w * upscale_factor, h * upscale_factor))
104
-
105
- generator = torch.Generator().manual_seed(seed)
106
-
107
- gr.Info("Upscaling image...")
108
- image = pipe(
109
- prompt="",
110
- control_image=control_image,
111
- controlnet_conditioning_scale=controlnet_conditioning_scale,
112
- num_inference_steps=num_inference_steps,
113
- guidance_scale=3.5,
114
- height=control_image.size[1],
115
- width=control_image.size[0],
116
- generator=generator,
117
- ).images[0]
118
-
119
- if was_resized:
120
- gr.Info(
121
- f"Resizing output image to targeted {w_original * upscale_factor}x{h_original * upscale_factor} size."
122
  )
123
 
124
- # resize to target desired size
125
- image = image.resize((w_original * upscale_factor, h_original * upscale_factor))
126
- image.save("output.jpg")
127
- # convert to numpy
128
- return [true_input_image, image, seed]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
129
 
 
 
 
 
130
 
131
- with gr.Blocks(theme="Yntec/HaleyCH_Theme_Orange", css=css) as demo:
132
-
 
 
 
 
 
 
133
 
 
134
  with gr.Row():
135
  run_button = gr.Button(value="Run")
136
 
@@ -148,9 +177,9 @@ with gr.Blocks(theme="Yntec/HaleyCH_Theme_Orange", css=css) as demo:
148
  upscale_factor = gr.Slider(
149
  label="Upscale Factor",
150
  minimum=1,
151
- maximum=4,
152
  step=1,
153
- value=4,
154
  )
155
  controlnet_conditioning_scale = gr.Slider(
156
  label="Controlnet Conditioning Scale",
@@ -174,9 +203,8 @@ with gr.Blocks(theme="Yntec/HaleyCH_Theme_Orange", css=css) as demo:
174
 
175
  examples = gr.Examples(
176
  examples=[
177
- [42, False, "z1.webp", 28, 4, 0.6],
178
- [42, False, "z2.webp", 28, 4, 0.6],
179
-
180
  ],
181
  inputs=[
182
  seed,
@@ -204,7 +232,6 @@ with gr.Blocks(theme="Yntec/HaleyCH_Theme_Orange", css=css) as demo:
204
  ],
205
  outputs=result,
206
  show_api=False,
207
- # show_progress="minimal",
208
  )
209
 
210
- demo.queue().launch(share=False)
 
19
  }
20
  """
21
 
22
# Device and dtype setup: pick GPU + bfloat16 when CUDA is present,
# otherwise fall back to CPU + float32.
_has_cuda = torch.cuda.is_available()
power_device = "GPU" if _has_cuda else "CPU"
device = "cuda" if _has_cuda else "cpu"
dtype = torch.bfloat16 if _has_cuda else torch.float32
31
 
32
# Download the FLUX.1-dev weights locally.
# NOTE(review): the env var name "HUGGINFACE_TOKEN" looks misspelled
# ("HUGGINGFACE" expected) — kept as-is because the deployment may set
# the secret under the misspelled name; confirm against the Space config.
huggingface_token = os.getenv("HUGGINFACE_TOKEN")

model_path = snapshot_download(
    repo_id="black-forest-labs/FLUX.1-dev",
    repo_type="model",
    # Fixed glob: the previous "*..gitattributes" (double dot) matched
    # nothing, so the .gitattributes file was downloaded anyway.
    ignore_patterns=["*.md", "*.gitattributes"],
    local_dir="FLUX.1-dev",
    token=huggingface_token,
)
41
 
42
# Load the upscaler ControlNet and the FLUX pipeline with memory
# optimizations.
controlnet = FluxControlNetModel.from_pretrained(
    "jasperai/Flux.1-dev-Controlnet-Upscaler",
    torch_dtype=dtype,
).to(device)

pipe = FluxControlNetPipeline.from_pretrained(
    model_path,
    controlnet=controlnet,
    torch_dtype=dtype,
)

# enable_model_cpu_offload() manages device placement itself and must not
# be combined with a prior pipe.to("cuda") (see diffusers memory docs);
# it also requires a CUDA device. Apply it only on GPU; on CPU just keep
# the plain placement.
if device == "cuda":
    pipe.enable_model_cpu_offload()
else:
    pipe.to(device)
# Attention slicing trades a little speed for a lower peak memory footprint.
pipe.enable_attention_slicing()
58
+
59
  MAX_SEED = 1000000
60
+ MAX_PIXEL_BUDGET = 512 * 512 # Reduced from 1024 * 1024
61
 
62
def check_resources(threshold=0.9):
    """Return False when GPU memory usage exceeds *threshold* (fraction).

    On a CPU-only machine there is nothing to check, so this always
    returns True. Note: torch.cuda.memory_allocated() only counts tensors
    allocated by this process via the caching allocator, so other
    processes' usage is not reflected.
    """
    if not torch.cuda.is_available():
        return True
    gpu_memory = torch.cuda.get_device_properties(0).total_memory
    memory_allocated = torch.cuda.memory_allocated(0)
    return memory_allocated / gpu_memory <= threshold
69
 
70
  def process_input(input_image, upscale_factor, **kwargs):
71
  w, h = input_image.size
 
96
 
97
  return input_image.resize((w, h)), w_original, h_original, was_resized
98
 
99
+ @spaces.GPU
 
100
  def infer(
101
  seed,
102
  randomize_seed,
 
106
  controlnet_conditioning_scale,
107
  progress=gr.Progress(track_tqdm=True),
108
  ):
109
+ try:
110
+ if not check_resources():
111
+ gr.Warning("System resources are running low. Try reducing parameters.")
112
+ return None
113
+
114
+ if device == "cuda":
115
+ torch.cuda.empty_cache()
116
+
117
+ if randomize_seed:
118
+ seed = random.randint(0, MAX_SEED)
119
+
120
+ true_input_image = input_image
121
+ input_image, w_original, h_original, was_resized = process_input(
122
+ input_image, upscale_factor
 
 
 
 
 
 
 
 
 
 
 
 
 
 
123
  )
124
 
125
+ # rescale with upscale factor
126
+ w, h = input_image.size
127
+ control_image = input_image.resize((w * upscale_factor, h * upscale_factor))
128
+
129
+ generator = torch.Generator().manual_seed(seed)
130
+
131
+ gr.Info("Upscaling image...")
132
+ image = pipe(
133
+ prompt="",
134
+ control_image=control_image,
135
+ controlnet_conditioning_scale=controlnet_conditioning_scale,
136
+ num_inference_steps=num_inference_steps,
137
+ guidance_scale=3.5,
138
+ height=control_image.size[1],
139
+ width=control_image.size[0],
140
+ generator=generator,
141
+ ).images[0]
142
+
143
+ if was_resized:
144
+ gr.Info(
145
+ f"Resizing output image to targeted {w_original * upscale_factor}x{h_original * upscale_factor} size."
146
+ )
147
 
148
+ # resize to target desired size
149
+ image = image.resize((w_original * upscale_factor, h_original * upscale_factor))
150
+ image.save("output.jpg")
151
+ return [true_input_image, image, seed]
152
 
153
+ except RuntimeError as e:
154
+ if "out of memory" in str(e):
155
+ gr.Warning("Not enough GPU memory. Try reducing the upscale factor or image size.")
156
+ return None
157
+ raise e
158
+ except Exception as e:
159
+ gr.Error(f"An error occurred: {str(e)}")
160
+ return None
161
 
162
+ with gr.Blocks(theme="Yntec/HaleyCH_Theme_Orange", css=css) as demo:
163
  with gr.Row():
164
  run_button = gr.Button(value="Run")
165
 
 
177
  upscale_factor = gr.Slider(
178
  label="Upscale Factor",
179
  minimum=1,
180
+ maximum=2, # Reduced from 4
181
  step=1,
182
+ value=2, # Reduced default
183
  )
184
  controlnet_conditioning_scale = gr.Slider(
185
  label="Controlnet Conditioning Scale",
 
203
 
204
  examples = gr.Examples(
205
  examples=[
206
+ [42, False, "z1.webp", 28, 2, 0.6], # Updated upscale factor
207
+ [42, False, "z2.webp", 28, 2, 0.6], # Updated upscale factor
 
208
  ],
209
  inputs=[
210
  seed,
 
232
  ],
233
  outputs=result,
234
  show_api=False,
 
235
  )
236
 
237
+ demo.queue().launch(share=False)