nvn04 committed on
Commit
8a94661
·
verified ·
1 Parent(s): 48fa3db

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +75 -74
app.py CHANGED
@@ -229,82 +229,83 @@ def submit_function(
229
  return new_result_image
230
 
231
 
232
- @spaces.GPU(duration=120)
233
- def submit_function_flux(
234
- person_image,
235
- cloth_image,
236
- cloth_type,
237
- num_inference_steps,
238
- guidance_scale,
239
- seed,
240
- show_type
241
- ):
242
-
243
- # Process image editor input
244
- person_image, mask = person_image["background"], person_image["layers"][0]
245
- mask = Image.open(mask).convert("L")
246
- if len(np.unique(np.array(mask))) == 1:
247
- mask = None
248
- else:
249
- mask = np.array(mask)
250
- mask[mask > 0] = 255
251
- mask = Image.fromarray(mask)
252
-
253
- # Set random seed
254
- generator = None
255
- if seed != -1:
256
- generator = torch.Generator(device='cuda').manual_seed(seed)
257
-
258
- # Process input images
259
- person_image = Image.open(person_image).convert("RGB")
260
- cloth_image = Image.open(cloth_image).convert("RGB")
261
 
262
- # Adjust image sizes
263
- person_image = resize_and_crop(person_image, (args.width, args.height))
264
- cloth_image = resize_and_padding(cloth_image, (args.width, args.height))
265
-
266
- # Process mask
267
- if mask is not None:
268
- mask = resize_and_crop(mask, (args.width, args.height))
269
- else:
270
- mask = automasker(
271
- person_image,
272
- cloth_type
273
- )['mask']
274
- mask = mask_processor.blur(mask, blur_factor=9)
275
-
276
- # Inference
277
- result_image = pipeline_flux(
278
- image=person_image,
279
- condition_image=cloth_image,
280
- mask_image=mask,
281
- width=args.width,
282
- height=args.height,
283
- num_inference_steps=num_inference_steps,
284
- guidance_scale=guidance_scale,
285
- generator=generator
286
- ).images[0]
287
-
288
- # Post-processing
289
- masked_person = vis_mask(person_image, mask)
290
-
291
- # Return result based on show type
292
- if show_type == "result only":
293
- return result_image
294
- else:
295
- width, height = person_image.size
296
- if show_type == "input & result":
297
- condition_width = width // 2
298
- conditions = image_grid([person_image, cloth_image], 2, 1)
299
- else:
300
- condition_width = width // 3
301
- conditions = image_grid([person_image, masked_person, cloth_image], 3, 1)
302
 
303
- conditions = conditions.resize((condition_width, height), Image.NEAREST)
304
- new_result_image = Image.new("RGB", (width + condition_width + 5, height))
305
- new_result_image.paste(conditions, (0, 0))
306
- new_result_image.paste(result_image, (condition_width + 5, 0))
307
- return new_result_image
 
308
 
309
 
310
  def person_example_fn(image_path):
 
229
  return new_result_image
230
 
231
 
232
+ # @spaces.GPU(duration=120)
233
+ # def submit_function_flux(
234
+ # person_image,
235
+ # cloth_image,
236
+ # cloth_type,
237
+ # num_inference_steps,
238
+ # guidance_scale,
239
+ # seed,
240
+ # show_type
241
+ # ):
242
+
243
+ # # Process image editor input
244
+ # person_image, mask = person_image["background"], person_image["layers"][0]
245
+ # mask = Image.open(mask).convert("L")
246
+ # if len(np.unique(np.array(mask))) == 1:
247
+ # mask = None
248
+ # else:
249
+ # mask = np.array(mask)
250
+ # mask[mask > 0] = 255
251
+ # mask = Image.fromarray(mask)
252
+
253
+ # # Set random seed
254
+ # generator = None
255
+ # if seed != -1:
256
+ # generator = torch.Generator(device='cuda').manual_seed(seed)
257
+
258
+ # # Process input images
259
+ # person_image = Image.open(person_image).convert("RGB")
260
+ # cloth_image = Image.open(cloth_image).convert("RGB")
261
 
262
+ # # Adjust image sizes
263
+ # person_image = resize_and_crop(person_image, (args.width, args.height))
264
+ # cloth_image = resize_and_padding(cloth_image, (args.width, args.height))
265
+
266
+ # # Process mask
267
+ # if mask is not None:
268
+ # mask = resize_and_crop(mask, (args.width, args.height))
269
+ # else:
270
+ # mask = automasker(
271
+ # person_image,
272
+ # cloth_type
273
+ # )['mask']
274
+ # mask = mask_processor.blur(mask, blur_factor=9)
275
+
276
+ # # Inference
277
+ # result_image = pipeline_flux(
278
+ # image=person_image,
279
+ # condition_image=cloth_image,
280
+ # mask_image=mask,
281
+ # width=args.width,
282
+ # height=args.height,
283
+ # num_inference_steps=num_inference_steps,
284
+ # guidance_scale=guidance_scale,
285
+ # generator=generator
286
+ # ).images[0]
287
+
288
+ # # Post-processing
289
+ # masked_person = vis_mask(person_image, mask)
290
+
291
+ # # Return result based on show type
292
+ # if show_type == "result only":
293
+ # return result_image
294
+ # else:
295
+ # width, height = person_image.size
296
+ # if show_type == "input & result":
297
+ # condition_width = width // 2
298
+ # conditions = image_grid([person_image, cloth_image], 2, 1)
299
+ # else:
300
+ # condition_width = width // 3
301
+ # conditions = image_grid([person_image, masked_person, cloth_image], 3, 1)
302
 
303
+ # conditions = conditions.resize((condition_width, height), Image.NEAREST)
304
+ # new_result_image = Image.new("RGB", (width + condition_width + 5, height))
305
+ # new_result_image.paste(conditions, (0, 0))
306
+ # new_result_image.paste(result_image, (condition_width + 5, 0))
307
+ # return new_result_image
308
+
309
 
310
 
311
  def person_example_fn(image_path):