yizhangliu committed on
Commit ea1e63c
1 Parent(s): 9de2d65

Update app.py

Files changed (1)
  1. app.py +94 -2
app.py CHANGED
@@ -120,7 +120,7 @@ def load_img(nparr, gray: bool = False):
     return np_img, alpha_channel
 
 model = None
-def model_process(image, mask):
+def model_process_1(image, mask):
     global model
 
     # input = request.files
@@ -218,6 +218,98 @@ def model_process(image, mask):
     return ext
     '''
 
+def model_process(input):  # was: (image, mask)
+    global model
+    # input = request.files
+    # RGB
+    origin_image_bytes = input["image"].read()
+    print(f'origin_image_bytes = ', type(origin_image_bytes), len(origin_image_bytes))
+
+    image, alpha_channel = load_img(origin_image_bytes)
+    original_shape = image.shape
+    interpolation = cv2.INTER_CUBIC
+
+    form = request.form
+    print(f'size_limit_1_ = ', form["sizeLimit"], type(input["image"]))
+    size_limit: Union[int, str] = form.get("sizeLimit", "1080")
+    print(f'size_limit_2_ = {size_limit}')
+    if size_limit == "Original":
+        size_limit = max(image.shape)
+    else:
+        size_limit = int(size_limit)
+    print(f'size_limit_3_ = {size_limit}')
+
+    config = Config(
+        ldm_steps=form["ldmSteps"],
+        ldm_sampler=form["ldmSampler"],
+        hd_strategy=form["hdStrategy"],
+        zits_wireframe=form["zitsWireframe"],
+        hd_strategy_crop_margin=form["hdStrategyCropMargin"],
+        hd_strategy_crop_trigger_size=form["hdStrategyCropTrigerSize"],
+        hd_strategy_resize_limit=form["hdStrategyResizeLimit"],
+        prompt=form["prompt"],
+        use_croper=form["useCroper"],
+        croper_x=form["croperX"],
+        croper_y=form["croperY"],
+        croper_height=form["croperHeight"],
+        croper_width=form["croperWidth"],
+        sd_mask_blur=form["sdMaskBlur"],
+        sd_strength=form["sdStrength"],
+        sd_steps=form["sdSteps"],
+        sd_guidance_scale=form["sdGuidanceScale"],
+        sd_sampler=form["sdSampler"],
+        sd_seed=form["sdSeed"],
+        cv2_flag=form["cv2Flag"],
+        cv2_radius=form['cv2Radius']
+    )
+
+    print(f'config/alpha_channel/size_limit = {config} / {alpha_channel} / {size_limit}')
+    if config.sd_seed == -1:
+        config.sd_seed = random.randint(1, 999999999)
+
+    logger.info(f"Origin image shape: {original_shape}")
+    print(f"Origin image shape: {original_shape} / {image[250][250]}")
+    image = resize_max_size(image, size_limit=size_limit, interpolation=interpolation)
+    logger.info(f"Resized image shape: {image.shape} / {type(image)}")
+    print(f"Resized image shape: {image.shape} / {image[250][250]}")
+
+    mask, _ = load_img(input["mask"].read(), gray=True)
+    mask = resize_max_size(mask, size_limit=size_limit, interpolation=interpolation)
+    print(f"mask image shape: {mask.shape} / {type(mask)} / {mask[250][250]}")
+
+    start = time.time()
+    res_np_img = model(image, mask, config)
+    logger.info(f"process time: {(time.time() - start) * 1000}ms")
+    print(f"process time: {(time.time() - start) * 1000}ms, {res_np_img.shape} / {res_np_img[250][250]}")
+
+    torch.cuda.empty_cache()
+
+    if alpha_channel is not None:
+        print(f"liuyz_here_1_: {alpha_channel}")
+        if alpha_channel.shape[:2] != res_np_img.shape[:2]:
+            alpha_channel = cv2.resize(
+                alpha_channel, dsize=(res_np_img.shape[1], res_np_img.shape[0])
+            )
+        res_np_img = np.concatenate(
+            (res_np_img, alpha_channel[:, :, np.newaxis]), axis=-1
+        )
+
+    image = Image.fromarray(res_np_img)
+    image.save(f'./result_image.png')
+    return image
+    '''
+    ext = get_image_ext(origin_image_bytes)
+
+    response = make_response(
+        send_file(
+            io.BytesIO(numpy_to_bytes(res_np_img, ext)),
+            mimetype=f"image/{ext}",
+        )
+    )
+    response.headers["X-Seed"] = str(config.sd_seed)
+    return response
+    '''
+
 model = ModelManager(
     name='lama',
     device=device,
@@ -258,7 +350,7 @@ def predict(dict):
     # mask = dict["mask"] # .convert("RGB") #.resize((512, 512))
     '''
 
-    output = model_process(dict["image"], dict["mask"])
+    output = model_process(dict)  # dict["image"], dict["mask"])
     # output = mask #output.images[0]
     # output = pipe(prompt = prompt, image=init_image, mask_image=mask,guidance_scale=7.5)
 
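The new model_process reads input["image"] and input["mask"] as file-like objects (it calls .read() on them), while predict now hands it the Gradio dict directly, whose values are typically PIL images. Below is a minimal adapter sketch for bridging the two; the pil_to_filelike helper and the PNG serialization are illustrative assumptions, not part of this commit:

import io

def pil_to_filelike(pil_img, fmt="PNG"):
    # Serialize a PIL image into an in-memory stream that supports .read(),
    # matching the interface model_process expects for input["image"].
    buf = io.BytesIO()
    pil_img.save(buf, format=fmt)
    buf.seek(0)
    return buf

# Hypothetical usage inside predict(dict), assuming dict["image"] and
# dict["mask"] arrive as PIL images from the Gradio sketch component:
# output = model_process({
#     "image": pil_to_filelike(dict["image"]),
#     "mask": pil_to_filelike(dict["mask"]),
# })

Note that model_process also pulls its Config fields from Flask's request.form, so outside a Flask request context those lookups would need a stand-in as well.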