yizhangliu committed on
Commit
bba2454
1 Parent(s): 210411f

Update app.py

Files changed (1)
  1. app.py +9 -5
app.py CHANGED
@@ -34,7 +34,7 @@ from loguru import logger
 
 from lama_cleaner.model_manager import ModelManager
 from lama_cleaner.schema import Config
-'''
+
 try:
     torch._C._jit_override_can_fuse_on_cpu(False)
     torch._C._jit_override_can_fuse_on_gpu(False)
@@ -104,6 +104,7 @@ def preprocess_mask(mask):
     mask = torch.from_numpy(mask)
     return mask
 
+model = None
 def model_process(init_image, mask):
     global model
 
@@ -117,8 +118,7 @@ def model_process(init_image, mask):
     # image, alpha_channel = load_img(origin_image_bytes)
     # Origin image shape: (512, 512, 3)
     original_shape = init_image.shape
-    interpolation = cv2.INTER_CUBIC
-
+    interpolation = cv2.INTER_CUBIC
 
     # form = request.form
 
@@ -160,7 +160,10 @@ def model_process(init_image, mask):
     image = resize_max_size(image, size_limit=size_limit, interpolation=interpolation)
     # logger.info(f"Resized image shape: {image.shape}")
     print(f"Resized image shape: {image.shape}")
-
+
+    if model is None:
+        return None
+
     mask, _ = load_img(input["mask"].read(), gray=True)
     mask = resize_max_size(mask, size_limit=size_limit, interpolation=interpolation)
 
@@ -182,6 +185,7 @@ def model_process(init_image, mask):
     ext = get_image_ext(origin_image_bytes)
     return ext
 
+'''
 model = ModelManager(
     name='lama',
     device=device,
@@ -223,7 +227,7 @@ def predict(dict):
     '''
     image = Image.fromarray(dict["image"])
     mask = Image.fromarray(dict["mask"])
-    #output = model_process(image, mask)
+    output1 = model_process(dict["image"], dict["mask"])
     output = mask #output.images[0]
     # output = pipe(prompt = prompt, image=init_image, mask_image=mask,guidance_scale=7.5)
 