sandrawang1031 committed on
Commit
bf6948f
1 Parent(s): eca813c
Files changed (2) hide show
  1. model.py +1 -4
  2. requirements.txt +2 -1
model.py CHANGED
@@ -4,6 +4,7 @@ import matplotlib.patches as mpatches
4
  from matplotlib import cm
5
 
6
  from PIL import Image
 
7
 
8
  import torch
9
  from transformers import AutoImageProcessor, UperNetForSemanticSegmentation
@@ -108,9 +109,6 @@ class VirtualStagingToolV2():
108
  init_image = init_image.resize((512, 512)).convert("RGB")
109
  mask_image = mask_image.resize((512, 512)).convert("RGB")
110
 
111
- display(init_image)
112
- display(mask_image)
113
-
114
  output_images = self.diffution_pipeline(
115
  prompt=prompt, image=init_image, mask_image=mask_image,
116
  # width=width, height=height,
@@ -141,7 +139,6 @@ class VirtualStagingToolV2():
141
 
142
  final_output_images = []
143
  for output_image in output_images:
144
- display(output_image)
145
  output_image = output_image.resize(init_image.size)
146
  final_output_images.append(output_image)
147
  return final_output_images, transparent_mask_image
 
4
  from matplotlib import cm
5
 
6
  from PIL import Image
7
+ import numpy as np
8
 
9
  import torch
10
  from transformers import AutoImageProcessor, UperNetForSemanticSegmentation
 
109
  init_image = init_image.resize((512, 512)).convert("RGB")
110
  mask_image = mask_image.resize((512, 512)).convert("RGB")
111
 
 
 
 
112
  output_images = self.diffution_pipeline(
113
  prompt=prompt, image=init_image, mask_image=mask_image,
114
  # width=width, height=height,
 
139
 
140
  final_output_images = []
141
  for output_image in output_images:
 
142
  output_image = output_image.resize(init_image.size)
143
  final_output_images.append(output_image)
144
  return final_output_images, transparent_mask_image
requirements.txt CHANGED
@@ -3,4 +3,5 @@ torch==1.11.0
3
  diffusers==0.16.1
4
  accelerate==0.19.0
5
  matplotlib==3.6.2
6
- pillow==9.2.0
 
 
3
  diffusers==0.16.1
4
  accelerate==0.19.0
5
  matplotlib==3.6.2
6
+ pillow==9.2.0
7
+ numpy==1.23.2