kxhit committed
Commit bb54316
1 Parent(s): 9c3b9db

move sam to main

Files changed (2):
  1. app.py +20 -2
  2. dust3r/utils/image.py +1 -16
app.py CHANGED
@@ -72,6 +72,8 @@ from diffusers import DDIMScheduler
 from dataset import get_pose
 from CN_encoder import CN_encoder
 from pipeline_zero1to3 import Zero1to3StableDiffusionPipeline
+from segment_anything import sam_model_registry, SamPredictor
+import rembg
 
 pretrained_model_name_or_path = "kxic/EscherNet_demo"
 resolution = 256
@@ -115,6 +117,22 @@ pipeline.enable_vae_slicing()
 # pipeline.enable_xformers_memory_efficient_attention()
 
 
+#### object segmentation
+def sam_init():
+    sam_checkpoint = os.path.join("./sam_pt/sam_vit_h_4b8939.pth")
+    if os.path.exists(sam_checkpoint) is False:
+        os.system("wget https://dl.fbaipublicfiles.com/segment_anything/sam_vit_h_4b8939.pth -P ./sam_pt/")
+    model_type = "vit_h"
+
+    sam = sam_model_registry[model_type](checkpoint=sam_checkpoint).to(device=device)
+    predictor = SamPredictor(sam)
+    return predictor
+
+rembg_session = rembg.new_session()
+predictor = sam_init()
+
+
+
 @spaces.GPU(duration=120)
 def run_eschernet(eschernet_input_dict, sample_steps, sample_seed, nvs_num, nvs_mode):
     # set the random seed
@@ -330,7 +348,7 @@ def get_reconstructed_scene(filelist, schedule, niter, min_conf_thr,
     if os.path.exists(outdir):
         shutil.rmtree(outdir)
     os.makedirs(outdir, exist_ok=True)
-    imgs, imgs_rgba = load_images(filelist, size=image_size, verbose=not silent, do_remove_background=True)
+    imgs, imgs_rgba = load_images(filelist, size=image_size, verbose=not silent, do_remove_background=True, rembg_session=rembg_session, predictor=predictor)
     if len(imgs) == 1:
         imgs = [imgs[0], copy.deepcopy(imgs[0])]
         imgs[1]['idx'] = 1
@@ -544,7 +562,7 @@ Image views are treated as tokens and the camera pose is encoded by <b>CaPE (Cam
 
 <h4><b>Tips:</b></h4>
 
-- Our model can take <b>any number input images</b>. The more images you provide, the better the results.
+- Our model can take <b>any number input images</b>. The more images you provide (>=3 for this demo), the better the results.
 
 - Our model can generate <b>any number and any pose</b> novel views. You can specify the number of views you want to generate. In this demo, we set novel views on an <b>archemedian spiral</b> for simplicity.
 
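For context on what this commit wires together, here is a minimal sketch of the resulting call pattern in app.py (names are taken from the diff above; the body of load_images is assumed to use the injected handles the way its new signature suggests):

    # The heavy segmentation models are now built once at app start-up in
    # app.py and passed into load_images explicitly, instead of living as
    # module-level globals inside dust3r/utils/image.py.
    import rembg
    from segment_anything import sam_model_registry, SamPredictor

    rembg_session = rembg.new_session()  # one rembg session per process
    predictor = sam_init()               # SamPredictor over the ViT-H SAM checkpoint

    # filelist and image_size come from the surrounding demo code (not shown)
    imgs, imgs_rgba = load_images(filelist, size=image_size,
                                  do_remove_background=True,
                                  rembg_session=rembg_session,  # was a global in image.py
                                  predictor=predictor)          # was predictor = sam_init() in image.py
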
dust3r/utils/image.py CHANGED
@@ -12,23 +12,9 @@ from PIL.ImageOps import exif_transpose
 import torchvision.transforms as tvf
 os.environ["OPENCV_IO_ENABLE_OPENEXR"] = "1"
 import cv2 # noqa
-
-import rembg
-rembg_session = rembg.new_session()
-
 import time
 from PIL import Image
 from rembg import remove
-from segment_anything import sam_model_registry, SamPredictor
-def sam_init():
-    sam_checkpoint = os.path.join("./sam_pt/sam_vit_h_4b8939.pth")
-    if os.path.exists(sam_checkpoint) is False:
-        os.system("wget https://dl.fbaipublicfiles.com/segment_anything/sam_vit_h_4b8939.pth -P ./sam_pt/")
-    model_type = "vit_h"
-
-    sam = sam_model_registry[model_type](checkpoint=sam_checkpoint).to(device=f"cuda:{0 if torch.cuda.is_available() else 'cpu'}")
-    predictor = SamPredictor(sam)
-    return predictor
 
 def sam_segment(predictor, input_image, *bbox_coords):
     bbox = np.array(bbox_coords)
@@ -50,7 +36,6 @@ def sam_segment(predictor, input_image, *bbox_coords):
     torch.cuda.empty_cache()
     return Image.fromarray(out_image_bbox, mode='RGBA')
 
-predictor = sam_init()
 
 try:
     from pillow_heif import register_heif_opener # noqa
@@ -103,7 +88,7 @@ def _resize_pil_image(img, long_edge_size):
     new_size = tuple(int(round(x*long_edge_size/S)) for x in img.size)
     return img.resize(new_size, interp)
 
-def load_images(folder_or_list, size, square_ok=False, verbose=True, do_remove_background=True, rembg_session=None):
+def load_images(folder_or_list, size, square_ok=False, verbose=True, do_remove_background=True, rembg_session=None, predictor=None):
     """ open and convert all images in a list or folder to proper input format for DUSt3R
     """
    if isinstance(folder_or_list, str):
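The diff shows only the new load_images signature; its body is unchanged here. As a hypothetical sketch (inferred from the rembg import and the sam_segment helper kept in this file; _remove_background is an illustrative name, not a function in the repo), the two injected handles would be used roughly like this:

    from rembg import remove

    def _remove_background(img, rembg_session, predictor):
        # rembg cuts out the foreground object and returns an RGBA image; the
        # bounding box of the non-transparent pixels can then prompt SAM for a
        # cleaner mask via the sam_segment helper defined above.
        rgba = remove(img, session=rembg_session)
        bbox = rgba.getbbox()  # tight box around the non-zero pixels
        if predictor is not None and bbox is not None:
            rgba = sam_segment(predictor, img.convert('RGB'), *bbox)
        return rgba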