Aedelon committed
Commit 65a3955
Parent: c9d2810

First Commit

.gitattributes CHANGED
@@ -34,3 +34,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *.jit filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ images/fruits.jpg filter=lfs diff=lfs merge=lfs -text
+ images/street.jpg filter=lfs diff=lfs merge=lfs -text
app.py ADDED
@@ -0,0 +1,70 @@
+ import os
+ import warnings
+
+ import gradio as gr
+ import numpy as np
+ from PIL import Image
+
+ from lang_efficient_sam.LangEfficientSAM import LangEfficientSAM
+ from lang_efficient_sam.utils.draw_image import draw_image
+
+ warnings.filterwarnings("ignore")
+
+ model = LangEfficientSAM()
+
+
+ def predict(box_threshold, text_threshold, image_path, text_prompt):
+     print("Predicting... ", box_threshold, text_threshold, image_path, text_prompt)
+
+     image_pil = Image.open(image_path).convert("RGB")
+
+     masks, boxes, phrases, logits = model.predict(image_pil, text_prompt, box_threshold, text_threshold)
+
+     labels = [f"{phrase} {logit:.2f}" for phrase, logit in zip(phrases, logits)]
+
+     image_array = np.asarray(image_pil)
+     image = draw_image(image_array, masks, boxes, labels)
+     image = Image.fromarray(np.uint8(image)).convert("RGB")
+
+     return image
+
+
+ title = "LangEfficientSAM"
+
+ inputs = [
+     gr.Slider(0, 1, value=0.3, label="Box threshold"),
+     gr.Slider(0, 1, value=0.25, label="Text threshold"),
+     gr.Image(type="filepath", label='Image'),
+     gr.Textbox(lines=1, label="Text Prompt"),
+ ]
+
+ outputs = [gr.Image(type="pil", label="Output Image")]
+
+ examples = [
+     [
+         0.20,
+         0.20,
+         os.path.join(os.path.dirname(__file__), "images", "living.jpg"),
+         "fabric",
+     ],
+     [
+         0.36,
+         0.25,
+         os.path.join(os.path.dirname(__file__), "images", "fruits.jpg"),
+         "apple",
+     ],
+     [
+         0.20,
+         0.20,
+         os.path.join(os.path.dirname(__file__), "images", "street.jpg"),
+         "car",
+     ]
+ ]
+
+ demo = gr.Interface(fn=predict,
+                     inputs=inputs,
+                     outputs=outputs,
+                     examples=examples,
+                     title=title)
+
+ demo.launch(debug=False, share=False)
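
Note: demo.launch() runs at import time, so app.py cannot be imported by other scripts without starting the web server. A hypothetical variant of the last line (illustrative only, not part of this commit) that keeps predict() importable:

# Hypothetical alternative ending for app.py: the __main__ guard lets other code
# do `from app import predict` without launching the UI; setting share=True here
# would expose a temporary public gradio.live link for debugging.
if __name__ == "__main__":
    demo.launch(debug=False, share=False)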
images/fruits.jpg ADDED

Git LFS Details

  • SHA256: ef4c57e132a168f8bdc8ee6b9fe347f53153f4621c3c3ea03e8c60de95315407
  • Pointer size: 132 Bytes
  • Size of remote file: 2.1 MB
images/living.jpg ADDED
images/street.jpg ADDED

Git LFS Details

  • SHA256: 1cf5bcd19d6af31162de3237e2dc778dfc6053c929286b15947a51275e455c53
  • Pointer size: 132 Bytes
  • Size of remote file: 2.35 MB
lang_efficient_sam/LangEfficientSAM.py ADDED
@@ -0,0 +1,107 @@
+ import groundingdino.datasets.transforms as T
+ import torch
+ import numpy as np
+ from groundingdino.models import build_model
+ from groundingdino.util import box_ops
+ from groundingdino.util.inference import predict
+ from groundingdino.util.slconfig import SLConfig
+ from groundingdino.util.utils import clean_state_dict
+ from torchvision.transforms import ToTensor
+ from huggingface_hub import hf_hub_download
+ import time
+
+
+ def load_model_hugging_face(repo_id, filename, ckpt_config_filename, device='cpu'):
+     cache_config_file = hf_hub_download(repo_id=repo_id, filename=ckpt_config_filename)
+
+     args = SLConfig.fromfile(cache_config_file)
+     model = build_model(args)
+     args.device = device
+
+     cache_file = hf_hub_download(repo_id=repo_id, filename=filename)
+     checkpoint = torch.load(cache_file, map_location='cpu')
+     log = model.load_state_dict(clean_state_dict(checkpoint['model']), strict=False)
+     print(f"Model loaded from {cache_file} \n => {log}")
+     model.eval()
+     return model
+
+
+ class LangEfficientSAM:
+     def __init__(self, device=torch.device("cuda" if torch.cuda.is_available() else "cpu")):
+         self.device = device
+         print("Device:", self.device)
+         if self.device.type == "cpu":  # a torch.device never compares equal to the string "cpu"; compare its type
+             self.sam_efficient = torch.jit.load('./models/efficientsam_s_cpu.jit')
+         else:
+             self.sam_efficient = torch.jit.load('./models/efficientsam_s_gpu.jit')
+         ckpt_repo_id = "ShilongLiu/GroundingDINO"
+         ckpt_filename = "groundingdino_swinb_cogcoor.pth"
+         ckpt_config_filename = "GroundingDINO_SwinB.cfg.py"
+         self.groundingdino = load_model_hugging_face(ckpt_repo_id,
+                                                      ckpt_filename,
+                                                      ckpt_config_filename,
+                                                      self.device)
+
+     def predict_dino(self, image_pil, text_prompt, box_threshold, text_threshold):
+         start = time.time()
+         transform = T.Compose([
+             T.RandomResize([800], max_size=1333),
+             T.ToTensor(),
+             T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
+         ])
+         image_transformed, _ = transform(image_pil, None)
+
+         boxes, logits, phrases = predict(model=self.groundingdino,
+                                          image=image_transformed,
+                                          caption=text_prompt,
+                                          box_threshold=box_threshold,
+                                          text_threshold=text_threshold,
+                                          device=self.device)
+         W, H = image_pil.size
+         boxes = box_ops.box_cxcywh_to_xyxy(boxes) * torch.Tensor([W, H, W, H])
+         # print("DINO time: ", time.time() - start)
+
+         return boxes, logits, phrases
+
+     def predict_sam(self, image, box):
+         start = time.time()
+         img_tensor = ToTensor()(image).to(device=self.device)
+         bbox = torch.reshape(box.clone().detach(), [1, 1, 2, 2]).to(device=self.device)
+         bbox_labels = torch.reshape(torch.tensor([2, 3]), [1, 1, 2]).to(device=self.device)
+
+         predicted_logits, predicted_iou = self.sam_efficient(
+             img_tensor[None, ...],
+             bbox,
+             bbox_labels,
+         )
+         predicted_logits = predicted_logits.cpu()
+         all_masks = torch.ge(torch.sigmoid(predicted_logits[0, 0, :, :, :]), 0.5).numpy()
+         predicted_iou = predicted_iou[0, 0, ...].cpu().detach().numpy()
+
+         max_predicted_iou = -1
+         selected_mask_using_predicted_iou = None
+         for m in range(all_masks.shape[0]):
+             curr_predicted_iou = predicted_iou[m]
+             if (
+                 curr_predicted_iou > max_predicted_iou
+                 or selected_mask_using_predicted_iou is None
+             ):
+                 max_predicted_iou = curr_predicted_iou
+                 selected_mask_using_predicted_iou = all_masks[m]
+
+         # print("SAM time: ", time.time() - start)
+         return selected_mask_using_predicted_iou
+
+     def predict(self, image_pil, text_prompt, box_threshold=0.3, text_threshold=0.25):
+         boxes, logits, phrases = self.predict_dino(image_pil, text_prompt, box_threshold, text_threshold)
+         # masks = torch.tensor([])
+         masks = []
+         if len(boxes) > 0:
+             for box in boxes:
+                 mask = self.predict_sam(image_pil, box)
+                 masks.append(mask)
+
+         masks = np.array(masks)
+         masks = torch.from_numpy(masks)
+
+         return masks, boxes, phrases, logits
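
For reference, a minimal sketch of using the class outside Gradio (illustrative only, not part of this commit). It assumes the repository root as the working directory so the ./models/*.jit weights and the example images resolve; GroundingDINO weights are downloaded from the Hub on first run.

import numpy as np
from PIL import Image

from lang_efficient_sam.LangEfficientSAM import LangEfficientSAM
from lang_efficient_sam.utils.draw_image import draw_image

# Sketch only: mirrors the "fruits.jpg" / "apple" example and thresholds from app.py.
model = LangEfficientSAM()
image_pil = Image.open("images/fruits.jpg").convert("RGB")
masks, boxes, phrases, logits = model.predict(image_pil, "apple", box_threshold=0.36, text_threshold=0.25)

# masks: bool tensor of shape (num_boxes, H, W); boxes: (num_boxes, 4) in xyxy pixel coordinates.
labels = [f"{phrase} {logit:.2f}" for phrase, logit in zip(phrases, logits)]
annotated = draw_image(np.asarray(image_pil), masks, boxes, labels)
Image.fromarray(np.uint8(annotated)).save("fruits_apple.jpg")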
lang_efficient_sam/__init__.py ADDED
File without changes
lang_efficient_sam/__pycache__/LangEfficientSAM.cpython-312.pyc ADDED
Binary file (6.25 kB)
 
lang_efficient_sam/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (171 Bytes)
 
lang_efficient_sam/utils/__init__.py ADDED
File without changes
lang_efficient_sam/utils/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (177 Bytes)
 
lang_efficient_sam/utils/__pycache__/draw_image.cpython-312.pyc ADDED
Binary file (1.03 kB)
 
lang_efficient_sam/utils/draw_image.py ADDED
@@ -0,0 +1,12 @@
+ import torch
+ from torchvision.utils import draw_bounding_boxes
+ from torchvision.utils import draw_segmentation_masks
+
+
+ def draw_image(image, masks, boxes, labels, alpha=0.4):
+     image = torch.from_numpy(image).permute(2, 0, 1)
+     if len(boxes) > 0:
+         image = draw_bounding_boxes(image, boxes, colors=['red'] * len(boxes), labels=labels, width=2)
+     if len(masks) > 0:
+         image = draw_segmentation_masks(image, masks=masks, colors=['cyan'] * len(masks), alpha=alpha)
+     return image.numpy().transpose(1, 2, 0)
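
A toy call of draw_image with synthetic inputs (illustrative only), showing the expected shapes and dtypes:

import numpy as np
import torch
from lang_efficient_sam.utils.draw_image import draw_image

# Synthetic HWC uint8 image, one xyxy box and one boolean mask.
image = np.zeros((240, 320, 3), dtype=np.uint8)
boxes = torch.tensor([[40.0, 40.0, 200.0, 180.0]])
masks = torch.zeros((1, 240, 320), dtype=torch.bool)
masks[0, 60:160, 60:180] = True

out = draw_image(image, masks, boxes, ["example 0.99"])
print(out.shape)  # (240, 320, 3), annotated HWC array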
models/efficientsam_s_cpu.jit ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c37a9cd7fdf97a90f8698f9150333b6f8858e6896f074823dfac7907151551ba
+ size 134
models/efficientsam_s_gpu.jit ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c2d60aea949baf6ea041a1dfae540145d401da9294e7e7d2074618b3dbd7fa68
+ size 134
requirements.txt ADDED
@@ -0,0 +1,8 @@
+ gradio==4.18.0
+ groundingdino-py==0.4.0
+ matplotlib==3.8.2
+ numpy==1.26.4
+ opencv-python==4.9.0.80
+ pillow==10.2.0
+ torch==2.2.0
+ torchvision==0.17.0