import torch
import cv2
import numpy as np
from sam.segment_anything import sam_model_registry, SamPredictor, SamAutomaticMaskGenerator

class Segmentor:
    def __init__(self, sam_args):
        """
        sam_args:
            sam_checkpoint: path of SAM checkpoint
            model_type: SAM model type key for sam_model_registry
            generator_args: args for everything_generator
            gpu_id: device
        """
        self.device = sam_args["gpu_id"]
        self.sam = sam_model_registry[sam_args["model_type"]](checkpoint=sam_args["sam_checkpoint"])
        self.sam.to(device=self.device)
        self.everything_generator = SamAutomaticMaskGenerator(model=self.sam, **sam_args['generator_args'])
        self.interactive_predictor = self.everything_generator.predictor
        self.have_embedded = False

    @torch.no_grad()
    def set_image(self, image):
        # Calculate the embedding only once per frame.
        if not self.have_embedded:
            self.interactive_predictor.set_image(image)
            self.have_embedded = True

    @torch.no_grad()
    def interactive_predict(self, prompts, mode, multimask=True):
        assert self.have_embedded, 'The image embedding for SAM must be set before prediction.'

        if mode == 'point':
            masks, scores, logits = self.interactive_predictor.predict(point_coords=prompts['point_coords'],
                                                                       point_labels=prompts['point_modes'],
                                                                       multimask_output=multimask)
        elif mode == 'mask':
            masks, scores, logits = self.interactive_predictor.predict(mask_input=prompts['mask_prompt'],
                                                                       multimask_output=multimask)
        elif mode == 'point_mask':
            masks, scores, logits = self.interactive_predictor.predict(point_coords=prompts['point_coords'],
                                                                       point_labels=prompts['point_modes'],
                                                                       mask_input=prompts['mask_prompt'],
                                                                       multimask_output=multimask)

        return masks, scores, logits

    @torch.no_grad()
    def segment_with_click(self, origin_frame, coords, modes, multimask=True):
        '''
        return:
            mask: one-hot (uint8) mask of the clicked object
        '''
        self.set_image(origin_frame)

        # First pass: predict from the point prompts alone.
        prompts = {
            'point_coords': coords,
            'point_modes': modes,
        }
        masks, scores, logits = self.interactive_predict(prompts, 'point', multimask)
        mask, logit = masks[np.argmax(scores)], logits[np.argmax(scores), :, :]

        # Second pass: refine by feeding the best low-resolution logit back as a mask prompt.
        prompts = {
            'point_coords': coords,
            'point_modes': modes,
            'mask_prompt': logit[None, :, :]
        }
        masks, scores, logits = self.interactive_predict(prompts, 'point_mask', multimask)
        mask = masks[np.argmax(scores)]

        return mask.astype(np.uint8)

    def segment_with_box(self, origin_frame, bbox, reset_image=False):
        if reset_image:
            self.interactive_predictor.set_image(origin_frame)
        else:
            self.set_image(origin_frame)
        # coord = np.array([[int((bbox[1][0] - bbox[0][0]) / 2.), int((bbox[1][1] - bbox[0][1]) / 2)]])
        # point_label = np.array([1])

        # First pass: predict from the box prompt alone (XYXY format).
        masks, scores, logits = self.interactive_predictor.predict(
            point_coords=None,
            point_labels=None,
            box=np.array([bbox[0][0], bbox[0][1], bbox[1][0], bbox[1][1]]),
            multimask_output=True
        )
        mask, logit = masks[np.argmax(scores)], logits[np.argmax(scores), :, :]

        # Second pass: refine with the best logit from the first pass as a mask prompt.
        masks, scores, logits = self.interactive_predictor.predict(
            point_coords=None,
            point_labels=None,
            box=np.array([[bbox[0][0], bbox[0][1], bbox[1][0], bbox[1][1]]]),
            mask_input=logit[None, :, :],
            multimask_output=True
        )
        mask = masks[np.argmax(scores)]

        return [mask]
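

# A minimal usage sketch, not part of the original file. It assumes a hypothetical
# checkpoint path './ckpt/sam_vit_b.pth', model type 'vit_b', and a test image
# 'frame.png'; adjust these to your setup. It builds a Segmentor and segments one
# object from a single positive click.
if __name__ == '__main__':
    sam_args = {
        'sam_checkpoint': './ckpt/sam_vit_b.pth',  # hypothetical path
        'model_type': 'vit_b',
        'generator_args': {},                      # default SamAutomaticMaskGenerator settings
        'gpu_id': 'cuda:0',
    }
    segmentor = Segmentor(sam_args)

    # SAM's predictor expects an RGB image.
    frame = cv2.cvtColor(cv2.imread('frame.png'), cv2.COLOR_BGR2RGB)
    click = np.array([[320, 240]])  # (x, y) pixel coordinate, hypothetical
    label = np.array([1])           # 1 = positive (foreground) click
    mask = segmentor.segment_with_click(frame, click, label)
    print('mask shape:', mask.shape, 'foreground pixels:', int(mask.sum()))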