Spaces:
Sleeping
Sleeping
def FastSAM_points_inference(
    input,
    input_size=1024,
    iou_threshold=0.7,
    conf_threshold=0.25,
    better_quality=False,
    withContours=True,
    use_retina=True,
    mask_random_color=True,
):
    """Run FastSAM point-prompted segmentation on an image array.

    Reads the module-level ``global_points`` / ``global_point_label`` lists
    (point prompts collected elsewhere in the app, in original-image
    coordinates) and clears both after inference so stale prompts do not
    leak into the next run.

    Args:
        input: Input image as a numpy array (converted via ``Image.fromarray``).
        input_size: Target length of the image's longer side; also passed to
            the model as ``imgsz``.
        iou_threshold: IoU threshold forwarded to the FastSAM model.
        conf_threshold: Confidence threshold forwarded to the FastSAM model.
        better_quality: Forwarded to ``fast_process``.
        withContours: Draw mask contours in the output figure.
        use_retina: Forwarded to ``fast_process``.
        mask_random_color: Color each mask randomly in the output figure.

    Returns:
        The annotated figure produced by ``fast_process``.
    """
    global global_points
    global global_point_label

    input = Image.fromarray(input)
    input_size = int(input_size)  # ensure imgsz is an integer

    # Thanks for the suggestion by hysts in HuggingFace:
    # resize so the longer side equals input_size, preserving aspect ratio.
    w, h = input.size
    scale = input_size / max(w, h)
    new_w = int(w * scale)
    new_h = int(h * scale)
    input = input.resize((new_w, new_h))

    # The click prompts were recorded against the original image;
    # map them into the resized image's coordinate system.
    scaled_points = [[int(x * scale) for x in point] for point in global_points]

    results = FASTSAM_MODEL(
        input,
        device=DEVICE,
        retina_masks=True,
        iou=iou_threshold,
        conf=conf_threshold,
        imgsz=input_size,
    )
    results = format_results(results[0], 0)
    annotations, _ = point_prompt(
        results, scaled_points, global_point_label, new_h, new_w
    )
    annotations = np.array([annotations])

    fig = fast_process(
        annotations=annotations,
        image=input,
        device=DEVICE,
        # NOTE(review): hard-coded 1024 matches the upstream FastSAM demo;
        # confirm it should not track input_size's default instead.
        scale=(1024 // input_size),
        better_quality=better_quality,
        mask_random_color=mask_random_color,
        bbox=None,
        use_retina=use_retina,
        withContours=withContours,
    )

    # Reset the click buffers for the next interaction.
    global_points = []
    global_point_label = []
    return fig