from PIL import Image
import numpy as np

# FASTSAM_MODEL, DEVICE, global_points, global_point_label, format_results,
# point_prompt and fast_process are defined elsewhere in the app.


def FastSAM_points_inference(
    input,
    input_size=1024,
    iou_threshold=0.7,
    conf_threshold=0.25,
    better_quality=False,
    withContours=True,
    use_retina=True,
    mask_random_color=True,
):
    global global_points
    global global_point_label
    input = Image.fromarray(input)
    input_size = int(input_size)  # ensure imgsz is an integer
    # Thanks for the suggestion by hysts in HuggingFace.
    # Resize so the longer side matches input_size, keeping the aspect ratio.
    w, h = input.size
    scale = input_size / max(w, h)
    new_w = int(w * scale)
    new_h = int(h * scale)
    input = input.resize((new_w, new_h))
    # Rescale the clicked points to the resized image coordinates.
    scaled_points = [[int(x * scale) for x in point] for point in global_points]
    results = FASTSAM_MODEL(
        input,
        device=DEVICE,
        retina_masks=True,
        iou=iou_threshold,
        conf=conf_threshold,
        imgsz=input_size,
    )
    results = format_results(results[0], 0)
    # Keep the masks selected by the foreground/background point prompts.
    annotations, _ = point_prompt(results, scaled_points, global_point_label, new_h, new_w)
    annotations = np.array([annotations])
    fig = fast_process(
        annotations=annotations,
        image=input,
        device=DEVICE,
        scale=(1024 // input_size),
        better_quality=better_quality,
        mask_random_color=mask_random_color,
        bbox=None,
        use_retina=use_retina,
        withContours=withContours,
    )
    # Clear the accumulated point prompts for the next interaction.
    global_points = []
    global_point_label = []
    return fig
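
# --- Usage sketch (not part of the original file) ---
# A minimal way to exercise FastSAM_points_inference outside the Gradio UI,
# assuming FASTSAM_MODEL and DEVICE have already been initialized elsewhere in
# the app. The image path and point coordinates are placeholders; labels follow
# the convention used by point_prompt (1 = foreground click, 0 = background click).
if __name__ == "__main__":
    demo_image = np.array(Image.open("examples/dogs.jpg").convert("RGB"))  # placeholder path
    # One foreground click and one background click, in original-image pixel coordinates.
    global_points = [[250, 300], [50, 50]]
    global_point_label = [1, 0]
    result = FastSAM_points_inference(demo_image, input_size=1024)
    # `result` is whatever visualization fast_process returns (the annotated image).
    print(type(result))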