Update app.py
app.py CHANGED
@@ -74,17 +74,17 @@ def clipseg_prediction(image):
 
     # Vehicle checking
     if bbox_area(vehicle_bbox) > bbox_area(damage_bbox):
-        return True,
+        return True, bbox_normalization(damage_bbox, img_w, img_h)
     else:
         return False, [[]]
 
 
 @torch.no_grad()
-def foward_pass(image_input: np.ndarray, points: List[
+def foward_pass(image_input: np.ndarray, points: List[int]) -> np.ndarray:
     print('SAM-Segmentation-started------->')
     global cache_data
     image_input = Image.fromarray(image_input)
-    inputs = processor(image_input,
+    inputs = processor(image_input, input_boxes=points, return_tensors="pt").to(device)
     if not cache_data or not torch.equal(inputs['pixel_values'],cache_data[0]):
         embedding = model.get_image_embeddings(inputs["pixel_values"])
         pixels = inputs["pixel_values"]
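
The first change routes the damage box through bbox_normalization before it is handed to SAM as a prompt. That helper is not visible in this hunk, so the sketch below is only a hypothetical reconstruction: it assumes the box comes from CLIPSeg's 352x352 processing grid, gets rescaled to original-image pixels, and is nested in the one-image, one-box layout that SamProcessor's input_boxes argument expects. Both the constant and the function body are assumptions, not the Space's actual code.

from typing import List

CLIPSEG_SIZE = 352  # assumed CLIPSeg processing resolution (not shown in this diff)

def bbox_normalization(bbox: List[float], img_w: int, img_h: int) -> List[List[List[float]]]:
    # Hypothetical helper: rescale a CLIPSeg-space box to original-image pixels,
    # then nest it as [image][box][x1, y1, x2, y2] for SamProcessor's input_boxes.
    x1, y1, x2, y2 = bbox
    sx, sy = img_w / CLIPSEG_SIZE, img_h / CLIPSEG_SIZE
    return [[[x1 * sx, y1 * sy, x2 * sx, y2 * sy]]]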
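The other two changes complete foward_pass: the signature gains its parameter list and return annotation, and the processor call now forwards the box prompt and moves the tensors to the device. Together with the cache_data check, model.get_image_embeddings reruns only when the pixel tensor actually changes, so repeated prompts against the same image skip the expensive ViT encoder. Below is a minimal, self-contained sketch of that caching pattern with the transformers SAM API; the facebook/sam-vit-base checkpoint is an assumption (the hunk does not show which weights the Space loads), and the function is spelled forward_pass here for clarity.

import numpy as np
import torch
from PIL import Image
from transformers import SamModel, SamProcessor

device = "cuda" if torch.cuda.is_available() else "cpu"
processor = SamProcessor.from_pretrained("facebook/sam-vit-base")  # assumed checkpoint
model = SamModel.from_pretrained("facebook/sam-vit-base").to(device)

cache_data = None  # (pixel_values, image_embeddings) from the previous call

@torch.no_grad()
def forward_pass(image_input: np.ndarray, boxes) -> np.ndarray:
    # boxes: nested pixel-space box prompt, e.g. [[[x1, y1, x2, y2]]]
    global cache_data
    image = Image.fromarray(image_input)
    inputs = processor(image, input_boxes=boxes, return_tensors="pt").to(device)
    # Re-encode the image only when its pixel tensor differs from the cached one.
    if cache_data is None or not torch.equal(inputs["pixel_values"], cache_data[0]):
        embeddings = model.get_image_embeddings(inputs["pixel_values"])
        cache_data = (inputs["pixel_values"], embeddings)
    original_sizes = inputs.pop("original_sizes")
    reshaped_sizes = inputs.pop("reshaped_input_sizes")
    inputs.pop("pixel_values")  # the cached embeddings stand in for raw pixels
    outputs = model(**inputs, image_embeddings=cache_data[1], multimask_output=False)
    masks = processor.post_process_masks(
        outputs.pred_masks.cpu(), original_sizes.cpu(), reshaped_sizes.cpu()
    )
    return masks[0][0, 0].numpy()  # single boolean mask at the original image size

Keying the cache on torch.equal of the preprocessed tensor, as the diff does, is a simple but safe choice: it costs one elementwise comparison per call and never serves a stale embedding for a new image.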