Abijith committed on
Commit
9c40057
·
1 Parent(s): b377691

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -3
app.py CHANGED
@@ -74,17 +74,17 @@ def clipseg_prediction(image):
74
 
75
  # Vehicle checking
76
  if bbox_area(vehicle_bbox) > bbox_area(damage_bbox):
77
- return True, [bbox_normalization(damage_bbox, img_w, img_h)]
78
  else:
79
  return False, [[]]
80
 
81
 
82
  @torch.no_grad()
83
- def foward_pass(image_input: np.ndarray, points: List[List[int]]) -> np.ndarray:
84
  print('SAM-Segmentation-started------->')
85
  global cache_data
86
  image_input = Image.fromarray(image_input)
87
- inputs = processor(image_input, input_points=points, return_tensors="pt").to(device)
88
  if not cache_data or not torch.equal(inputs['pixel_values'],cache_data[0]):
89
  embedding = model.get_image_embeddings(inputs["pixel_values"])
90
  pixels = inputs["pixel_values"]
 
74
 
75
  # Vehicle checking
76
  if bbox_area(vehicle_bbox) > bbox_area(damage_bbox):
77
+ return True, bbox_normalization(damage_bbox, img_w, img_h)
78
  else:
79
  return False, [[]]
80
 
81
 
82
  @torch.no_grad()
83
+ def foward_pass(image_input: np.ndarray, points: List[int]) -> np.ndarray:
84
  print('SAM-Segmentation-started------->')
85
  global cache_data
86
  image_input = Image.fromarray(image_input)
87
+ inputs = processor(image_input, input_boxes=points, return_tensors="pt").to(device)
88
  if not cache_data or not torch.equal(inputs['pixel_values'],cache_data[0]):
89
  embedding = model.get_image_embeddings(inputs["pixel_values"])
90
  pixels = inputs["pixel_values"]