ariG23498 committed
Commit 5fa34bb
Parent(s): 6cf04fa

updating the detection model from owl-vit to owl-v2

Files changed (1)
  1. app.py +4 -4
app.py CHANGED
@@ -83,10 +83,10 @@ def run_image_captioner(image, device):
 
 @spaces.GPU()
 def run_segmentation(image, object_to_segment, device):
-    # OWL-ViT for object detection
-    owl_vit_model_id = "google/owlvit-base-patch32"
-    processor = OwlViTProcessor.from_pretrained(owl_vit_model_id)
-    od_model = OwlViTForObjectDetection.from_pretrained(owl_vit_model_id).to(device)
+    # OWL-v2 for object detection
+    owl_v2_model_id = "google/owlv2-base-patch16-ensemble"
+    processor = Owlv2Processor.from_pretrained(owl_v2_model_id)
+    od_model = Owlv2ForObjectDetection.from_pretrained(owl_v2_model_id).to(device)
     text_queries = [object_to_segment]
     inputs = processor(text=text_queries, images=image, return_tensors="pt").to(device)
     with torch.no_grad():
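
For reference, the classes swapped in by this commit come from the Hugging Face transformers library. The following is a minimal, self-contained sketch of the updated OWL-v2 detection step run on its own; the example image path, the detection threshold, and the post_process_object_detection call are illustrative assumptions and are not part of app.py.

import torch
from PIL import Image
from transformers import Owlv2Processor, Owlv2ForObjectDetection

device = "cuda" if torch.cuda.is_available() else "cpu"

# Same checkpoint as in the diff above
owl_v2_model_id = "google/owlv2-base-patch16-ensemble"
processor = Owlv2Processor.from_pretrained(owl_v2_model_id)
od_model = Owlv2ForObjectDetection.from_pretrained(owl_v2_model_id).to(device)

image = Image.open("example.jpg")  # hypothetical input image
text_queries = ["a cat"]           # plays the role of object_to_segment

inputs = processor(text=text_queries, images=image, return_tensors="pt").to(device)
with torch.no_grad():
    outputs = od_model(**inputs)

# Map the raw predictions back to pixel coordinates of the input image;
# the 0.2 score threshold here is an assumption, not a value from the Space.
target_sizes = torch.tensor([image.size[::-1]], device=device)  # (height, width)
results = processor.post_process_object_detection(
    outputs=outputs, threshold=0.2, target_sizes=target_sizes
)
print(results[0]["boxes"], results[0]["scores"])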