ariG23498 committed
Commit 14e72bd
Parent: 5fa34bb

Update app.py

Files changed (1):
app.py  +4 -4
app.py CHANGED
@@ -8,8 +8,8 @@ from transformers import (
     AutoTokenizer,
     BlipForConditionalGeneration,
     BlipProcessor,
-    OwlViTForObjectDetection,
-    OwlViTProcessor,
+    Owlv2ForObjectDetection,
+    Owlv2Processor,
     SamModel,
     SamProcessor,
 )
@@ -85,8 +85,8 @@ def run_image_captioner(image, device):
 def run_segmentation(image, object_to_segment, device):
     # OWL-V2 for object detection
     owl_v2_model_id = "google/owlv2-base-patch16-ensemble"
-    processor = Owlv2Processor.from_pretrained(owl_vit_model_id)
-    od_model = Owlv2ForObjectDetection.from_pretrained(owl_vit_model_id).to(device)
+    processor = Owlv2Processor.from_pretrained(owl_v2_model_id)
+    od_model = Owlv2ForObjectDetection.from_pretrained(owl_v2_model_id).to(device)
     text_queries = [object_to_segment]
     inputs = processor(text=text_queries, images=image, return_tensors="pt").to(device)
     with torch.no_grad():
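
For context, the diff ends just before the detection outputs are used. Below is a minimal sketch of how the rest of run_segmentation typically proceeds: the OWLv2 outputs are post-processed into pixel-space boxes, which are then passed to SAM as box prompts. The input image, the "a cat" query, the 0.3 score threshold, and the facebook/sam-vit-base checkpoint are illustrative assumptions, not taken from this commit.

import torch
from PIL import Image
from transformers import (
    Owlv2ForObjectDetection,
    Owlv2Processor,
    SamModel,
    SamProcessor,
)

device = "cuda" if torch.cuda.is_available() else "cpu"

# Zero-shot detection with OWLv2, mirroring the updated lines above.
owl_v2_model_id = "google/owlv2-base-patch16-ensemble"
processor = Owlv2Processor.from_pretrained(owl_v2_model_id)
od_model = Owlv2ForObjectDetection.from_pretrained(owl_v2_model_id).to(device)

image = Image.open("example.jpg").convert("RGB")  # placeholder input image
text_queries = ["a cat"]                          # placeholder object_to_segment

inputs = processor(text=text_queries, images=image, return_tensors="pt").to(device)
with torch.no_grad():
    outputs = od_model(**inputs)

# Turn the raw detection outputs into (x0, y0, x1, y1) boxes in pixel coordinates.
target_sizes = torch.tensor([image.size[::-1]], device=device)
results = processor.post_process_object_detection(
    outputs=outputs, target_sizes=target_sizes, threshold=0.3
)[0]
boxes = results["boxes"].cpu().tolist()  # assumes at least one detection above the threshold

# Prompt SAM with the detected boxes (checkpoint name is an assumption).
sam_model_id = "facebook/sam-vit-base"
sam_processor = SamProcessor.from_pretrained(sam_model_id)
sam_model = SamModel.from_pretrained(sam_model_id).to(device)

sam_inputs = sam_processor(image, input_boxes=[boxes], return_tensors="pt").to(device)
with torch.no_grad():
    sam_outputs = sam_model(**sam_inputs)

# Resize the predicted masks back to the original image resolution.
masks = sam_processor.image_processor.post_process_masks(
    sam_outputs.pred_masks.cpu(),
    sam_inputs["original_sizes"].cpu(),
    sam_inputs["reshaped_input_sizes"].cpu(),
)

Boxes are used as the SAM prompts here because OWLv2 already localizes the queried object; point prompts would also work but need an extra heuristic for picking a point inside each detection.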