nielsr (HF staff) committed
Commit cf2fda3
1 Parent(s): f8a3bb9

Update README.md

Files changed (1)
  1. README.md +2 -4
README.md CHANGED
@@ -50,18 +50,16 @@ outputs = model(**inputs)
 # Target image sizes (height, width) to rescale box predictions [batch_size, 2]
 target_sizes = torch.Tensor([image.size[::-1]])
 # Convert outputs (bounding boxes and class logits) to COCO API
-results = processor.post_process(outputs=outputs, target_sizes=target_sizes)
+results = processor.post_process_object_detection(outputs=outputs, threshold=0.1, target_sizes=target_sizes)
 
 i = 0 # Retrieve predictions for the first image for the corresponding text queries
 text = texts[i]
 boxes, scores, labels = results[i]["boxes"], results[i]["scores"], results[i]["labels"]
 
 # Print detected objects and rescaled box coordinates
-score_threshold = 0.1
 for box, score, label in zip(boxes, scores, labels):
     box = [round(i, 2) for i in box.tolist()]
-    if score >= score_threshold:
-        print(f"Detected {text[label]} with confidence {round(score.item(), 3)} at location {box}")
+    print(f"Detected {text[label]} with confidence {round(score.item(), 3)} at location {box}")
 ```
 
 
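The change replaces the previous `processor.post_process` call and the manual `score_threshold` filter with `processor.post_process_object_detection`, which takes the confidence threshold directly via its `threshold` argument. Below is a minimal sketch of the updated snippet in full context; only the changed lines come from this diff, while the surrounding setup (the `google/owlvit-base-patch32` checkpoint, the sample COCO image URL, and the example text queries) is an assumption added for illustration and is not part of this commit.

```python
# Sketch of the updated usage, assuming the google/owlvit-base-patch32 checkpoint,
# a sample COCO image, and example text queries (none of these come from the diff).
import requests
import torch
from PIL import Image
from transformers import OwlViTProcessor, OwlViTForObjectDetection

processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
model = OwlViTForObjectDetection.from_pretrained("google/owlvit-base-patch32")

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
texts = [["a photo of a cat", "a photo of a dog"]]  # one list of text queries per image

inputs = processor(text=texts, images=image, return_tensors="pt")
outputs = model(**inputs)

# Target image sizes (height, width) to rescale box predictions [batch_size, 2]
target_sizes = torch.Tensor([image.size[::-1]])
# Convert outputs (bounding boxes and class logits) to COCO API;
# the threshold argument replaces the manual score_threshold filtering removed by this commit
results = processor.post_process_object_detection(outputs=outputs, threshold=0.1, target_sizes=target_sizes)

i = 0 # Retrieve predictions for the first image for the corresponding text queries
text = texts[i]
boxes, scores, labels = results[i]["boxes"], results[i]["scores"], results[i]["labels"]

# Print detected objects and rescaled box coordinates
for box, score, label in zip(boxes, scores, labels):
    box = [round(i, 2) for i in box.tolist()]
    print(f"Detected {text[label]} with confidence {round(score.item(), 3)} at location {box}")
```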