nielsr (HF staff) committed
Commit e381b9b
1 Parent(s): ba7c73f

Update README.md

Files changed (1): README.md (+7 -8)
README.md CHANGED
@@ -37,7 +37,7 @@ You can use the raw model for object detection. See the [model hub](https://hugg
  Here is how to use this model:
  
  ```python
- from transformers import AutoFeatureExtractor, DeformableDetrForObjectDetection
+ from transformers import AutoImageProcessor, DeformableDetrForObjectDetection
  import torch
  from PIL import Image
  import requests
@@ -45,24 +45,23 @@ import requests
  url = "http://images.cocodataset.org/val2017/000000039769.jpg"
  image = Image.open(requests.get(url, stream=True).raw)
  
- feature_extractor = AutoFeatureExtractor.from_pretrained("SenseTime/deformable-detr-with-box-refine-two-stage")
+ processor = AutoImageProcessor.from_pretrained("SenseTime/deformable-detr-with-box-refine-two-stage")
  model = DeformableDetrForObjectDetection.from_pretrained("SenseTime/deformable-detr-with-box-refine-two-stage")
  
- inputs = feature_extractor(images=image, return_tensors="pt")
+ inputs = processor(images=image, return_tensors="pt")
  outputs = model(**inputs)
  
  # convert outputs (bounding boxes and class logits) to COCO API
+ # let's only keep detections with score > 0.7
  target_sizes = torch.tensor([image.size[::-1]])
- results = feature_extractor.post_process(outputs, target_sizes=target_sizes)[0]
+ results = processor.post_process_object_detection(outputs, target_sizes=target_sizes, threshold=0.7)[0]
  
  for score, label, box in zip(results["scores"], results["labels"], results["boxes"]):
      box = [round(i, 2) for i in box.tolist()]
-     # let's only keep detections with score > 0.7
-     if score > 0.7:
-         print(
+     print(
          f"Detected {model.config.id2label[label.item()]} with confidence "
          f"{round(score.item(), 3)} at location {box}"
-         )
+     )
  ```
  
  Currently, both the feature extractor and model support PyTorch.
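
For reference, here is the complete snippet as it reads after this commit, consolidating the `+` side of the diff above into one runnable example. It is a sketch that assumes a transformers release recent enough to ship `AutoImageProcessor` and `post_process_object_detection`.

```python
# Consolidated "after" version of the README example; all calls are taken
# from the + lines of the diff above.
from transformers import AutoImageProcessor, DeformableDetrForObjectDetection
import torch
from PIL import Image
import requests

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

# load the image processor and the model from the Hub
processor = AutoImageProcessor.from_pretrained("SenseTime/deformable-detr-with-box-refine-two-stage")
model = DeformableDetrForObjectDetection.from_pretrained("SenseTime/deformable-detr-with-box-refine-two-stage")

# preprocess the image and run a forward pass
inputs = processor(images=image, return_tensors="pt")
outputs = model(**inputs)

# convert outputs (bounding boxes and class logits) to COCO API format,
# keeping only detections with score > 0.7
target_sizes = torch.tensor([image.size[::-1]])
results = processor.post_process_object_detection(outputs, target_sizes=target_sizes, threshold=0.7)[0]

for score, label, box in zip(results["scores"], results["labels"], results["boxes"]):
    box = [round(i, 2) for i in box.tolist()]
    print(
        f"Detected {model.config.id2label[label.item()]} with confidence "
        f"{round(score.item(), 3)} at location {box}"
    )
```

The substantive change is that the manual `if score > 0.7:` filter moves into the `threshold=0.7` argument of `post_process_object_detection`, which replaces the older `post_process` call, and `AutoFeatureExtractor` is swapped for `AutoImageProcessor`.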