Add inference section
This PR adds a code snippet showcasing end-to-end inference.
README.md CHANGED
@@ -6,6 +6,7 @@ tags:
 - yolov10
 datasets:
 - detection-datasets/coco
+inference: false
 ---
 
 ### Model Description
@@ -16,10 +17,10 @@ datasets:
 
 ### Installation
 ```
-pip install
+pip install git+https://github.com/THU-MIG/yolov10.git supervision
 ```
 
-###
+### Training/Validation
 ```python
 from ultralytics import YOLOv10
 
@@ -36,6 +37,63 @@ model.val(...)
 model.predict(...)
 ```
 
+### Inference
+
+Here's an end-to-end example showcasing inference on an image of cats:
+
+```python
+from ultralytics import YOLOv10
+import supervision as sv
+from PIL import Image
+import numpy as np  # needed for np.array(image) below
+import requests
+
+# load model
+model = YOLOv10.from_pretrained("nielsr/yolov10n")
+
+# load image
+url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
+image = Image.open(requests.get(url, stream=True).raw)
+image = np.array(image)
+
+# perform inference
+results = model(source=image, conf=0.25, verbose=False)[0]
+detections = sv.Detections.from_ultralytics(results)
+box_annotator = sv.BoxAnnotator()
+
+category_dict = {
+    0: 'person', 1: 'bicycle', 2: 'car', 3: 'motorcycle', 4: 'airplane', 5: 'bus',
+    6: 'train', 7: 'truck', 8: 'boat', 9: 'traffic light', 10: 'fire hydrant',
+    11: 'stop sign', 12: 'parking meter', 13: 'bench', 14: 'bird', 15: 'cat',
+    16: 'dog', 17: 'horse', 18: 'sheep', 19: 'cow', 20: 'elephant', 21: 'bear',
+    22: 'zebra', 23: 'giraffe', 24: 'backpack', 25: 'umbrella', 26: 'handbag',
+    27: 'tie', 28: 'suitcase', 29: 'frisbee', 30: 'skis', 31: 'snowboard',
+    32: 'sports ball', 33: 'kite', 34: 'baseball bat', 35: 'baseball glove',
+    36: 'skateboard', 37: 'surfboard', 38: 'tennis racket', 39: 'bottle',
+    40: 'wine glass', 41: 'cup', 42: 'fork', 43: 'knife', 44: 'spoon', 45: 'bowl',
+    46: 'banana', 47: 'apple', 48: 'sandwich', 49: 'orange', 50: 'broccoli',
+    51: 'carrot', 52: 'hot dog', 53: 'pizza', 54: 'donut', 55: 'cake',
+    56: 'chair', 57: 'couch', 58: 'potted plant', 59: 'bed', 60: 'dining table',
+    61: 'toilet', 62: 'tv', 63: 'laptop', 64: 'mouse', 65: 'remote', 66: 'keyboard',
+    67: 'cell phone', 68: 'microwave', 69: 'oven', 70: 'toaster', 71: 'sink',
+    72: 'refrigerator', 73: 'book', 74: 'clock', 75: 'vase', 76: 'scissors',
+    77: 'teddy bear', 78: 'hair drier', 79: 'toothbrush'
+}
+
+labels = [
+    f"{category_dict[class_id]} {confidence:.2f}"
+    for class_id, confidence in zip(detections.class_id, detections.confidence)
+]
+annotated_image = box_annotator.annotate(
+    image.copy(), detections=detections, labels=labels
+)
+
+Image.fromarray(annotated_image)  # displays the annotated image in a notebook
+```
+
+which shows:
+
+![image/png](https://cdn-uploads.huggingface.co/production/uploads/5f1158120c833276f61f1a84/N8cV9jam_UsEzexp-W98n.png)
+
 ### BibTeX Entry and Citation Info
 ```
 @article{wang2024yolov10,