---
license: cc-by-4.0
task_categories:
- object-detection
tags:
- COCO
- Detection
- '2017'
pretty_name: COCO detection dataset script
size_categories:
- 100K<n<1M
dataset_info:
  config_name: '2017'
  features:
  - name: id
    dtype: int64
  - name: objects
    struct:
    - name: bbox_id
      sequence: int64
    - name: category_id
      sequence:
        class_label:
          names:
            '0': N/A
            '1': person
            '2': bicycle
            '3': car
            '4': motorcycle
            '5': airplane
            '6': bus
            '7': train
            '8': truck
            '9': boat
            '10': traffic light
            '11': fire hydrant
            '12': street sign
            '13': stop sign
            '14': parking meter
            '15': bench
            '16': bird
            '17': cat
            '18': dog
            '19': horse
            '20': sheep
            '21': cow
            '22': elephant
            '23': bear
            '24': zebra
            '25': giraffe
            '26': hat
            '27': backpack
            '28': umbrella
            '29': shoe
            '30': eye glasses
            '31': handbag
            '32': tie
            '33': suitcase
            '34': frisbee
            '35': skis
            '36': snowboard
            '37': sports ball
            '38': kite
            '39': baseball bat
            '40': baseball glove
            '41': skateboard
            '42': surfboard
            '43': tennis racket
            '44': bottle
            '45': plate
            '46': wine glass
            '47': cup
            '48': fork
            '49': knife
            '50': spoon
            '51': bowl
            '52': banana
            '53': apple
            '54': sandwich
            '55': orange
            '56': broccoli
            '57': carrot
            '58': hot dog
            '59': pizza
            '60': donut
            '61': cake
            '62': chair
            '63': couch
            '64': potted plant
            '65': bed
            '66': mirror
            '67': dining table
            '68': window
            '69': desk
            '70': toilet
            '71': door
            '72': tv
            '73': laptop
            '74': mouse
            '75': remote
            '76': keyboard
            '77': cell phone
            '78': microwave
            '79': oven
            '80': toaster
            '81': sink
            '82': refrigerator
            '83': blender
            '84': book
            '85': clock
            '86': vase
            '87': scissors
            '88': teddy bear
            '89': hair drier
            '90': toothbrush
    - name: bbox
      sequence:
        sequence: float64
        length: 4
    - name: iscrowd
      sequence: int64
    - name: area
      sequence: float64
  - name: height
    dtype: int64
  - name: width
    dtype: int64
  - name: file_name
    dtype: string
  - name: coco_url
    dtype: string
  - name: image_path
    dtype: string
  splits:
  - name: train
    num_bytes: 87231216
    num_examples: 117266
  - name: validation
    num_bytes: 3692192
    num_examples: 4952
  download_size: 20405354669
  dataset_size: 90923408
---
## Usage
To use the COCO 2017 dataset, you first need to download it manually and place it in a local directory (referred to as `COCO_DIR` below):
```bash
wget http://images.cocodataset.org/zips/train2017.zip
wget http://images.cocodataset.org/zips/val2017.zip
wget http://images.cocodataset.org/annotations/annotations_trainval2017.zip
```

Then, to load the dataset:
```python
import datasets

COCO_DIR = ...(path to the downloaded dataset directory)...
ds = datasets.load_dataset(
    "yonigozlan/coco_2017_detection_script",
    "2017",
    data_dir=COCO_DIR,
    trust_remote_code=True,
)
```
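
Each example carries the image metadata (`file_name`, `height`, `width`, `image_path`) and an `objects` struct with the COCO annotations. As a quick sanity check, here is a minimal sketch that only relies on the features listed in the card above:
```python
example = ds["validation"][0]
print(example["file_name"], example["height"], example["width"])

# boxes are stored in COCO [x, y, width, height] format, one per object
print(example["objects"]["bbox"][:2])

# category ids index into the class-label names declared in the card
category_feature = ds["validation"].features["objects"]["category_id"].feature
print([category_feature.int2str(i) for i in example["objects"]["category_id"][:2]])
```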

## Benchmarking
Here is an example of how to benchmark a 🤗 Transformers object detection model on the validation split of this dataset, using `torchmetrics` to compute COCO-style mean average precision:

```python
import datasets
import torch
from PIL import Image
from torch.utils.data import DataLoader
from torchmetrics.detection.mean_ap import MeanAveragePrecision
from tqdm import tqdm

from transformers import AutoImageProcessor, AutoModelForObjectDetection

# prepare data
COCO_DIR = ...(path to the downloaded dataset directory)...
ds = datasets.load_dataset(
    "yonigozlan/coco_2017_detection_script",
    "2017",
    data_dir=COCO_DIR,
    trust_remote_code=True,
)
val_data = ds["validation"]
categories = val_data.features["objects"]["category_id"].feature.names
id2label = {index: x for index, x in enumerate(categories, start=0)}
label2id = {v: k for k, v in id2label.items()}
checkpoint = "facebook/detr-resnet-50"

# load model and processor
model = AutoModelForObjectDetection.from_pretrained(
    checkpoint, torch_dtype=torch.float16
).to("cuda")
id2label_model = model.config.id2label
processor = AutoImageProcessor.from_pretrained(checkpoint)


def collate_fn(batch):
    data = {}
    images = [Image.open(x["image_path"]).convert("RGB") for x in batch]
    data["images"] = images
    annotations = []
    for x in batch:
        boxes = x["objects"]["bbox"]
        # convert from COCO [x, y, width, height] to [x1, y1, x2, y2] (xyxy) format
        boxes = [[box[0], box[1], box[0] + box[2], box[1] + box[3]] for box in boxes]
        labels = x["objects"]["category_id"]
        boxes = torch.tensor(boxes)
        labels = torch.tensor(labels)
        annotations.append({"boxes": boxes, "labels": labels})
    data["original_size"] = [(x["height"], x["width"]) for x in batch]
    data["annotations"] = annotations
    return data


# prepare dataloader
dataloader = DataLoader(val_data, batch_size=8, collate_fn=collate_fn)

# prepare metric
metric = MeanAveragePrecision(box_format="xyxy", class_metrics=True)

# evaluation loop
for batch in tqdm(dataloader):
    inputs = (
        processor(batch["images"], return_tensors="pt").to("cuda").to(torch.float16)
    )
    with torch.no_grad():
        outputs = model(**inputs)
    # rescale predicted boxes back to the original image sizes
    target_sizes = torch.tensor(batch["original_size"]).to("cuda")
    results = processor.post_process_object_detection(
        outputs, threshold=0.0, target_sizes=target_sizes
    )

    # convert predicted label ids to dataset label ids
    if len(id2label_model) != len(id2label):
        for result in results:
            result["labels"] = torch.tensor(
                [label2id.get(id2label_model[x.item()], 0) for x in result["labels"]]
            )
    # move results back to cpu before updating the metric
    for result in results:
        for k, v in result.items():
            if isinstance(v, torch.Tensor):
                result[k] = v.to("cpu")
    metric.update(results, batch["annotations"])

metrics = metric.compute()
print(metrics)
```
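
`metric.compute()` returns a dictionary of COCO-style metrics (`map`, `map_50`, `map_75`, and, with `class_metrics=True`, per-class values). A minimal sketch for mapping per-class AP back to category names, assuming the `torchmetrics` output keys `classes` and `map_per_class` (check your installed version):
```python
# pair each evaluated class id with its AP and resolve it to a readable name
per_class_ap = {
    id2label[int(class_id)]: float(ap)
    for class_id, ap in zip(metrics["classes"], metrics["map_per_class"])
}
print(f"mAP@[.50:.95]: {metrics['map']:.3f}")
for name, ap in sorted(per_class_ap.items(), key=lambda kv: -kv[1])[:5]:
    print(f"{name}: {ap:.3f}")
```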