Commit 66f6a5e
albertvillanova committed
1 Parent(s): 4d1d3ae

Convert dataset to Parquet (#5)

- Convert dataset to Parquet (8dfb0ebc19f681973bcd14712924c95961e8820f)
- Delete loading script (3147fe9de25dcf5e0d79caef7b8ab15bb120a6e2)
- Delete legacy dataset_infos.json (7c1943574cc1130f735f5f7ff1ba50231b4aa166)

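With the conversion in place, the dataset is read straight from the Parquet files and the README metadata, so no loading script runs at load time. A minimal sketch of loading after this commit, assuming the Hub dataset id `cppe-5` (the fully qualified repo id may differ):

```python
# Minimal sketch: loading CPPE-5 after the Parquet conversion.
# The dataset id "cppe-5" is an assumption; no script is executed,
# the library reads data/train-* and data/test-* directly.
from datasets import load_dataset

ds = load_dataset("cppe-5")
print(ds)                                     # DatasetDict with "train" and "test" splits
print(ds["train"][0]["objects"]["category"])  # per-bounding-box class labels
```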
README.md CHANGED
@@ -50,13 +50,20 @@ dataset_info:
             '4': Mask
   splits:
   - name: train
-    num_bytes: 240481257
+    num_bytes: 240463364.0
     num_examples: 1000
   - name: test
-    num_bytes: 4172715
+    num_bytes: 4172164.0
     num_examples: 29
-  download_size: 238482705
-  dataset_size: 244653972
+  download_size: 241152653
+  dataset_size: 244635528.0
+configs:
+- config_name: default
+  data_files:
+  - split: train
+    path: data/train-*
+  - split: test
+    path: data/test-*
 ---
 
 # Dataset Card for CPPE - 5
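
The new `configs` block is what maps the Parquet files to named splits without a script. Roughly, it is equivalent to calling the generic Parquet builder with explicit `data_files`; a hedged sketch, with file names taken from the files added in this commit:

```python
# Sketch of what the README `configs` section resolves to: the generic
# Parquet builder with an explicit split-to-file mapping.
from datasets import load_dataset

ds = load_dataset(
    "parquet",
    data_files={
        "train": "data/train-00000-of-00001.parquet",
        "test": "data/test-00000-of-00001.parquet",
    },
)
print(ds["train"].num_rows, ds["test"].num_rows)  # expected: 1000 29
```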
cppe-5.py DELETED
@@ -1,134 +0,0 @@
-# coding=utf-8
-# Copyright 2021 The HuggingFace Datasets Authors and the current dataset script contributor.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""CPPE-5 dataset."""
-
-
-import collections
-import json
-import os
-
-import datasets
-
-
-_CITATION = """\
-@misc{dagli2021cppe5,
-      title={CPPE-5: Medical Personal Protective Equipment Dataset},
-      author={Rishit Dagli and Ali Mustufa Shaikh},
-      year={2021},
-      eprint={2112.09569},
-      archivePrefix={arXiv},
-      primaryClass={cs.CV}
-}
-"""
-
-_DESCRIPTION = """\
-CPPE - 5 (Medical Personal Protective Equipment) is a new challenging dataset with the goal
-to allow the study of subordinate categorization of medical personal protective equipments,
-which is not possible with other popular data sets that focus on broad level categories.
-"""
-
-_HOMEPAGE = "https://sites.google.com/view/cppe5"
-
-_LICENSE = "Unknown"
-
-_URL = "https://drive.google.com/uc?id=1MGnaAfbckUmigGUvihz7uiHGC6rBIbvr"
-
-_CATEGORIES = ["Coverall", "Face_Shield", "Gloves", "Goggles", "Mask"]
-
-
-class CPPE5(datasets.GeneratorBasedBuilder):
-    """CPPE - 5 dataset."""
-
-    VERSION = datasets.Version("1.0.0")
-
-    def _info(self):
-        features = datasets.Features(
-            {
-                "image_id": datasets.Value("int64"),
-                "image": datasets.Image(),
-                "width": datasets.Value("int32"),
-                "height": datasets.Value("int32"),
-                "objects": datasets.Sequence(
-                    {
-                        "id": datasets.Value("int64"),
-                        "area": datasets.Value("int64"),
-                        "bbox": datasets.Sequence(datasets.Value("float32"), length=4),
-                        "category": datasets.ClassLabel(names=_CATEGORIES),
-                    }
-                ),
-            }
-        )
-        return datasets.DatasetInfo(
-            description=_DESCRIPTION,
-            features=features,
-            homepage=_HOMEPAGE,
-            license=_LICENSE,
-            citation=_CITATION,
-        )
-
-    def _split_generators(self, dl_manager):
-        archive = dl_manager.download(_URL)
-        return [
-            datasets.SplitGenerator(
-                name=datasets.Split.TRAIN,
-                gen_kwargs={
-                    "annotation_file_path": "annotations/train.json",
-                    "files": dl_manager.iter_archive(archive),
-                },
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.TEST,
-                gen_kwargs={
-                    "annotation_file_path": "annotations/test.json",
-                    "files": dl_manager.iter_archive(archive),
-                },
-            ),
-        ]
-
-    def _generate_examples(self, annotation_file_path, files):
-        def process_annot(annot, category_id_to_category):
-            return {
-                "id": annot["id"],
-                "area": annot["area"],
-                "bbox": annot["bbox"],
-                "category": category_id_to_category[annot["category_id"]],
-            }
-
-        image_id_to_image = {}
-        idx = 0
-        # This loop relies on the ordering of the files in the archive:
-        # Annotation files come first, then the images.
-        for path, f in files:
-            file_name = os.path.basename(path)
-            if path == annotation_file_path:
-                annotations = json.load(f)
-                category_id_to_category = {category["id"]: category["name"] for category in annotations["categories"]}
-                image_id_to_annotations = collections.defaultdict(list)
-                for annot in annotations["annotations"]:
-                    image_id_to_annotations[annot["image_id"]].append(annot)
-                image_id_to_image = {annot["file_name"]: annot for annot in annotations["images"]}
-            elif file_name in image_id_to_image:
-                image = image_id_to_image[file_name]
-                objects = [
-                    process_annot(annot, category_id_to_category) for annot in image_id_to_annotations[image["id"]]
-                ]
-                yield idx, {
-                    "image_id": image["id"],
-                    "image": {"path": path, "bytes": f.read()},
-                    "width": image["width"],
-                    "height": image["height"],
-                    "objects": objects,
-                }
-                idx += 1
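
The join the deleted script performed at generation time (matching the COCO-style annotations in `annotations/*.json` to images streamed from the archive, which is why it depended on file ordering) is now materialized in the committed Parquet files. A small sketch inspecting one of them directly with pyarrow; the local path is an assumption:

```python
# Sketch: inspect a committed Parquet file without the datasets library.
# Assumes data/test-00000-of-00001.parquet has been downloaded locally.
import pyarrow.parquet as pq

table = pq.read_table("data/test-00000-of-00001.parquet")
print(table.schema)    # columns: image_id, image, width, height, objects
print(table.num_rows)  # 29, matching the test split metadata above
```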
data/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ba7343d525f53dab1e84e0c5a86087960cb739e697b74d60b84bd28253e84740
+size 4137134
data/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:65f603cc9b482a529496acf0018b93590243d3cfc15b40e6e858dde8284dadd6
+size 237015519
dataset_infos.json DELETED
@@ -1 +0,0 @@
-{"default": {"description": "CPPE - 5 (Medical Personal Protective Equipment) is a new challenging dataset with the goal\nto allow the study of subordinate categorization of medical personal protective equipments,\nwhich is not possible with other popular data sets that focus on broad level categories.\n", "citation": "@misc{dagli2021cppe5,\n title={CPPE-5: Medical Personal Protective Equipment Dataset},\n author={Rishit Dagli and Ali Mustufa Shaikh},\n year={2021},\n eprint={2112.09569},\n archivePrefix={arXiv},\n primaryClass={cs.CV}\n}\n", "homepage": "https://sites.google.com/view/cppe5", "license": "Unknown", "features": {"image_id": {"dtype": "int64", "id": null, "_type": "Value"}, "image": {"id": null, "_type": "Image"}, "width": {"dtype": "int32", "id": null, "_type": "Value"}, "height": {"dtype": "int32", "id": null, "_type": "Value"}, "objects": {"feature": {"id": {"dtype": "int64", "id": null, "_type": "Value"}, "area": {"dtype": "int64", "id": null, "_type": "Value"}, "bbox": {"feature": {"dtype": "float32", "id": null, "_type": "Value"}, "length": 4, "id": null, "_type": "Sequence"}, "category": {"num_classes": 5, "names": ["Coverall", "Face_Shield", "Gloves", "Goggles", "Mask"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "cppe5", "config_name": "default", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 240481281, "num_examples": 1000, "dataset_name": "cppe5"}, "test": {"name": "test", "num_bytes": 4172739, "num_examples": 29, "dataset_name": "cppe5"}}, "download_checksums": {"https://drive.google.com/uc?id=1MGnaAfbckUmigGUvihz7uiHGC6rBIbvr": {"num_bytes": 238482705, "checksum": "1151086e59fcb87825ecf4d362847a3f023ba69e7ace0f513d5aadc0e3dd3094"}}, "download_size": 238482705, "post_processing_size": null, "dataset_size": 244654020, "size_in_bytes": 483136725}}