Dataset: cppe-5

Languages: English
Multilinguality: monolingual
Size Categories: 1K<n<10K
Language Creators: found
Annotations Creators: crowdsourced
Source Datasets: original
ArXiv: 2112.09569
License: unknown
albertvillanova committed
Commit 3147fe9
1 Parent(s): 8dfb0eb

Delete loading script

Files changed (1):
  1. cppe-5.py +0 -134
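With the loading script gone, the dataset is meant to be served from plain data files, so it can be loaded without executing any repository code. A minimal usage sketch, assuming the dataset stays available under the repo id "cppe-5" and keeps the schema defined by the deleted script below (the repo id is an assumption; adjust it if the dataset lives under a user namespace):

from datasets import load_dataset

ds = load_dataset("cppe-5")  # assumed repo id; downloads the converted data files, no script needed
example = ds["train"][0]
print(example["image_id"], example["width"], example["height"])
print(example["objects"]["category"])  # one class label per bounding box, as in the old features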
cppe-5.py DELETED
@@ -1,134 +0,0 @@
- # coding=utf-8
- # Copyright 2021 The HuggingFace Datasets Authors and the current dataset script contributor.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- """CPPE-5 dataset."""
-
-
- import collections
- import json
- import os
-
- import datasets
-
-
- _CITATION = """\
- @misc{dagli2021cppe5,
-       title={CPPE-5: Medical Personal Protective Equipment Dataset},
-       author={Rishit Dagli and Ali Mustufa Shaikh},
-       year={2021},
-       eprint={2112.09569},
-       archivePrefix={arXiv},
-       primaryClass={cs.CV}
- }
- """
-
- _DESCRIPTION = """\
- CPPE - 5 (Medical Personal Protective Equipment) is a new challenging dataset with the goal
- to allow the study of subordinate categorization of medical personal protective equipments,
- which is not possible with other popular data sets that focus on broad level categories.
- """
-
- _HOMEPAGE = "https://sites.google.com/view/cppe5"
-
- _LICENSE = "Unknown"
-
- _URL = "https://drive.google.com/uc?id=1MGnaAfbckUmigGUvihz7uiHGC6rBIbvr"
-
- _CATEGORIES = ["Coverall", "Face_Shield", "Gloves", "Goggles", "Mask"]
-
-
- class CPPE5(datasets.GeneratorBasedBuilder):
-     """CPPE - 5 dataset."""
-
-     VERSION = datasets.Version("1.0.0")
-
-     def _info(self):
-         features = datasets.Features(
-             {
-                 "image_id": datasets.Value("int64"),
-                 "image": datasets.Image(),
-                 "width": datasets.Value("int32"),
-                 "height": datasets.Value("int32"),
-                 "objects": datasets.Sequence(
-                     {
-                         "id": datasets.Value("int64"),
-                         "area": datasets.Value("int64"),
-                         "bbox": datasets.Sequence(datasets.Value("float32"), length=4),
-                         "category": datasets.ClassLabel(names=_CATEGORIES),
-                     }
-                 ),
-             }
-         )
-         return datasets.DatasetInfo(
-             description=_DESCRIPTION,
-             features=features,
-             homepage=_HOMEPAGE,
-             license=_LICENSE,
-             citation=_CITATION,
-         )
-
-     def _split_generators(self, dl_manager):
-         archive = dl_manager.download(_URL)
-         return [
-             datasets.SplitGenerator(
-                 name=datasets.Split.TRAIN,
-                 gen_kwargs={
-                     "annotation_file_path": "annotations/train.json",
-                     "files": dl_manager.iter_archive(archive),
-                 },
-             ),
-             datasets.SplitGenerator(
-                 name=datasets.Split.TEST,
-                 gen_kwargs={
-                     "annotation_file_path": "annotations/test.json",
-                     "files": dl_manager.iter_archive(archive),
-                 },
-             ),
-         ]
-
-     def _generate_examples(self, annotation_file_path, files):
-         def process_annot(annot, category_id_to_category):
-             return {
-                 "id": annot["id"],
-                 "area": annot["area"],
-                 "bbox": annot["bbox"],
-                 "category": category_id_to_category[annot["category_id"]],
-             }
-
-         image_id_to_image = {}
-         idx = 0
-         # This loop relies on the ordering of the files in the archive:
-         # Annotation files come first, then the images.
-         for path, f in files:
-             file_name = os.path.basename(path)
-             if path == annotation_file_path:
-                 annotations = json.load(f)
-                 category_id_to_category = {category["id"]: category["name"] for category in annotations["categories"]}
-                 image_id_to_annotations = collections.defaultdict(list)
-                 for annot in annotations["annotations"]:
-                     image_id_to_annotations[annot["image_id"]].append(annot)
-                 image_id_to_image = {annot["file_name"]: annot for annot in annotations["images"]}
-             elif file_name in image_id_to_image:
-                 image = image_id_to_image[file_name]
-                 objects = [
-                     process_annot(annot, category_id_to_category) for annot in image_id_to_annotations[image["id"]]
-                 ]
-                 yield idx, {
-                     "image_id": image["id"],
-                     "image": {"path": path, "bytes": f.read()},
-                     "width": image["width"],
-                     "height": image["height"],
-                     "objects": objects,
-                 }
-                 idx += 1
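For anyone reproducing the old behaviour outside the datasets builder, the deleted _generate_examples reads the COCO-style annotation file, maps category ids to names, groups annotations by image_id, and yields each image together with its grouped objects. A standalone sketch of the same grouping over a locally extracted copy of the archive (the function name and paths are illustrative, not part of the original script):

import collections
import json
import os

def iter_split(annotation_file_path, images_dir):
    # Mirror of the deleted script's grouping: COCO-style json -> one record per image.
    with open(annotation_file_path) as f:
        annotations = json.load(f)
    category_id_to_category = {c["id"]: c["name"] for c in annotations["categories"]}
    image_id_to_annotations = collections.defaultdict(list)
    for annot in annotations["annotations"]:
        image_id_to_annotations[annot["image_id"]].append(annot)
    for image in annotations["images"]:
        objects = [
            {
                "id": a["id"],
                "area": a["area"],
                "bbox": a["bbox"],
                "category": category_id_to_category[a["category_id"]],
            }
            for a in image_id_to_annotations[image["id"]]
        ]
        yield {
            "image_id": image["id"],
            "image_path": os.path.join(images_dir, image["file_name"]),
            "width": image["width"],
            "height": image["height"],
            "objects": objects,
        }

# Example: iterate the train split of an extracted archive.
# for record in iter_split("annotations/train.json", "images"):
#     print(record["image_id"], len(record["objects"]))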