#!/usr/bin/python3
# -*- coding: utf-8 -*-
import json
from pathlib import Path
import datasets
from PIL import Image
# _URL = "https://drive.google.com/uc?id=1MGnaAfbckUmigGUvihz7uiHGC6rBIbvr"
_HOMEPAGE = "https://sites.google.com/view/cppe5"
_LICENSE = "Unknown"
_CATEGORIES = ["Coverall", "Face_Shield", "Gloves", "Goggles", "Mask"]
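# datasets.ClassLabel below maps these class names to integer ids 0-4 in this list order.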
_CITATION = """\
@misc{dagli2021cppe5,
title={CPPE-5: Medical Personal Protective Equipment Dataset},
author={Rishit Dagli and Ali Mustufa Shaikh},
year={2021},
eprint={2112.09569},
archivePrefix={arXiv},
primaryClass={cs.CV}
}
"""
_DESCRIPTION = """\
CPPE-5 (Medical Personal Protective Equipment) is a new, challenging dataset whose goal
is to allow the study of subordinate categorization of medical personal protective
equipment, which is not possible with other popular datasets that focus on broad-level
categories.
"""


class CPPE5(datasets.GeneratorBasedBuilder):
    """Builder for the CPPE-5 medical personal protective equipment dataset."""

    VERSION = datasets.Version("1.0.0")

def _info(self):
features = datasets.Features(
{
"image_id": datasets.Value("int64"),
"image": datasets.Image(),
"width": datasets.Value("int32"),
"height": datasets.Value("int32"),
"objects": datasets.Sequence(
feature=datasets.Features({
"id": datasets.Value("int64"),
"area": datasets.Value("int64"),
"bbox": datasets.Sequence(datasets.Value("float32"), length=4),
"category": datasets.ClassLabel(names=_CATEGORIES),
})
),
}
)
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=features,
homepage=_HOMEPAGE,
license=_LICENSE,
citation=_CITATION,
)

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators for the train and test splits."""
        # The annotations ship with the repository as newline-delimited JSON,
        # one record per image, under data/annotations/.
        train_json = dl_manager.download("data/annotations/train.jsonl")
        test_json = dl_manager.download("data/annotations/test.jsonl")
        # The download manager is forwarded so that the images referenced by
        # each record can be fetched while examples are generated.
        return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={
"archive_path": train_json,
"dl_manager": dl_manager,
},
),
datasets.SplitGenerator(
name=datasets.Split.TEST,
gen_kwargs={
"archive_path": test_json,
"dl_manager": dl_manager,
},
),
]

    def _generate_examples(self, archive_path, dl_manager):
        """Yields examples, one per annotated image."""
        archive_path = Path(archive_path)
        idx = 0
        with open(archive_path, "r", encoding="utf-8") as f:
            for row in f:
                # Each JSONL row holds one image record with its annotations.
                sample = json.loads(row)
                # Download the image referenced by the record and embed its raw
                # bytes so that the Image feature can decode it.
                file_path = dl_manager.download(sample["image"])
                with open(file_path, "rb") as image_f:
                    image_bytes = image_f.read()
                    # image = Image.open(image_f)
                    yield idx, {
                        "image_id": sample["image_id"],
                        "image": {"path": file_path, "bytes": image_bytes},
                        # "image": image,
                        "width": sample["width"],
                        "height": sample["height"],
                        "objects": sample["objects"],
                    }
                idx += 1

if __name__ == '__main__':
pass
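    # Hedged smoke test, not part of the original loader: build the dataset with
    # this builder class and print one training example. Assumes the script is
    # run from the root of the dataset repository so that the relative paths in
    # the annotation files resolve.
    builder = CPPE5()
    builder.download_and_prepare()
    ds = builder.as_dataset(split="train")
    print(ds[0]["image_id"], ds[0]["objects"]["category"])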