|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
"""High-Level dataset.""" |
|
|
|
|
|
import json |
|
from pathlib import Path |
|
|
|
import datasets |
|
|
|
|
|
# BibTeX citation for the dataset.
# NOTE(review): the @misc record is currently empty — TODO fill in the
# actual BibTeX entry before release.
_CITATION = """\
@misc{}
"""

# Short human-readable description shown in the dataset card / DatasetInfo.
_DESCRIPTION = """\
High-level Dataset
"""

_HOMEPAGE = "https://github.com/michelecafagna26/HL-dataset"

_LICENSE = "Apache 2.0"

# Remote data locations on the Hugging Face Hub:
# _IMG is a tar.gz image archive (downloaded but not extracted so it can be
# streamed with DownloadManager.iter_archive); _TRAIN/_TEST are JSONL
# annotation files, one record per image.
_IMG = "https://huggingface.co/datasets/michelecafagna26/hl/resolve/main/data/images.tar.gz"

_TRAIN = "https://huggingface.co/datasets/michelecafagna26/hl/resolve/main/data/annotations/train.jsonl"

_TEST = "https://huggingface.co/datasets/michelecafagna26/hl/resolve/main/data/annotations/test.jsonl"
|
|
|
|
|
|
|
class HL(datasets.GeneratorBasedBuilder):
    """High Level (HL) Dataset builder.

    Each example pairs an image with caption annotations along three
    high-level axes (scene, action, rationale), plus object-level
    captions and per-axis confidence / purity / diversity scores.
    """

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        """Return the :class:`datasets.DatasetInfo` describing the schema.

        Returns:
            datasets.DatasetInfo: feature spec plus description, homepage,
            license and citation metadata from the module constants.
        """
        features = datasets.Features(
            {
                "file_name": datasets.Value("string"),
                "image": datasets.Image(),
                # Multiple captions per axis, hence Sequence of strings.
                "scene": datasets.Sequence(datasets.Value("string")),
                "action": datasets.Sequence(datasets.Value("string")),
                "rationale": datasets.Sequence(datasets.Value("string")),
                "object": datasets.Sequence(datasets.Value("string")),
                # One confidence score per caption, per axis.
                "confidence": {
                    "scene": datasets.Sequence(datasets.Value("float32")),
                    "action": datasets.Sequence(datasets.Value("float32")),
                    "rationale": datasets.Sequence(datasets.Value("float32")),
                },
                # One purity score per caption, per axis.
                "purity": {
                    "scene": datasets.Sequence(datasets.Value("float32")),
                    "action": datasets.Sequence(datasets.Value("float32")),
                    "rationale": datasets.Sequence(datasets.Value("float32")),
                },
                # A single diversity score per axis.
                "diversity": {
                    "scene": datasets.Value("float32"),
                    "action": datasets.Value("float32"),
                    "rationale": datasets.Value("float32"),
                },
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the data and declare the train/test splits.

        The image archive is downloaded but deliberately NOT extracted so
        it can be streamed member-by-member with ``iter_archive``; the
        JSONL annotation files are downloaded (and extracted — a no-op
        for plain files).
        """
        image_files = dl_manager.download(_IMG)
        annotation_files = dl_manager.download_and_extract([_TRAIN, _TEST])
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "annotation_file_path": annotation_files[0],
                    # A fresh archive iterator per split: an archive
                    # iterator can only be consumed once.
                    "images": dl_manager.iter_archive(image_files),
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "annotation_file_path": annotation_files[1],
                    "images": dl_manager.iter_archive(image_files),
                },
            ),
        ]

    def _generate_examples(self, annotation_file_path, images):
        """Yield ``(index, example)`` pairs for one split.

        Args:
            annotation_file_path (str): path to a JSONL file with one
                annotation record per image; each record carries
                ``file_name``, ``captions``, ``confidence``, ``purity``
                and ``diversity`` keys.
            images: iterator of ``(path, file_object)`` pairs produced by
                ``dl_manager.iter_archive`` over the image tarball.
        """
        # Index the annotations by image file name. Parse each JSONL line
        # exactly once (the previous version called json.loads twice per
        # line) and pin the encoding so parsing is locale-independent.
        with open(annotation_file_path, "r", encoding="utf-8") as fp:
            metadata = {record["file_name"]: record for record in map(json.loads, fp)}

        idx = 0
        for img_file_path, img_obj in images:
            file_name = Path(img_file_path).name
            # Archive members without an annotation record for this split
            # are skipped; single lookup instead of repeated indexing.
            record = metadata.get(file_name)
            if record is not None:
                captions = record["captions"]
                yield idx, {
                    "file_name": file_name,
                    "image": {"path": img_file_path, "bytes": img_obj.read()},
                    "scene": captions["scene"],
                    "action": captions["action"],
                    "rationale": captions["rationale"],
                    "object": captions["object"],
                    "confidence": record["confidence"],
                    "purity": record["purity"],
                    "diversity": record["diversity"],
                }
                idx += 1