# coding=utf-8
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""High-Level (HL) dataset loading script."""

import json
from pathlib import Path

import datasets

_CITATION = """\
@inproceedings{Cafagna2023HLDG,
  title={HL Dataset: Grounding High-Level Linguistic Concepts in Vision},
  author={Michele Cafagna and Kees van Deemter and Albert Gatt},
  year={2023}
}
"""

_DESCRIPTION = """\
High-Level (HL) Dataset: images annotated with high-level captions along three
axes (scene, action, rationale) plus object-centric captions, together with
confidence and purity scores per caption and a diversity score per axis.
"""

# GitHub repository with the dataset card and documentation
_HOMEPAGE = "https://github.com/michelecafagna26/HL-dataset"

_LICENSE = "Apache 2.0"

_IMG = "https://huggingface.co/datasets/michelecafagna26/hl/resolve/main/data/images.tar.gz"
_TRAIN = "https://huggingface.co/datasets/michelecafagna26/hl/resolve/main/data/annotations/train.jsonl"
_TEST = "https://huggingface.co/datasets/michelecafagna26/hl/resolve/main/data/annotations/test.jsonl"


class HL(datasets.GeneratorBasedBuilder):
    """High-Level (HL) dataset."""

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        features = datasets.Features(
            {
                "file_name": datasets.Value("string"),
                "image": datasets.Image(),
                "scene": datasets.Sequence(datasets.Value("string")),
                "action": datasets.Sequence(datasets.Value("string")),
                "rationale": datasets.Sequence(datasets.Value("string")),
                "object": datasets.Sequence(datasets.Value("string")),
                "confidence": {
                    "scene": datasets.Sequence(datasets.Value("float32")),
                    "action": datasets.Sequence(datasets.Value("float32")),
                    "rationale": datasets.Sequence(datasets.Value("float32")),
                },
                "purity": {
                    "scene": datasets.Sequence(datasets.Value("float32")),
                    "action": datasets.Sequence(datasets.Value("float32")),
                    "rationale": datasets.Sequence(datasets.Value("float32")),
                },
                "diversity": {
                    "scene": datasets.Value("float32"),
                    "action": datasets.Value("float32"),
                    "rationale": datasets.Value("float32"),
                },
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # The image archive is only downloaded (not extracted) so it can be
        # streamed with `dl_manager.iter_archive`; the JSONL annotation files
        # are downloaded and extracted as regular files.
        image_files = dl_manager.download(_IMG)
        annotation_files = dl_manager.download_and_extract([_TRAIN, _TEST])
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "annotation_file_path": annotation_files[0],
                    "images": dl_manager.iter_archive(image_files),
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "annotation_file_path": annotation_files[1],
                    "images": dl_manager.iter_archive(image_files),
                },
            ),
        ]

    def _generate_examples(self, annotation_file_path, images):
        idx = 0
        # assert Path(annotation_file_path).suffix == ".jsonl"
        # Load all annotations up front, keyed by image file name, so each image
        # streamed from the archive can be matched to its metadata regardless of
        # the order of files in the archive.
        with open(annotation_file_path, "r", encoding="utf-8") as fp:
            metadata = {record["file_name"]: record for record in map(json.loads, fp)}
        for img_file_path, img_obj in images:
            file_name = Path(img_file_path).name
            if file_name in metadata:
                yield idx, {
                    "file_name": file_name,
                    "image": {"path": img_file_path, "bytes": img_obj.read()},
                    "scene": metadata[file_name]["captions"]["scene"],
                    "action": metadata[file_name]["captions"]["action"],
                    "rationale": metadata[file_name]["captions"]["rationale"],
                    "object": metadata[file_name]["captions"]["object"],
                    "confidence": metadata[file_name]["confidence"],
                    "purity": metadata[file_name]["purity"],
                    "diversity": metadata[file_name]["diversity"],
                }
                idx += 1
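
# Example usage (a minimal sketch, assuming this script is published on the
# Hugging Face Hub under "michelecafagna26/hl", as the data URLs above suggest):
#
#     from datasets import load_dataset
#
#     dataset = load_dataset("michelecafagna26/hl")
#     sample = dataset["train"][0]
#     print(sample["file_name"], sample["scene"], sample["diversity"])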