Datasets:
Sub-tasks:
instance-segmentation
Size Categories:
10K<n<100K
Annotations Creators:
crowdsourced
License:
CDLA-Permissive-1.0
""" | |
Inspired from | |
https://huggingface.co/datasets/ydshieh/coco_dataset_script/blob/main/coco_dataset_script.py | |
""" | |
import json | |
import os | |
import datasets | |

class COCOBuilderConfig(datasets.BuilderConfig):
    """BuilderConfig carrying the list of splits to generate."""

    def __init__(self, name, splits, **kwargs):
        super().__init__(name, **kwargs)
        self.splits = splits
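
# A config is selected by name via `load_dataset`, and extra builder kwargs such
# as `splits` override the chosen config's attributes. Hypothetical call, assuming
# this script is saved locally as `doclaynet.py`:
#   datasets.load_dataset("doclaynet.py", name="2022.08", splits=["train"])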

# Add BibTeX citation
# Find for instance the citation on arxiv or on the dataset repo/website
_CITATION = """\
@article{doclaynet2022,
  title = {DocLayNet: A Large Human-Annotated Dataset for Document-Layout Analysis},
  doi = {10.1145/3534678.3539043},
  url = {https://arxiv.org/abs/2206.01062},
  author = {Pfitzmann, Birgit and Auer, Christoph and Dolfi, Michele and Nassar, Ahmed S and Staar, Peter W J},
  year = {2022}
}
"""

# Add description of the dataset here
# You can copy an official description
_DESCRIPTION = """\
DocLayNet is a human-annotated document-layout segmentation dataset covering a broad variety of document sources.
"""

# Add a link to an official homepage for the dataset here
_HOMEPAGE = "https://developer.ibm.com/exchanges/data/all/doclaynet/"

# Add the licence for the dataset here if you can find it
_LICENSE = "CDLA-Permissive-1.0"

# Add links to the official dataset URLs here
# The HuggingFace datasets library doesn't host the datasets but only points to the original files
# This can be an arbitrary nested dict/list of URLs (see below in the `_split_generators` method)
# This script downloads the official DocLayNet core archive itself; no local copy is required.
_URLs = {
    "core": "https://codait-cos-dax.s3.us.cloud-object-storage.appdomain.cloud/dax-doclaynet/1.0.0/DocLayNet_core.zip",
}
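
# `dl_manager.download_and_extract(_URLs)` returns the same nested structure,
# with each URL replaced by a local path to the downloaded-and-extracted archive,
# e.g. (illustrative path only): {"core": "/path/to/cache/extracted/..."}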

# The dataset class name usually matches the script name in CamelCase instead of snake_case
class COCODataset(datasets.GeneratorBasedBuilder):
    """Dataset builder for DocLayNet, which ships its annotations in COCO layout."""

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIG_CLASS = COCOBuilderConfig
    BUILDER_CONFIGS = [
        COCOBuilderConfig(name="2022.08", splits=["train", "val", "test"]),
    ]
    DEFAULT_CONFIG_NAME = "2022.08"

    def _info(self):
        # This method specifies the datasets.DatasetInfo object, which contains information and feature types for the dataset
        feature_dict = {
            "id": datasets.Value("int64"),
            "height": datasets.Value("int64"),
            "width": datasets.Value("int64"),
            "file_name": datasets.Value("string"),
            # Fields yielded by `_generate_examples` alongside the COCO image metadata
            "image_id": datasets.Value("int64"),
            "image_path": datasets.Value("string"),
            # Custom fields
            "doc_category": datasets.Value("string"),  # high-level document category
            "collection": datasets.Value("string"),  # sub-collection name
            "doc_name": datasets.Value("string"),  # original document filename
            "page_no": datasets.Value("int64"),  # page number in original document
            # "precedence": datasets.Value("int64"),  # annotation order, non-zero in case of redundant double or triple annotation
        }

        features = datasets.Features(feature_dict)

        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=_DESCRIPTION,
            # This defines the different columns of the dataset and their types
            features=features,
            # If there's a common (input, target) tuple from the features,
            # specify them here. They'll be used if as_supervised=True in
            # builder.as_dataset.
            supervised_keys=None,
            # Homepage of the dataset for documentation
            homepage=_HOMEPAGE,
            # License for the dataset if available
            license=_LICENSE,
            # Citation for the dataset
            citation=_CITATION,
        )
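
    # A loaded record then looks like (illustrative values only):
    #   {"id": 1, "image_id": 1, "height": 1025, "width": 1025,
    #    "file_name": "<page-hash>.png", "image_path": ".../PNG/<page-hash>.png",
    #    "doc_category": "financial_reports", "collection": "...",
    #    "doc_name": "<document>.pdf", "page_no": 3}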

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        # This method is tasked with downloading/extracting the data and defining the splits depending on the configuration
        # If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name
        archive_path = dl_manager.download_and_extract(_URLs)

        splits = []
        for split in self.config.splits:
            if split == "train":
                dataset = datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    # These kwargs will be passed to _generate_examples
                    gen_kwargs={
                        "json_path": os.path.join(archive_path["core"], "COCO", "train.json"),
                        "image_dir": os.path.join(archive_path["core"], "PNG"),
                        "split": "train",
                    },
                )
            elif split in ["val", "valid", "validation", "dev"]:
                dataset = datasets.SplitGenerator(
                    name=datasets.Split.VALIDATION,
                    # These kwargs will be passed to _generate_examples
                    gen_kwargs={
                        "json_path": os.path.join(archive_path["core"], "COCO", "val.json"),
                        "image_dir": os.path.join(archive_path["core"], "PNG"),
                        "split": "val",
                    },
                )
            elif split == "test":
                dataset = datasets.SplitGenerator(
                    name=datasets.Split.TEST,
                    # These kwargs will be passed to _generate_examples
                    gen_kwargs={
                        "json_path": os.path.join(archive_path["core"], "COCO", "test.json"),
                        "image_dir": os.path.join(archive_path["core"], "PNG"),
                        "split": "test",
                    },
                )
            else:
                continue

            splits.append(dataset)

        return splits
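
    # Per the paths above, the extracted core archive is expected to contain:
    #   COCO/train.json, COCO/val.json, COCO/test.json  (COCO-format annotation files)
    #   PNG/                                            (page images shared by all splits)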

    def _generate_examples(
        # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
        self, json_path, image_dir, split
    ):
        """Yields examples as (key, example) tuples."""
        # This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
        # The `key` is here for legacy reasons (tfds) and is not important in itself.

        _features = ["image_id", "image_path", "doc_category", "collection", "height", "width", "file_name", "doc_name", "page_no", "id"]
        features = list(_features)

        with open(json_path, "r", encoding="UTF-8") as fp:
            data = json.load(fp)

        # list of dict
        images = data["images"]
        entries = images

        # build a dict of image_id -> image info dict
        d = {image["id"]: image for image in images}

        # list of dict
        if split in ["train", "val"]:
            annotations = data["annotations"]

            # enrich each annotation with its image's metadata, keeping the
            # annotation's own `id` (which `update` would otherwise overwrite)
            for annotation in annotations:
                _id = annotation["id"]
                image_info = d[annotation["image_id"]]
                annotation.update(image_info)
                annotation["id"] = _id

            entries = annotations

        for id_, entry in enumerate(entries):
            entry = {k: v for k, v in entry.items() if k in features}

            if split == "test":
                entry["image_id"] = entry["id"]
                entry["id"] = -1

            entry["image_path"] = os.path.join(image_dir, entry["file_name"])
            entry = {k: entry[k] for k in _features if k in entry}

            yield str((entry["image_id"], entry["id"])), entry
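

# Usage sketch (illustrative; assumes this script is saved locally as
# `doclaynet.py` and a `datasets` version that still supports script-based loaders):
if __name__ == "__main__":
    dataset = datasets.load_dataset("doclaynet.py", name="2022.08")
    print(dataset)  # DatasetDict with train/validation/test splits
    print(dataset["train"][0])  # one annotation record (train/val) or one page (test)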