Modalities: Image
Formats: parquet
Languages: English
Libraries: Datasets, Dask
License: MIT
plantorgans / plantorgans.py
import datasets
import os
import json
import pandas as pd

_DESCRIPTION = """Photos of various plants with their major above-ground organs labeled. Includes labels for stems, leaves, fruits and flowers."""
_HOMEPAGE = "https://huggingface.co/datasets/jpodivin/plantorgans"
_CITATION = """"""
_LICENSE = "MIT"
_NAMES = [
    'Leaf',
    'Stem',
    'Flower',
    'Fruit',
]
_BASE_URL = "https://huggingface.co/datasets/jpodivin/plantorgans/resolve/main/"
_TRAIN_URLS = [_BASE_URL + f"sourcedata_labeled.tar.{i:02}" for i in range(0, 8)]
_TEST_URLS = [_BASE_URL + f"sourcedata_labeled.tar.{i:02}" for i in range(8, 12)]
_METADATA_URLS = {
    'train': 'https://huggingface.co/datasets/jpodivin/plantorgans/resolve/main/labels_train.csv',
    'test': 'https://huggingface.co/datasets/jpodivin/plantorgans/resolve/main/labels_test.csv',
}


class PlantOrgansConfig(datasets.BuilderConfig):
    """Builder config for PlantOrgans."""

    def __init__(self, data_url, metadata_urls, splits, **kwargs):
        """BuilderConfig for PlantOrgans.
        Args:
            data_url: `string`, base URL the data archives are downloaded from.
            metadata_urls: dictionary with keys 'train' and 'test' containing the metadata CSV URLs.
            splits: list of split names provided by this configuration.
            **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(version=datasets.Version("1.0.0"), **kwargs)
        self.data_url = data_url
        self.metadata_urls = metadata_urls
        self.splits = splits


class PlantOrgans(datasets.GeneratorBasedBuilder):
    """Plantorgans dataset."""

    BUILDER_CONFIGS = [
        PlantOrgansConfig(
            name="semantic_segmentation_full",
            description="This configuration contains segmentation masks.",
            data_url=_BASE_URL,
            metadata_urls=_METADATA_URLS,
            splits=['train', 'test'],
        ),
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "image": datasets.Image(),
                    "annotation": datasets.ClassLabel(names=_NAMES),
                }
            ),
            supervised_keys=("image", "annotation"),
            homepage=_HOMEPAGE,
            citation=_CITATION,
            license=_LICENSE,
        )

    def _split_generators(self, dl_manager):
        # Each split is sharded across several tar archives; download_and_extract
        # returns one extracted directory per shard.
        train_archive_paths = dl_manager.download_and_extract(_TRAIN_URLS)
        test_archive_paths = dl_manager.download_and_extract(_TEST_URLS)
        split_metadata_paths = dl_manager.download(_METADATA_URLS)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "images": dl_manager.iter_files(
                        [os.path.join(path, 'sourcedata/labeled') for path in train_archive_paths]
                    ),
                    "metadata_path": split_metadata_paths["train"],
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "images": dl_manager.iter_files(
                        [os.path.join(path, 'sourcedata/labeled') for path in test_archive_paths]
                    ),
                    "metadata_path": split_metadata_paths["test"],
                },
            ),
        ]

    def _generate_examples(self, images, metadata_path):
        # The split metadata is a CSV file mapping image paths to their annotations.
        metadata = pd.read_csv(metadata_path)
        images = metadata['image']
        annotations = metadata['annotations']
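
A minimal loading sketch for reference (assuming the script above is published on the Hub under jpodivin/plantorgans; the config and column names follow the builder definitions, and recent datasets releases may require trust_remote_code=True for script-based datasets):

import datasets

# Load the train split defined by the "semantic_segmentation_full" configuration.
ds = datasets.load_dataset(
    "jpodivin/plantorgans",
    "semantic_segmentation_full",
    split="train",
    trust_remote_code=True,
)

# Inspect the first example; the columns match the features declared in _info().
example = ds[0]
print(example["image"], example["annotation"])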