# plantorgans/plantorgans.py
# Hugging Face dataset loading script for jpodivin/plantorgans.
import datasets
import pandas as pd
import glob
from pathlib import Path
_DESCRIPTION = """Photos of various plants with their major, above ground organs labeled. Includes labels for stem, leafs, fruits and flowers."""
_HOMEPAGE = "https://huggingface.co/datasets/jpodivin/plantorgans"
_CITATION = """"""
_LICENSE = "MIT"
_NAMES = [
'Leaf',
'Stem',
'Flower',
'Fruit',
]
_BASE_URL = "https://huggingface.co/datasets/jpodivin/plantorgans/resolve/main/"
_TRAIN_URLS = [_BASE_URL + f"sourcedata_labeled.tar.{i:02}" for i in range(0, 8)]
_TEST_URLS = [_BASE_URL + f"sourcedata_labeled.tar.{i:02}" for i in range(8, 12)]
_MASKS_URLS = [_BASE_URL + f"masks.tar.{i:02}" for i in range(0, 2)]
_METADATA_URLS = {
    'train': _BASE_URL + 'metadata_train.csv',
    'test': _BASE_URL + 'metadata_test.csv',
}
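# For illustration, the expressions above expand to shard URLs such as:
#   _TRAIN_URLS[0] -> .../sourcedata_labeled.tar.00
#   _TEST_URLS[0]  -> .../sourcedata_labeled.tar.08
#   _MASKS_URLS[0] -> .../masks.tar.00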
class PlantOrgansConfig(datasets.BuilderConfig):
"""Builder Config for PlantOrgans"""
    def __init__(self, data_url, metadata_urls, splits, **kwargs):
        """BuilderConfig for PlantOrgans.

        Args:
            data_url: `string`, base URL the data archives are downloaded from.
            metadata_urls: dictionary with keys 'train' and 'test' mapping to the metadata CSV URLs.
            splits: list of split names provided by this configuration.
            **kwargs: keyword arguments forwarded to super.
        """
super().__init__(version=datasets.Version("1.0.0"), **kwargs)
self.data_url = data_url
self.metadata_urls = metadata_urls
self.splits = splits
class PlantOrgans(datasets.GeneratorBasedBuilder):
    """PlantOrgans semantic segmentation dataset."""
BUILDER_CONFIGS = [
PlantOrgansConfig(
name="semantic_segmentation_full",
description="This configuration contains segmentation masks.",
data_url=_BASE_URL,
metadata_urls=_METADATA_URLS,
splits=['train', 'test'],
),
]
def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features(
{
"image": datasets.Image(),
"mask": datasets.Image(),
}
),
supervised_keys=("image", "annotation"),
homepage=_HOMEPAGE,
citation=_CITATION,
license=_LICENSE,
)
def _split_generators(self, dl_manager):
train_archives_paths = dl_manager.download_and_extract(_TRAIN_URLS)
test_archives_paths = dl_manager.download_and_extract(_TEST_URLS)
train_paths = []
test_paths = []
        # Collect the extracted image files from each archive
        for p in train_archives_paths:
            train_paths.extend(glob.glob(str(p) + '/sourcedata/labeled/*.jpg'))
        for p in test_archives_paths:
            test_paths.extend(glob.glob(str(p) + '/sourcedata/labeled/*.jpg'))
split_metadata_paths = dl_manager.download(_METADATA_URLS)
mask_archives_paths = dl_manager.download_and_extract(_MASKS_URLS)
mask_paths = []
        for p in mask_archives_paths:
            mask_paths.extend(glob.glob(str(p) + '/masks/*.png'))
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={
"images": train_paths,
"metadata_path": split_metadata_paths["train"],
"masks_path": mask_paths,
},
),
datasets.SplitGenerator(
name=datasets.Split.TEST,
gen_kwargs={
"images": test_paths,
"metadata_path": split_metadata_paths["test"],
"masks_path": mask_paths,
},
),
]
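    # Note: both splits receive the full list of mask paths; the per-split
    # metadata CSV joined in _generate_examples selects the masks that belong
    # to each split.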
    def _generate_examples(self, images, metadata_path, masks_path):
        """
        images: list of local paths to images of a given split
        metadata_path: path to the split's metadata CSV
        masks_path: list of local paths to all segmentation masks
        """
        # Map each image's repository-relative path (as listed in the metadata
        # 'image' column) to its local downloaded path
        image_paths = pd.DataFrame(
            [(str(Path(*Path(e).parts[-3:])), e) for e in images], columns=['image', 'image_path'])
        # Map each mask's repository-relative path to its local downloaded path
        masks_paths = pd.DataFrame(
            [(str(Path(*Path(e).parts[-2:])), e) for e in masks_path], columns=['mask', 'mask_path'])
        # Read the metadata linking each image to its mask
metadata = pd.read_csv(metadata_path)
        # Inner-join so only examples with both a local image and a local mask remain
        metadata = metadata.merge(masks_paths, on='mask', how='inner')
        metadata = metadata.merge(image_paths, on='image', how='inner')
        # Yield one example per metadata row: local paths to an image and its mask
        for i, r in metadata.iterrows():
            yield i, {'mask': r['mask_path'], 'image': r['image_path']}
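
# Usage sketch (assumption: not part of the original script). With this file at
# the root of the jpodivin/plantorgans dataset repository, the dataset can be
# loaded through the datasets library; recent versions of the library require
# trust_remote_code=True to execute loading scripts.
if __name__ == "__main__":
    ds = datasets.load_dataset(
        "jpodivin/plantorgans",
        "semantic_segmentation_full",
        trust_remote_code=True,
    )
    sample = ds["train"][0]
    # The "image" and "mask" columns are decoded to PIL images on access
    print(sample["image"].size, sample["mask"].size)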