Dataset: jpodivin/plantorgans

Modalities: Image
Formats: parquet
Languages: English
Libraries: Datasets, Dask
License: MIT
File size: 3,439 Bytes
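
Once published on the Hub, the dataset can be pulled in with the Datasets library. A minimal usage sketch, assuming the repository id and the semantic_segmentation_full configuration name defined in the loading script below:

from datasets import load_dataset

# Load the train split of the segmentation configuration and peek at one example.
ds = load_dataset("jpodivin/plantorgans", "semantic_segmentation_full", split="train")
print(ds[0]["image"], ds[0]["annotation"])
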
import csv
import os

import datasets

_DESCRIPTION = """Photos of various plants with their major above-ground organs labeled. Includes labels for stems, leaves, fruits, and flowers."""

_HOMEPAGE = "https://huggingface.co/datasets/jpodivin/plantorgans"

_CITATION = """"""

_LICENSE = "MIT"

_NAMES = [
    'Leaf',
    'Stem',
    'Flower',
    'Fruit',
]

_BASE_URL = "https://huggingface.co/datasets/jpodivin/plantorgans/resolve/main/"
_TRAIN_URLS = [_BASE_URL + f"sourcedata_labeled.tar.{i:02}" for i in range(0, 8)]
_TEST_URLS = [_BASE_URL + f"sourcedata_labeled.tar.{i:02}" for i in range(8, 12)]
_METADATA_URLS = {
    'train': 'https://huggingface.co/datasets/jpodivin/plantorgans/resolve/main/labels_train.csv',
    'test': 'https://huggingface.co/datasets/jpodivin/plantorgans/resolve/main/labels_test.csv'
}


class PlantOrgansConfig(datasets.BuilderConfig):
    """Builder Config for PlantOrgans"""
 
    def __init__(self, data_url, metadata_urls, splits, **kwargs):
        """BuilderConfig for PlantOrgans.
        Args:
          data_url: `string`, url to download the zip file from.
          metadata_urls: dictionary with keys 'train' and 'validation' containing the archive metadata URLs
          **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(version=datasets.Version("1.0.0"), **kwargs)
        self.data_url = data_url
        self.metadata_urls = metadata_urls
        self.splits = splits


class PlantOrgans(datasets.GeneratorBasedBuilder):
    """Plantorgans dataset
    """
    BUILDER_CONFIGS = [
        PlantOrgansConfig(
            name="semantic_segmentation_full",
            description="This configuration contains segmentation masks.",
            data_url=_BASE_URL,
            metadata_urls=_METADATA_URLS,
            splits=['train', 'test'],
        ),
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "image": datasets.Image(),
                    "annotation": datasets.ClassLabel(names=_NAMES),
                }
            ),
            supervised_keys=("image", "annotation"),
            homepage=_HOMEPAGE,
            citation=_CITATION,
            license=_LICENSE,
        )


    def _split_generators(self, dl_manager):
        # Each split is spread over several tar archives; download_and_extract
        # on a list of URLs returns one extracted directory per URL. Each
        # archive is assumed to contain a 'sourcedata/labeled' directory.
        train_archive_paths = dl_manager.download_and_extract(_TRAIN_URLS)
        test_archive_paths = dl_manager.download_and_extract(_TEST_URLS)

        split_metadata_paths = dl_manager.download(_METADATA_URLS)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "images": dl_manager.iter_files(
                        [os.path.join(path, 'sourcedata/labeled') for path in train_archive_paths]
                    ),
                    "metadata_path": split_metadata_paths["train"],
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "images": dl_manager.iter_files(
                        [os.path.join(path, 'sourcedata/labeled') for path in test_archive_paths]
                    ),
                    "metadata_path": split_metadata_paths["test"],
                },
            ),
        ]

    def _generate_examples(self, images, metadata_path):
        # Assumes the metadata CSV has 'image' and 'annotations' columns and
        # that entries are keyed by the image file name.
        with open(metadata_path, 'r', encoding='utf-8', newline='') as fp:
            labels = {row['image']: row['annotations'] for row in csv.DictReader(fp)}
        for idx, image_path in enumerate(images):
            annotation = labels.get(os.path.basename(image_path))
            if annotation is None:
                continue
            yield idx, {"image": image_path, "annotation": annotation}
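
The CSV handling in _generate_examples assumes the metadata files expose 'image' and 'annotations' columns, as suggested by the keys in the truncated original; the files themselves were not inspected. A hedged, standalone check of that assumed schema:

import csv
import urllib.request

# Download the train metadata and print its header row to confirm the
# assumed column names; the URL comes from _METADATA_URLS above.
URL = "https://huggingface.co/datasets/jpodivin/plantorgans/resolve/main/labels_train.csv"
with urllib.request.urlopen(URL) as response:
    reader = csv.reader(line.decode("utf-8") for line in response)
    print(next(reader))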