Datasets:

Modalities:
Image
Formats:
parquet
Languages:
English
DOI:
Libraries:
Datasets
Dask
License:
jpodivin commited on
Commit
dfd73db
1 Parent(s): 4b56fd9
.gitattributes CHANGED
@@ -56,3 +56,6 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
56
  # Repo specific
57
  labels.csv filter=lfs diff=lfs merge=lfs -text
58
  labels_raw.json filter=lfs diff=lfs merge=lfs -text
 
 
 
 
56
  # Repo specific
57
  labels.csv filter=lfs diff=lfs merge=lfs -text
58
  labels_raw.json filter=lfs diff=lfs merge=lfs -text
59
+ labels_test.csv filter=lfs diff=lfs merge=lfs -text
60
+ labels_train.csv filter=lfs diff=lfs merge=lfs -text
61
+ labels_raw filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1 @@
 
 
1
+ test*
labels.csv CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:27866cd0317906ce4f17b5a25609d62d7dee21dffe3183de3cfc8ec0b8ec8105
3
- size 3595889996
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a49b0dfbc6791a093ca4f5ea494bf9ce7c0b38ec7dc3dd528f3cb215c2ccc941
3
+ size 3989805491
labels_raw.json → labels_raw.json.tar.gz RENAMED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:168e4571e61a489453f8262108557b8fe3d07b4e2b341fbef5340181b1ff663d
3
- size 3993693859
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9b73aa96821ec151fd2da7ef66ee93c1218d2052e536f30c56eb62b7f266f31e
3
+ size 471437296
labels_raw.tar.gz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fdfccc23b41dc268ab608d08662b9f90115fcb600423dad13dd87956063de5db
3
+ size 424277514
labels_test.csv ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f3fb37cbaa31b31acb6dd7c37b2fd95b21d2d1ff977133b7f355f294ff7b9443
3
+ size 1170590470
labels_train.csv ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:718bddbe59318b16d777d0f3f674ac06e1d97b0468b4e9ae152291dd3e8d73cf
3
+ size 2820756387
plantorgans.py ADDED
@@ -0,0 +1,100 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import csv
import itertools
import json
import os

import datasets
+
5
+ _DESCRIPTION = """Photos of various plants with their major, above ground organs labeled. Includes labels for stem, leafs, fruits and flowers."""
6
+
7
+ _HOMEPAGE = "https://huggingface.co/datasets/jpodivin/plantorgans"
8
+
9
+ _CITATION = """"""
10
+
11
+ _LICENSE = "MIT"
12
+
13
+ _NAMES = [
14
+ 'Leaf',
15
+ 'Stem',
16
+ 'Flower',
17
+ 'Fruit',
18
+ ]
19
+
20
+ _BASE_URL = "https://huggingface.co/datasets/jpodivin/plantorgans/resolve/main/"
21
+ _TRAIN_URLS = [_BASE_URL + f"sourcedata_labeled.tar.{i:02}" for i in range(0, 8)]
22
+ _TEST_URLS = [_BASE_URL + f"sourcedata_labeled.tar.{i:02}" for i in range(8, 12)]
23
+ _METADATA_URLS = {
24
+ 'train': 'https://huggingface.co/datasets/jpodivin/plantorgans/resolve/main/labels_train.csv',
25
+ 'test': 'https://huggingface.co/datasets/jpodivin/plantorgans/resolve/main/labels_test.csv'
26
+ }
27
+
28
+
29
+ class PlantOrgansConfig(datasets.BuilderConfig):
30
+ """Builder Config for PlantOrgans"""
31
+
32
+ def __init__(self, data_url, metadata_urls, splits, **kwargs):
33
+ """BuilderConfig for PlantOrgans.
34
+ Args:
35
+ data_url: `string`, url to download the zip file from.
36
+ metadata_urls: dictionary with keys 'train' and 'validation' containing the archive metadata URLs
37
+ **kwargs: keyword arguments forwarded to super.
38
+ """
39
+ super().__init__(version=datasets.Version("1.0.0"), **kwargs)
40
+ self.data_url = data_url
41
+ self.metadata_urls = metadata_urls
42
+ self.splits = splits
43
+
44
+
class PlantOrgans(datasets.GeneratorBasedBuilder):
    """Plantorgans dataset: photos of plants with their major above-ground
    organs (leaf, stem, flower, fruit) labeled.
    """
    BUILDER_CONFIGS = [
        PlantOrgansConfig(
            name="semantic_segmentation_full",
            description="This configuration contains segmentation masks.",
            data_url=_BASE_URL,
            metadata_urls=_METADATA_URLS,
            splits=['train', 'test'],
        ),
    ]

    def _info(self):
        """Dataset metadata: each example pairs an image with a class label."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "image": datasets.Image(),
                    "annotation": datasets.ClassLabel(names=_NAMES),
                }
            ),
            supervised_keys=("image", "annotation"),
            homepage=_HOMEPAGE,
            citation=_CITATION,
            license=_LICENSE,
        )

    def _split_generators(self, dl_manager):
        """Download archives and label CSVs; define the train/test splits.

        BUG FIX: `download_and_extract` on a *list* of URLs returns a list of
        paths; the original passed that list straight to `os.path.join`,
        which raises TypeError. The archives are now downloaded (not
        extracted) and streamed member-by-member with `iter_archive`.

        NOTE(review): this assumes each `sourcedata_labeled.tar.NN` part is a
        self-contained tar archive; if the parts are byte-slices of one large
        tar they must be concatenated first — confirm against the repository.
        """
        train_archive_paths = dl_manager.download(_TRAIN_URLS)
        test_archive_paths = dl_manager.download(_TEST_URLS)
        split_metadata_paths = dl_manager.download(_METADATA_URLS)

        train_images = itertools.chain.from_iterable(
            dl_manager.iter_archive(path) for path in train_archive_paths
        )
        test_images = itertools.chain.from_iterable(
            dl_manager.iter_archive(path) for path in test_archive_paths
        )
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "images": train_images,
                    "metadata_path": split_metadata_paths["train"],
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "images": test_images,
                    "metadata_path": split_metadata_paths["test"],
                },
            ),
        ]

    def _generate_examples(self, images, metadata_path):
        """Yield `(key, example)` pairs for one split.

        Args:
            images: iterable of `(path_in_archive, file_object)` pairs as
                produced by `dl_manager.iter_archive`.
            metadata_path: local path of this split's label CSV.

        BUG FIX: the original opened `metadata_path` in `'w'` mode —
        truncating the freshly downloaded file — then tried `json.load` on
        what `_METADATA_URLS` shows to be a CSV file, and rebound the
        `images` argument, discarding the archive iterator entirely.
        """
        with open(metadata_path, 'r', encoding='utf-8', newline='') as fp:
            # assumes columns named 'image' (path inside the archive) and
            # 'annotation' (organ class name) — TODO confirm against the CSV
            labels = {
                row['image']: row['annotation'] for row in csv.DictReader(fp)
            }

        for image_path, image_file in images:
            annotation = labels.get(image_path)
            if annotation is None:
                # archive member without a label row (or key mismatch): skip
                continue
            yield image_path, {
                "image": {"path": image_path, "bytes": image_file.read()},
                "annotation": annotation,
            }