"""Rock Glacier dataset with images of the chilean andes."""

import os

import datasets
from datasets.tasks import ImageClassification

_HOMEPAGE = "https://github.com/alcazar90/rock-glacier-detection"


_CITATION = """\
@ONLINE {rock-glacier-dataset,
    author="CMM-Glaciares",
    title="Rock Glacier Dataset",
    month="October",
    year="2022",
    url="https://github.com/alcazar90/rock-glacier-detection"
}
"""

_DESCRIPTION = """\
Rock Glacier dataset with images of the Chilean Andes, labeled as either
"glaciar" or "cordillera".
"""


_URLS = {
    "train": "https://huggingface.co/datasets/alkzar90/rock-glacier-dataset/resolve/main/data/train.zip",
    "validation": "https://huggingface.co/datasets/alkzar90/rock-glacier-dataset/resolve/main/data/validation.zip",
}

_NAMES = ["glaciar", "cordillera"]
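# ClassLabel assigns integer ids in list order: "glaciar" -> 0, "cordillera" -> 1.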


class RockGlacierDataset(datasets.GeneratorBasedBuilder):
    """Rock Glacier images dataset."""

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "image": datasets.Image(),
                    "labels": datasets.features.ClassLabel(names=_NAMES),
                }
            ),
            supervised_keys=("image", "labels"),
            homepage=_HOMEPAGE,
            citation=_CITATION,
            task_templates=[ImageClassification(image_column="image", label_column="labels")],
        )

    def _split_generators(self, dl_manager):
        # Download and extract the train/validation archives, then hand each
        # split an iterator over the extracted files.
        data_files = dl_manager.download_and_extract(_URLS)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "files": dl_manager.iter_files([data_files["train"]]),
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "files": dl_manager.iter_files([data_files["validation"]]),
                },
            ),
        ]

    def _generate_examples(self, files):
        # Each example is a PNG image; its label is the (lowercased) name of
        # the directory that contains it, e.g. ".../glaciar/img_001.png".
        for i, path in enumerate(files):
            file_name = os.path.basename(path)
            if file_name.endswith(".png"):
                yield i, {
                    "image": path,
                    "labels": os.path.basename(os.path.dirname(path)).lower(),
                }
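

# Usage sketch (assumes this script is hosted in the Hub repository
# "alkzar90/rock-glacier-dataset", as the _URLS above suggest):
#
#     from datasets import load_dataset
#
#     ds = load_dataset("alkzar90/rock-glacier-dataset")
#     example = ds["train"][0]
#     print(example["image"], example["labels"])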