Commit f41026b by edouard-rolland
Parent: 2001a36

dataset uploaded by roboflow2huggingface package
data/test.zip CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:71e7bbb244d61943b9d305ff7273c5cdb76ebe9b50dfc51a686fb3e240a2454b
+ oid sha256:eb42b6bda0d920e726b46822f44504b611cac368187586f46f3c64d9fffa4f34
  size 11752138
data/train.zip CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:0748faf6423aba93576069f223ce19862e8a9a12f5530898c20a86b5bb91f258
+ oid sha256:9e6d72d82a3235435ecdb2a6f493c44e75a56bbd99f072e41adf201cb46367c3
  size 28996116
data/valid-mini.zip CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:47a91b430ea58dc5ca52550751dc69fd16e63be3ac0a6e3f94d28f417a15bd70
- size 92590
+ oid sha256:6d10d081ba59735293c5cf928da69a2098a75a1178eb915d3e12d6b968db5136
+ size 71655
data/valid.zip CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:33d563505481e32fc10f9b606b2f1599cb7961cc88382bc9cd5fbc549585bb42
+ oid sha256:add4db5cb0ad2e2fbe115a3c65b741bbd173bc397253f7994d875d5dfc7313d5
  size 7080579
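
Each diff above touches only a Git LFS pointer file: the pointer records the artifact's sha256 digest (the oid line) and byte size, while the zip itself lives in LFS storage. A minimal sketch, assuming a local copy of one of the zips, of checking a download against the oid in its pointer (the path and helper name are illustrative, not part of this commit):

import hashlib

def lfs_sha256(path, chunk_size=1 << 20):
    # Stream the file in 1 MiB chunks and return its sha256 hex digest,
    # which should match the oid line of the corresponding LFS pointer.
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

# Hypothetical local copy of data/test.zip; compare against the new oid above.
assert lfs_sha256("data/test.zip") == "eb42b6bda0d920e726b46822f44504b611cac368187586f46f3c64d9fffa4f34"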
thumbnail.jpg CHANGED

Git LFS Details (before)

  • SHA256: d3b55f76482b522640c52753f79784ec70535f4cd3b3ba99530674bd3b8a0731
  • Pointer size: 130 Bytes
  • Size of remote file: 96.4 kB

Git LFS Details (after)

  • SHA256: 216376b39e1d2fdde2254568cb1742892812edc4dc9c9e8b32c3631645dcb17a
  • Pointer size: 130 Bytes
  • Size of remote file: 92.8 kB
volcanic-plumes.py ADDED
@@ -0,0 +1,152 @@
import collections
import json
import os

import datasets


_HOMEPAGE = "https://universe.roboflow.com/edouardrolland/hiddenproject-wndzs/dataset/1"
_LICENSE = "CC BY 4.0"
_CITATION = """\
@misc{ hiddenproject-wndzs_dataset,
    title = { HiddenProject Dataset },
    type = { Open Source Dataset },
    author = { EdouardRolland },
    howpublished = { \\url{ https://universe.roboflow.com/edouardrolland/hiddenproject-wndzs } },
    url = { https://universe.roboflow.com/edouardrolland/hiddenproject-wndzs },
    journal = { Roboflow Universe },
    publisher = { Roboflow },
    year = { 2024 },
    month = { feb },
    note = { visited on 2024-02-01 },
}
"""
_CATEGORIES = ['plume', 'summit']
_ANNOTATION_FILENAME = "_annotations.coco.json"


class VOLCANICPLUMESConfig(datasets.BuilderConfig):
    """Builder Config for volcanic-plumes"""

    def __init__(self, data_urls, **kwargs):
        """
        BuilderConfig for volcanic-plumes.

        Args:
            data_urls: `dict`, name to url to download the zip file from.
            **kwargs: keyword arguments forwarded to super.
        """
        super(VOLCANICPLUMESConfig, self).__init__(version=datasets.Version("1.0.0"), **kwargs)
        self.data_urls = data_urls


class VOLCANICPLUMES(datasets.GeneratorBasedBuilder):
    """volcanic-plumes object detection dataset"""

    VERSION = datasets.Version("1.0.0")
    BUILDER_CONFIGS = [
        VOLCANICPLUMESConfig(
            name="full",
            description="Full version of volcanic-plumes dataset.",
            data_urls={
                "train": "https://huggingface.co/datasets/edouard-rolland/volcanic-plumes/resolve/main/data/train.zip",
                "validation": "https://huggingface.co/datasets/edouard-rolland/volcanic-plumes/resolve/main/data/valid.zip",
                "test": "https://huggingface.co/datasets/edouard-rolland/volcanic-plumes/resolve/main/data/test.zip",
            },
        ),
        VOLCANICPLUMESConfig(
            name="mini",
            description="Mini version of volcanic-plumes dataset.",
            data_urls={
                "train": "https://huggingface.co/datasets/edouard-rolland/volcanic-plumes/resolve/main/data/valid-mini.zip",
                "validation": "https://huggingface.co/datasets/edouard-rolland/volcanic-plumes/resolve/main/data/valid-mini.zip",
                "test": "https://huggingface.co/datasets/edouard-rolland/volcanic-plumes/resolve/main/data/valid-mini.zip",
            },
        )
    ]

    def _info(self):
        features = datasets.Features(
            {
                "image_id": datasets.Value("int64"),
                "image": datasets.Image(),
                "width": datasets.Value("int32"),
                "height": datasets.Value("int32"),
                "objects": datasets.Sequence(
                    {
                        "id": datasets.Value("int64"),
                        "area": datasets.Value("int64"),
                        "bbox": datasets.Sequence(datasets.Value("float32"), length=4),
                        "category": datasets.ClassLabel(names=_CATEGORIES),
                    }
                ),
            }
        )
        return datasets.DatasetInfo(
            features=features,
            homepage=_HOMEPAGE,
            citation=_CITATION,
            license=_LICENSE,
        )

    def _split_generators(self, dl_manager):
        # Download and unzip one archive per split; each archive contains the
        # images plus a single COCO annotation file.
        data_files = dl_manager.download_and_extract(self.config.data_urls)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "folder_dir": data_files["train"],
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "folder_dir": data_files["validation"],
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "folder_dir": data_files["test"],
                },
            ),
        ]

    def _generate_examples(self, folder_dir):
        def process_annot(annot, category_id_to_category):
            # Keep only the fields declared in _info(); map the COCO
            # category_id to its label name for the ClassLabel feature.
            return {
                "id": annot["id"],
                "area": annot["area"],
                "bbox": annot["bbox"],
                "category": category_id_to_category[annot["category_id"]],
            }

        idx = 0

        # Index the COCO annotation file: categories by id, annotations by
        # image id, and image records by file name.
        annotation_filepath = os.path.join(folder_dir, _ANNOTATION_FILENAME)
        with open(annotation_filepath, "r") as f:
            annotations = json.load(f)
        category_id_to_category = {category["id"]: category["name"] for category in annotations["categories"]}
        image_id_to_annotations = collections.defaultdict(list)
        for annot in annotations["annotations"]:
            image_id_to_annotations[annot["image_id"]].append(annot)
        filename_to_image = {image["file_name"]: image for image in annotations["images"]}

        # Yield one example per image file that appears in the annotation index.
        for filename in os.listdir(folder_dir):
            filepath = os.path.join(folder_dir, filename)
            if filename in filename_to_image:
                image = filename_to_image[filename]
                objects = [
                    process_annot(annot, category_id_to_category) for annot in image_id_to_annotations[image["id"]]
                ]
                with open(filepath, "rb") as f:
                    image_bytes = f.read()
                yield idx, {
                    "image_id": image["id"],
                    "image": {"path": filepath, "bytes": image_bytes},
                    "width": image["width"],
                    "height": image["height"],
                    "objects": objects,
                }
                idx += 1
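
With this script at the repository root, the dataset loads through the `datasets` library. A minimal usage sketch based on the configs defined above (the repo id and config names come from the script itself; recent `datasets` releases may additionally require `trust_remote_code=True` for script-backed datasets):

from datasets import load_dataset

# "full" draws on train.zip / valid.zip / test.zip; "mini" points all three
# splits at valid-mini.zip for quick experiments.
dataset = load_dataset("edouard-rolland/volcanic-plumes", name="full")

example = dataset["train"][0]
print(example["width"], example["height"])
print(example["objects"]["bbox"])      # COCO-style [x, y, width, height] boxes
print(example["objects"]["category"])  # class indices into ['plume', 'summit']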