edouard-rolland committed on
Commit
2001a36
1 Parent(s): 77ccce3

Delete volcanic-plumes.py

Browse files
Files changed (1) hide show
  1. volcanic-plumes.py +0 -152
volcanic-plumes.py DELETED
@@ -1,152 +0,0 @@
1
- import collections
2
- import json
3
- import os
4
-
5
- import datasets
6
-
7
-
8
# Public landing page for this dataset on Roboflow Universe.
_HOMEPAGE = "https://universe.roboflow.com/edouardrolland/hiddenproject-wndzs/dataset/1"
# Distribution license for the images and annotations.
_LICENSE = "CC BY 4.0"
# BibTeX entry users should cite when publishing results based on this dataset.
_CITATION = """\
@misc{ hiddenproject-wndzs_dataset,
title = { HiddenProject Dataset },
type = { Open Source Dataset },
author = { EdouardRolland },
howpublished = { \\url{ https://universe.roboflow.com/edouardrolland/hiddenproject-wndzs } },
url = { https://universe.roboflow.com/edouardrolland/hiddenproject-wndzs },
journal = { Roboflow Universe },
publisher = { Roboflow },
year = { 2024 },
month = { feb },
note = { visited on 2024-02-01 },
}
"""
# Object-detection class names; list order defines the ClassLabel integer ids.
_CATEGORIES = ['plume', 'summit']
# COCO-format annotation file expected at the root of each split's folder.
_ANNOTATION_FILENAME = "_annotations.coco.json"
26
-
27
-
28
class VOLCANICPLUMESConfig(datasets.BuilderConfig):
    """Builder configuration for the volcanic-plumes dataset.

    A config is identified by its ``name`` and carries the download URLs
    for each split's zip archive.
    """

    def __init__(self, data_urls, **kwargs):
        """Create a volcanic-plumes builder config.

        Args:
            data_urls: `dict` mapping split name ("train"/"validation"/"test")
                to the URL of the zip file to download for that split.
            **kwargs: keyword arguments forwarded to `datasets.BuilderConfig`.
        """
        # Zero-arg super() is equivalent here; version is pinned for all configs.
        super().__init__(version=datasets.Version("1.0.0"), **kwargs)
        self.data_urls = data_urls
41
-
42
-
43
class VOLCANICPLUMES(datasets.GeneratorBasedBuilder):
    """volcanic-plumes object detection dataset.

    Each split is a zip archive containing the images plus a single
    COCO-format annotation file (``_annotations.coco.json``). Examples are
    yielded as (image, bounding boxes, categories) records.
    """

    VERSION = datasets.Version("1.0.0")
    BUILDER_CONFIGS = [
        VOLCANICPLUMESConfig(
            name="full",
            description="Full version of volcanic-plumes dataset.",
            data_urls={
                "train": "https://huggingface.co/datasets/edouard-rolland/volcanic-plumes/resolve/main/data/train.zip",
                "validation": "https://huggingface.co/datasets/edouard-rolland/volcanic-plumes/resolve/main/data/valid.zip",
                "test": "https://huggingface.co/datasets/edouard-rolland/volcanic-plumes/resolve/main/data/test.zip",
            },
        ),
        VOLCANICPLUMESConfig(
            name="mini",
            description="Mini version of volcanic-plumes dataset.",
            # The mini config intentionally reuses the small validation
            # archive for all three splits (quick smoke-testing).
            data_urls={
                "train": "https://huggingface.co/datasets/edouard-rolland/volcanic-plumes/resolve/main/data/valid-mini.zip",
                "validation": "https://huggingface.co/datasets/edouard-rolland/volcanic-plumes/resolve/main/data/valid-mini.zip",
                "test": "https://huggingface.co/datasets/edouard-rolland/volcanic-plumes/resolve/main/data/valid-mini.zip",
            },
        )
    ]

    def _info(self):
        """Return dataset metadata: feature schema, homepage, citation, license."""
        features = datasets.Features(
            {
                "image_id": datasets.Value("int64"),
                "image": datasets.Image(),
                "width": datasets.Value("int32"),
                "height": datasets.Value("int32"),
                # One entry per annotated object in the image.
                "objects": datasets.Sequence(
                    {
                        "id": datasets.Value("int64"),
                        "area": datasets.Value("int64"),
                        # COCO bbox convention: [x, y, width, height].
                        "bbox": datasets.Sequence(datasets.Value("float32"), length=4),
                        "category": datasets.ClassLabel(names=_CATEGORIES),
                    }
                ),
            }
        )
        return datasets.DatasetInfo(
            features=features,
            homepage=_HOMEPAGE,
            citation=_CITATION,
            license=_LICENSE,
        )

    def _split_generators(self, dl_manager):
        """Download/extract each split's archive and wire it to a generator."""
        data_files = dl_manager.download_and_extract(self.config.data_urls)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "folder_dir": data_files["train"],
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "folder_dir": data_files["validation"],
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "folder_dir": data_files["test"],
                },
            ),
        ]

    def _generate_examples(self, folder_dir):
        """Yield (idx, example) pairs for every annotated image in `folder_dir`.

        Args:
            folder_dir: extracted split directory containing the images and
                the COCO annotation file.

        Yields:
            Tuples of (running index, example dict matching the features in
            `_info`). Images present on disk but absent from the annotation
            file are skipped.
        """
        def process_annot(annot, category_id_to_category):
            # Map one raw COCO annotation to the schema declared in _info().
            return {
                "id": annot["id"],
                "area": annot["area"],
                "bbox": annot["bbox"],
                "category": category_id_to_category[annot["category_id"]],
            }

        idx = 0

        annotation_filepath = os.path.join(folder_dir, _ANNOTATION_FILENAME)
        # COCO JSON is UTF-8; be explicit so decoding does not depend on locale.
        with open(annotation_filepath, "r", encoding="utf-8") as f:
            annotations = json.load(f)
        category_id_to_category = {category["id"]: category["name"] for category in annotations["categories"]}
        # Group annotations by their image id for O(1) lookup per image.
        image_id_to_annotations = collections.defaultdict(list)
        for annot in annotations["annotations"]:
            image_id_to_annotations[annot["image_id"]].append(annot)
        filename_to_image = {image["file_name"]: image for image in annotations["images"]}

        # Sort the directory listing: os.listdir order is arbitrary and
        # platform-dependent, which would make example order (and keys)
        # nondeterministic across runs.
        for filename in sorted(os.listdir(folder_dir)):
            filepath = os.path.join(folder_dir, filename)
            if filename in filename_to_image:
                image = filename_to_image[filename]
                objects = [
                    process_annot(annot, category_id_to_category) for annot in image_id_to_annotations[image["id"]]
                ]
                with open(filepath, "rb") as f:
                    image_bytes = f.read()
                yield idx, {
                    "image_id": image["id"],
                    "image": {"path": filepath, "bytes": image_bytes},
                    "width": image["width"],
                    "height": image["height"],
                    "objects": objects,
                }
                idx += 1