Rhodes committed on
Commit 73c7a43
1 Parent(s): 76c61bf

:new: Added epicenter mode

Files changed (2)
  1. epicenters.parquet +3 -0
  2. quakeset.py +77 -38
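
With the second builder configuration added by this commit, the epicenter mode is selected by name at load time. A minimal usage sketch, assuming a hypothetical repo id (`user/QuakeSet` is a placeholder for wherever this dataset is hosted):

    import datasets

    # "user/QuakeSet" is a placeholder repo id, not the real one; script-based
    # datasets also need trust_remote_code=True on recent versions of the library.
    ds = datasets.load_dataset(
        "user/QuakeSet", name="epicenter", split="train", trust_remote_code=True
    )
    print(ds[0]["contains_epicenter"], ds[0]["epicenter"])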
epicenters.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0632e0fb00f98c957531f9c5defee2cf41e960ae420f03f19a980d20254b613e
+size 15972926
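
Git LFS keeps only this pointer in the repository, so the diff shows the pointer file rather than the table itself. Judging from the columns `_generate_examples` reads below, the file can be inspected directly once pulled; a small sketch (column names inferred from the loader code, not from the file):

    import pandas as pd

    # Columns the loader uses: sample_id, contains_epicenter, epicenter, lon, lat.
    df = pd.read_parquet("epicenters.parquet")
    print(df.columns.tolist())
    print(df.head())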
quakeset.py CHANGED
@@ -19,6 +19,7 @@ import os
 import datasets
 import h5py
 import numpy as np
+import pandas as pd
 
 # Find for instance the citation on arxiv or on the dataset repo/website
 _CITATION = """\
@@ -38,7 +39,7 @@ _LICENSE = "OPENRAIL"
 
 # The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
 # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
-_URLS = "earthquakes.h5"
+_URLS = ["earthquakes.h5", "epicenters.parquet"]
 
 
 class QuakeSet(datasets.GeneratorBasedBuilder):
@@ -61,27 +62,48 @@ class QuakeSet(datasets.GeneratorBasedBuilder):
         datasets.BuilderConfig(
             name="default",
             version=VERSION,
-            description="Default configuration. No other configuration is available",
-        )
+            description="Default configuration",
+        ),
+        datasets.BuilderConfig(
+            name="epicenter",
+            version=VERSION,
+            description="Epicenter configuration",
+        ),
     ]
 
     DEFAULT_CONFIG_NAME = "default"  # It's not mandatory to have a default configuration. Just use one if it make sense.
 
     def _info(self):
-        features = datasets.Features(
-            {
-                "sample_id": datasets.Value("string"),
-                "pre_post_image": datasets.Array3D(
-                    shape=(4, 512, 512), dtype="float32"
-                ),
-                "affected": datasets.ClassLabel(num_classes=2),
-                "magnitude": datasets.Value("float32"),
-                "hypocenter": datasets.Sequence(datasets.Value("float32"), length=3),
-                "epsg": datasets.Value("int32"),
-                "x": datasets.Sequence(datasets.Value("float32"), length=512),
-                "y": datasets.Sequence(datasets.Value("float32"), length=512),
-            }
-        )
+        if self.config.name == "default":
+            features = datasets.Features(
+                {
+                    "pre_post_image": datasets.Array3D(
+                        shape=(4, 512, 512), dtype="float32"
+                    ),
+                    "affected": datasets.ClassLabel(num_classes=2),
+                    "magnitude": datasets.Value("float32"),
+                    "hypocenter": datasets.Sequence(
+                        datasets.Value("float32"), length=3
+                    ),
+                    "epsg": datasets.Value("int32"),
+                    "x": datasets.Sequence(datasets.Value("float32"), length=512),
+                    "y": datasets.Sequence(datasets.Value("float32"), length=512),
+                }
+            )
+        elif self.config.name == "epicenter":
+            features = datasets.Features(
+                {
+                    "pre_post_image": datasets.Array3D(
+                        shape=(4, 512, 512), dtype="float32"
+                    ),
+                    "contains_epicenter": datasets.ClassLabel(num_classes=2),
+                    "epsg": datasets.Value("int32"),
+                    "epicenter": datasets.Sequence(datasets.Value("float32"), length=2),
+                    "lon": datasets.Sequence(datasets.Value("float32"), length=512),
+                    "lat": datasets.Sequence(datasets.Value("float32"), length=512),
+                    "affected": datasets.ClassLabel(num_classes=2),
+                }
+            )
 
         return datasets.DatasetInfo(
             # This is the description that will appear on the datasets page.
@@ -105,13 +127,13 @@ class QuakeSet(datasets.GeneratorBasedBuilder):
         # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
         # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
         urls = _URLS
-        hdf5_file = dl_manager.download(urls)
+        files = dl_manager.download(urls)
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
                 # These kwargs will be passed to _generate_examples
                 gen_kwargs={
-                    "filepath": hdf5_file,
+                    "filepath": files,
                     "split": "train",
                 },
             ),
@@ -119,7 +141,7 @@ class QuakeSet(datasets.GeneratorBasedBuilder):
                 name=datasets.Split.VALIDATION,
                 # These kwargs will be passed to _generate_examples
                 gen_kwargs={
-                    "filepath": hdf5_file,
+                    "filepath": files,
                     "split": "validation",
                 },
             ),
@@ -127,7 +149,7 @@ class QuakeSet(datasets.GeneratorBasedBuilder):
                 name=datasets.Split.TEST,
                 # These kwargs will be passed to _generate_examples
                 gen_kwargs={
-                    "filepath": hdf5_file,
+                    "filepath": files,
                     "split": "test",
                 },
             ),
@@ -136,8 +158,9 @@ class QuakeSet(datasets.GeneratorBasedBuilder):
     # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
     def _generate_examples(self, filepath, split):
         # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
+        df = pd.read_parquet(filepath[1])
         sample_ids = []
-        with h5py.File(filepath) as f:
+        with h5py.File(filepath[0]) as f:
             for key, patches in f.items():
                 attributes = dict(f[key].attrs)
                 if attributes["split"] != split:
@@ -153,14 +176,6 @@ class QuakeSet(datasets.GeneratorBasedBuilder):
                 if "x" in sample_id or "y" in sample_id:
                     continue
 
-                resource_id, patch_id = sample_id.split("/")
-                x = f[resource_id]["x"][...]
-                y = f[resource_id]["y"][...]
-                x_start = int(patch_id.split("_")[1]) % (x.shape[0] // 512)
-                y_start = int(patch_id.split("_")[1]) // (x.shape[0] // 512)
-                x = x[x_start * 512 : (x_start + 1) * 512]
-                y = y[y_start * 512 : (y_start + 1) * 512]
-
                 pre_key = "pre" if label == 1 else "before"
                 post_key = "post" if label == 1 else "pre"
                 pre_sample = f[sample_id][pre_key][...]
@@ -170,14 +185,38 @@ class QuakeSet(datasets.GeneratorBasedBuilder):
                 sample = np.concatenate(
                     [pre_sample, post_sample], axis=0, dtype=np.float32
                 )
-
-                yield f"{sample_id}/{post_key}", {
-                    "sample_id": f"{sample_id}/{post_key}",
+                sample_key = f"{sample_id}/{post_key}"
+                item = {
                     "pre_post_image": sample,
-                    "affected": label,
-                    "magnitude": np.float32(attributes["magnitude"]),
-                    "hypocenter": attributes["hypocenter"],
                     "epsg": attributes["epsg"],
-                    "x": x.flatten(),
-                    "y": y.flatten(),
                 }
+
+                if self.config.name == "default":
+                    resource_id, patch_id = sample_id.split("/")
+                    x = f[resource_id]["x"][...]
+                    y = f[resource_id]["y"][...]
+                    x_start = int(patch_id.split("_")[1]) % (x.shape[0] // 512)
+                    y_start = int(patch_id.split("_")[1]) // (x.shape[0] // 512)
+                    x = x[x_start * 512 : (x_start + 1) * 512]
+                    y = y[y_start * 512 : (y_start + 1) * 512]
+                    item |= {
+                        "affected": label,
+                        "magnitude": np.float32(attributes["magnitude"]),
+                        "hypocenter": attributes["hypocenter"],
+                        "x": x.flatten(),
+                        "y": y.flatten(),
+                    }
+                elif self.config.name == "epicenter":
+                    selected_infos = df[df["sample_id"] == sample_key]
+                    if len(selected_infos) > 1:
+                        print(selected_infos)
+                    item |= {
+                        "affected": label,
+                        "contains_epicenter": label == 1
+                        and selected_infos["contains_epicenter"].item(),
+                        "epicenter": selected_infos["epicenter"].item(),
+                        "lon": selected_infos["lon"].item(),
+                        "lat": selected_infos["lat"].item(),
+                    }
+
+                yield sample_key, item
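
Two details of the reworked `_generate_examples` are easy to miss: `item |= {...}` is the in-place dict merge added in Python 3.9, so the script now requires at least that version, and the "default" branch recovers a patch's 512-px coordinate window from the numeric suffix of its `patch_id`. A worked example of that index arithmetic, with a hypothetical raster width of 2048 px and patch index 9:

    # Hypothetical example: a 2048-px-wide raster gives 2048 // 512 = 4 patches per row.
    patches_per_row = 2048 // 512  # 4

    patch_index = 9  # e.g. the "9" parsed from a patch_id like "patch_9"
    x_start = patch_index % patches_per_row   # 1 -> second patch column
    y_start = patch_index // patches_per_row  # 2 -> third patch row

    # The coordinate slices taken by the loader then cover
    # x[512:1024] and y[1024:1536].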