Datasets:

DOI:
License:
zhuwq0 committed on
Commit
9c0aeec
1 Parent(s): c48fd78
Files changed (1) hide show
  1. quakeflow_nc.py +65 -37
quakeflow_nc.py CHANGED
@@ -52,13 +52,31 @@ _LICENSE = ""
52
  # The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
53
  # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
54
  _REPO = "https://huggingface.co/datasets/AI4EPS/quakeflow_nc/resolve/main/data"
55
- _FILENAMES = ["NC1970-1989.h5", "NC1990-1994.h5", "NC1995-1999.h5", "NC2000-2004.h5", "NC2005-2009.h5", "NC2010.h5", "NC2011.h5", "NC2012.h5", "NC2013.h5", "NC2014.h5", "NC2015.h5", "NC2016.h5", "NC2017.h5", "NC2018.h5", "NC2019.h5", "NC2020.h5"]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
56
  # _FILENAMES = ["NC2020.h5"]
57
  _URLS = {
58
  "station": [f"{_REPO}/{x}" for x in _FILENAMES],
59
  "event": [f"{_REPO}/{x}" for x in _FILENAMES],
60
  }
61
 
 
62
  class BatchBuilderConfig(datasets.BuilderConfig):
63
  """
64
  yield a batch of event-based sample, so the number of sample stations can vary among batches
@@ -66,6 +84,7 @@ class BatchBuilderConfig(datasets.BuilderConfig):
66
  :param batch_size: number of samples in a batch
67
  :param num_stations_list: possible number of stations in a batch
68
  """
 
69
  def __init__(self, batch_size: int, num_stations_list: List, **kwargs):
70
  super().__init__(**kwargs)
71
  self.batch_size = batch_size
@@ -75,15 +94,15 @@ class BatchBuilderConfig(datasets.BuilderConfig):
75
  # TODO: Name of the dataset usually matches the script name with CamelCase instead of snake_case
76
  class QuakeFlow_NC(datasets.GeneratorBasedBuilder):
77
  """QuakeFlow_NC: A dataset of earthquake waveforms organized by earthquake events and based on the HDF5 format."""
78
-
79
  VERSION = datasets.Version("1.1.0")
80
 
81
  degree2km = 111.32
82
  nt = 8192
83
  feature_nt = 512
84
  feature_scale = int(nt / feature_nt)
85
- sampling_rate=100.0
86
-
87
  # This is an example of a dataset with multiple configurations.
88
  # If you don't want/need to define several sub-sets in your dataset,
89
  # just remove the BUILDER_CONFIG_CLASS and the BUILDER_CONFIGS attributes.
@@ -95,36 +114,39 @@ class QuakeFlow_NC(datasets.GeneratorBasedBuilder):
95
  # You will be able to load one or the other configurations in the following list with
96
  # data = datasets.load_dataset('my_dataset', 'first_domain')
97
  # data = datasets.load_dataset('my_dataset', 'second_domain')
98
-
99
  # default config, you can change batch_size and num_stations_list when use `datasets.load_dataset`
100
  BUILDER_CONFIGS = [
101
  datasets.BuilderConfig(name="station", version=VERSION, description="yield station-based samples one by one"),
102
  datasets.BuilderConfig(name="event", version=VERSION, description="yield event-based samples one by one"),
103
  ]
104
 
105
- DEFAULT_CONFIG_NAME = "station" # It's not mandatory to have a default configuration. Just use one if it make sense.
 
 
106
 
107
  def _info(self):
108
  # TODO: This method specifies the datasets.DatasetInfo object which contains informations and typings for the dataset
109
- if self.config.name=="station":
110
- features=datasets.Features(
111
  {
112
- "waveform": datasets.Array2D(shape=(3, self.nt), dtype='float32'),
113
- "phase_pick": datasets.Array2D(shape=(3, self.nt), dtype='float32'),
114
  "event_location": datasets.Sequence(datasets.Value("float32")),
115
  "station_location": datasets.Sequence(datasets.Value("float32")),
116
- })
117
-
118
- elif self.config.name=="event":
119
- features=datasets.Features(
 
120
  {
121
- "waveform": datasets.Array3D(shape=(None, 3, self.nt), dtype='float32'),
122
- "phase_pick": datasets.Array3D(shape=(None, 3, self.nt), dtype='float32'),
123
  "event_location": datasets.Sequence(datasets.Value("float32")),
124
  "station_location": datasets.Array2D(shape=(None, 3), dtype="float32"),
125
  }
126
  )
127
-
128
  return datasets.DatasetInfo(
129
  # This is the description that will appear on the datasets page.
130
  description=_DESCRIPTION,
@@ -173,14 +195,9 @@ class QuakeFlow_NC(datasets.GeneratorBasedBuilder):
173
  datasets.SplitGenerator(
174
  name=datasets.Split.TEST,
175
  # These kwargs will be passed to _generate_examples
176
- gen_kwargs={
177
- "filepath": files[-1:],
178
- "split": "test"
179
- },
180
  ),
181
  ]
182
-
183
-
184
 
185
  # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
186
  def _generate_examples(self, filepath, split):
@@ -195,20 +212,25 @@ class QuakeFlow_NC(datasets.GeneratorBasedBuilder):
195
  for event_id in event_ids:
196
  event = fp[event_id]
197
  station_ids = list(event.keys())
198
- if self.config.name=="station":
199
  waveforms = np.zeros([3, self.nt], dtype="float32")
200
  phase_pick = np.zeros_like(waveforms)
201
  attrs = event.attrs
202
- event_location = [attrs["longitude"], attrs["latitude"], attrs["depth_km"], attrs["event_time_index"]]
 
 
 
 
 
203
 
204
  for i, sta_id in enumerate(station_ids):
205
- # waveforms[:, :self.nt] = event[sta_id][:,:self.nt]
206
- waveforms[:, :self.nt] = event[sta_id][:self.nt,:].T
207
  attrs = event[sta_id].attrs
208
  p_picks = attrs["phase_index"][attrs["phase_type"] == "P"]
209
  s_picks = attrs["phase_index"][attrs["phase_type"] == "S"]
210
  # phase_pick[:, :self.nt] = generate_label([p_picks, s_picks], nt=self.nt)
211
- station_location = [attrs["longitude"], attrs["latitude"], -attrs["elevation_m"]/1e3]
212
 
213
  yield f"{event_id}/{sta_id}", {
214
  "waveform": torch.from_numpy(waveforms).float(),
@@ -217,22 +239,29 @@ class QuakeFlow_NC(datasets.GeneratorBasedBuilder):
217
  "station_location": torch.from_numpy(np.array(station_location)).float(),
218
  }
219
 
220
- elif self.config.name=="event":
221
  waveforms = np.zeros([len(station_ids), 3, self.nt], dtype="float32")
222
  phase_pick = np.zeros_like(waveforms)
223
  attrs = event.attrs
224
- event_location = [attrs["longitude"], attrs["latitude"], attrs["depth_km"], attrs["event_time_index"]]
 
 
 
 
 
225
  station_location = []
226
-
227
  for i, sta_id in enumerate(station_ids):
228
- # waveforms[i, :, :self.nt] = event[sta_id][:,:self.nt]
229
- waveforms[i, :, :self.nt] = event[sta_id][:self.nt,:].T
230
  attrs = event[sta_id].attrs
231
  p_picks = attrs["phase_index"][attrs["phase_type"] == "P"]
232
  s_picks = attrs["phase_index"][attrs["phase_type"] == "S"]
233
  phase_pick[i, :, :] = generate_label([p_picks, s_picks], nt=self.nt)
234
- station_location.append([attrs["longitude"], attrs["latitude"], -attrs["elevation_m"]/1e3])
235
-
 
 
236
  yield event_id, {
237
  "waveform": torch.from_numpy(waveforms).float(),
238
  "phase_pick": torch.from_numpy(phase_pick).float(),
@@ -242,7 +271,6 @@ class QuakeFlow_NC(datasets.GeneratorBasedBuilder):
242
 
243
 
244
  def generate_label(phase_list, label_width=[150, 150], nt=8192):
245
-
246
  target = np.zeros([len(phase_list) + 1, nt], dtype=np.float32)
247
 
248
  for i, (picks, w) in enumerate(zip(phase_list, label_width)):
@@ -254,4 +282,4 @@ def generate_label(phase_list, label_width=[150, 150], nt=8192):
254
 
255
  target[0:1, :] = np.maximum(0, 1 - np.sum(target[1:, :], axis=0, keepdims=True))
256
 
257
- return target
 
52
  # The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
53
  # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
54
  _REPO = "https://huggingface.co/datasets/AI4EPS/quakeflow_nc/resolve/main/data"
55
+ _FILENAMES = [
56
+ "NC1970-1989.h5",
57
+ "NC1990-1994.h5",
58
+ "NC1995-1999.h5",
59
+ "NC2000-2004.h5",
60
+ "NC2005-2009.h5",
61
+ "NC2010.h5",
62
+ "NC2011.h5",
63
+ "NC2012.h5",
64
+ "NC2013.h5",
65
+ "NC2014.h5",
66
+ "NC2015.h5",
67
+ "NC2016.h5",
68
+ "NC2017.h5",
69
+ "NC2018.h5",
70
+ "NC2019.h5",
71
+ "NC2020.h5",
72
+ ]
73
  # _FILENAMES = ["NC2020.h5"]
74
  _URLS = {
75
  "station": [f"{_REPO}/{x}" for x in _FILENAMES],
76
  "event": [f"{_REPO}/{x}" for x in _FILENAMES],
77
  }
78
 
79
+
80
  class BatchBuilderConfig(datasets.BuilderConfig):
81
  """
82
  yield a batch of event-based sample, so the number of sample stations can vary among batches
 
84
  :param batch_size: number of samples in a batch
85
  :param num_stations_list: possible number of stations in a batch
86
  """
87
+
88
  def __init__(self, batch_size: int, num_stations_list: List, **kwargs):
89
  super().__init__(**kwargs)
90
  self.batch_size = batch_size
 
94
  # TODO: Name of the dataset usually matches the script name with CamelCase instead of snake_case
95
  class QuakeFlow_NC(datasets.GeneratorBasedBuilder):
96
  """QuakeFlow_NC: A dataset of earthquake waveforms organized by earthquake events and based on the HDF5 format."""
97
+
98
  VERSION = datasets.Version("1.1.0")
99
 
100
  degree2km = 111.32
101
  nt = 8192
102
  feature_nt = 512
103
  feature_scale = int(nt / feature_nt)
104
+ sampling_rate = 100.0
105
+
106
  # This is an example of a dataset with multiple configurations.
107
  # If you don't want/need to define several sub-sets in your dataset,
108
  # just remove the BUILDER_CONFIG_CLASS and the BUILDER_CONFIGS attributes.
 
114
  # You will be able to load one or the other configurations in the following list with
115
  # data = datasets.load_dataset('my_dataset', 'first_domain')
116
  # data = datasets.load_dataset('my_dataset', 'second_domain')
117
+
118
  # default config, you can change batch_size and num_stations_list when use `datasets.load_dataset`
119
  BUILDER_CONFIGS = [
120
  datasets.BuilderConfig(name="station", version=VERSION, description="yield station-based samples one by one"),
121
  datasets.BuilderConfig(name="event", version=VERSION, description="yield event-based samples one by one"),
122
  ]
123
 
124
+ DEFAULT_CONFIG_NAME = (
125
+ "station" # It's not mandatory to have a default configuration. Just use one if it make sense.
126
+ )
127
 
128
  def _info(self):
129
  # TODO: This method specifies the datasets.DatasetInfo object which contains informations and typings for the dataset
130
+ if self.config.name == "station":
131
+ features = datasets.Features(
132
  {
133
+ "waveform": datasets.Array2D(shape=(3, self.nt), dtype="float32"),
134
+ "phase_pick": datasets.Array2D(shape=(3, self.nt), dtype="float32"),
135
  "event_location": datasets.Sequence(datasets.Value("float32")),
136
  "station_location": datasets.Sequence(datasets.Value("float32")),
137
+ }
138
+ )
139
+
140
+ elif self.config.name == "event":
141
+ features = datasets.Features(
142
  {
143
+ "waveform": datasets.Array3D(shape=(None, 3, self.nt), dtype="float32"),
144
+ "phase_pick": datasets.Array3D(shape=(None, 3, self.nt), dtype="float32"),
145
  "event_location": datasets.Sequence(datasets.Value("float32")),
146
  "station_location": datasets.Array2D(shape=(None, 3), dtype="float32"),
147
  }
148
  )
149
+
150
  return datasets.DatasetInfo(
151
  # This is the description that will appear on the datasets page.
152
  description=_DESCRIPTION,
 
195
  datasets.SplitGenerator(
196
  name=datasets.Split.TEST,
197
  # These kwargs will be passed to _generate_examples
198
+ gen_kwargs={"filepath": files[-1:], "split": "test"},
 
 
 
199
  ),
200
  ]
 
 
201
 
202
  # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
203
  def _generate_examples(self, filepath, split):
 
212
  for event_id in event_ids:
213
  event = fp[event_id]
214
  station_ids = list(event.keys())
215
+ if self.config.name == "station":
216
  waveforms = np.zeros([3, self.nt], dtype="float32")
217
  phase_pick = np.zeros_like(waveforms)
218
  attrs = event.attrs
219
+ event_location = [
220
+ attrs["longitude"],
221
+ attrs["latitude"],
222
+ attrs["depth_km"],
223
+ attrs["event_time_index"],
224
+ ]
225
 
226
  for i, sta_id in enumerate(station_ids):
227
+ waveforms[:, : self.nt] = event[sta_id][:, : self.nt]
228
+ # waveforms[:, : self.nt] = event[sta_id][: self.nt, :].T
229
  attrs = event[sta_id].attrs
230
  p_picks = attrs["phase_index"][attrs["phase_type"] == "P"]
231
  s_picks = attrs["phase_index"][attrs["phase_type"] == "S"]
232
  # phase_pick[:, :self.nt] = generate_label([p_picks, s_picks], nt=self.nt)
233
+ station_location = [attrs["longitude"], attrs["latitude"], -attrs["elevation_m"] / 1e3]
234
 
235
  yield f"{event_id}/{sta_id}", {
236
  "waveform": torch.from_numpy(waveforms).float(),
 
239
  "station_location": torch.from_numpy(np.array(station_location)).float(),
240
  }
241
 
242
+ elif self.config.name == "event":
243
  waveforms = np.zeros([len(station_ids), 3, self.nt], dtype="float32")
244
  phase_pick = np.zeros_like(waveforms)
245
  attrs = event.attrs
246
+ event_location = [
247
+ attrs["longitude"],
248
+ attrs["latitude"],
249
+ attrs["depth_km"],
250
+ attrs["event_time_index"],
251
+ ]
252
  station_location = []
253
+
254
  for i, sta_id in enumerate(station_ids):
255
+ waveforms[i, :, : self.nt] = event[sta_id][:, : self.nt]
256
+ # waveforms[i, :, : self.nt] = event[sta_id][: self.nt, :].T
257
  attrs = event[sta_id].attrs
258
  p_picks = attrs["phase_index"][attrs["phase_type"] == "P"]
259
  s_picks = attrs["phase_index"][attrs["phase_type"] == "S"]
260
  phase_pick[i, :, :] = generate_label([p_picks, s_picks], nt=self.nt)
261
+ station_location.append(
262
+ [attrs["longitude"], attrs["latitude"], -attrs["elevation_m"] / 1e3]
263
+ )
264
+
265
  yield event_id, {
266
  "waveform": torch.from_numpy(waveforms).float(),
267
  "phase_pick": torch.from_numpy(phase_pick).float(),
 
271
 
272
 
273
  def generate_label(phase_list, label_width=[150, 150], nt=8192):
 
274
  target = np.zeros([len(phase_list) + 1, nt], dtype=np.float32)
275
 
276
  for i, (picks, w) in enumerate(zip(phase_list, label_width)):
 
282
 
283
  target[0:1, :] = np.maximum(0, 1 - np.sum(target[1:, :], axis=0, keepdims=True))
284
 
285
+ return target