zhuwq0 committed
Commit 6e479d6
Parent: 2510d03

update script

Files changed (1):
  1. quakeflow_nc.py +58 -59
quakeflow_nc.py CHANGED
@@ -29,10 +29,9 @@ import datasets
 # Find for instance the citation on arxiv or on the dataset repo/website
 _CITATION = """\
 @InProceedings{huggingface:dataset,
-title = {A great new dataset},
-author={huggingface, Inc.
-},
-year={2020}
+title = {NCEDC dataset for QuakeFlow},
+author={Zhu et al.},
+year={2023}
 }
 """
 
@@ -52,9 +51,10 @@ _LICENSE = ""
 # The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
 # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
 _REPO = "https://huggingface.co/datasets/AI4EPS/quakeflow_nc/resolve/main/data"
+_FILENAMES = ["NC1970-1989.h5", "NC1990-1994.h5", "NC1995-1999.h5", "NC2000-2004.h5", "NC2005-2009.h5", "NC2010.h5", "NC2011.h5", "NC2012.h5", "NC2013.h5", "NC2014.h5", "NC2015.h5", "NC2016.h5", "NC2017.h5", "NC2018.h5", "NC2019.h5", "NC2020.h5"]
 _URLS = {
-    "NCEDC": [f"{_REPO}/ncedc_event_dataset_{i:03d}.h5" for i in range(37)],
-    "NCEDC_full_size": [f"{_REPO}/ncedc_event_dataset_{i:03d}.h5" for i in range(37)],
+    "station": [f"{_REPO}/{x}" for x in _FILENAMES],
+    "event": [f"{_REPO}/{x}" for x in _FILENAMES],
 }

 class BatchBuilderConfig(datasets.BuilderConfig):
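
Since the data now ships as a handful of named HDF5 shards, a single shard can also be pulled and inspected directly. A sketch (not part of the commit), assuming the files sit under data/ in the AI4EPS/quakeflow_nc repo as the _REPO URL suggests:

    # Sketch: fetch one shard and look at its layout (events -> stations).
    import h5py
    from huggingface_hub import hf_hub_download

    path = hf_hub_download(
        repo_id="AI4EPS/quakeflow_nc",
        filename="data/NC2020.h5",  # assumed path; see _REPO and _FILENAMES above
        repo_type="dataset",
    )
    with h5py.File(path, "r") as fp:
        event_id = next(iter(fp.keys()))          # events are top-level groups
        station_ids = list(fp[event_id].keys())   # each station is a dataset in the group
        print(event_id, station_ids[:3])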
@@ -81,7 +81,6 @@ class QuakeFlow_NC(datasets.GeneratorBasedBuilder):
     feature_nt = 512
     feature_scale = int(nt / feature_nt)
     sampling_rate = 100.0
-    num_stations = 10

     # This is an example of a dataset with multiple configurations.
     # If you don't want/need to define several sub-sets in your dataset,
@@ -97,24 +96,24 @@ class QuakeFlow_NC(datasets.GeneratorBasedBuilder):

     # default config, you can change batch_size and num_stations_list when using `datasets.load_dataset`
     BUILDER_CONFIGS = [
-        datasets.BuilderConfig(name="NCEDC", version=VERSION, description="yield event-based samples one by one, the number of sample stations is fixed (default: 10)"),
-        datasets.BuilderConfig(name="NCEDC_full_size", version=VERSION, description="yield event-based samples one by one, the number of sample stations is the same as the number of stations in the event"),
+        datasets.BuilderConfig(name="station", version=VERSION, description="yield station-based samples one by one"),
+        datasets.BuilderConfig(name="event", version=VERSION, description="yield event-based samples one by one"),
     ]

-    DEFAULT_CONFIG_NAME = "NCEDC"  # It's not mandatory to have a default configuration. Just use one if it makes sense.
+    DEFAULT_CONFIG_NAME = "station"  # It's not mandatory to have a default configuration. Just use one if it makes sense.

     def _info(self):
         # TODO: This method specifies the datasets.DatasetInfo object which contains information and typings for the dataset
-        if self.config.name == "NCEDC":
+        if self.config.name == "station":
             features = datasets.Features(
                 {
-                    "waveform": datasets.Array3D(shape=(3, self.nt, self.num_stations), dtype="float32"),
-                    "phase_pick": datasets.Array3D(shape=(3, self.nt, self.num_stations), dtype="float32"),
+                    "waveform": datasets.Array2D(shape=(3, self.nt), dtype="float32"),
+                    "phase_pick": datasets.Array2D(shape=(3, self.nt), dtype="float32"),
                     "event_location": datasets.Sequence(datasets.Value("float32")),
-                    "station_location": datasets.Array2D(shape=(self.num_stations, 3), dtype="float32"),
+                    "station_location": datasets.Sequence(datasets.Value("float32")),
                 })

-        elif self.config.name == "NCEDC_full_size":
+        elif self.config.name == "event":
             features = datasets.Features(
                 {
                     "waveform": datasets.Array3D(shape=(None, 3, self.nt), dtype="float32"),
@@ -157,7 +156,7 @@ class QuakeFlow_NC(datasets.GeneratorBasedBuilder):
                 name=datasets.Split.TRAIN,
                 # These kwargs will be passed to _generate_examples
                 gen_kwargs={
-                    "filepath": files,
+                    "filepath": files[:-1],
                     "split": "train",
                 },
             ),
@@ -169,14 +168,14 @@ class QuakeFlow_NC(datasets.GeneratorBasedBuilder):
             #         "split": "dev",
             #     },
             # ),
-            # datasets.SplitGenerator(
-            #     name=datasets.Split.TEST,
-            #     # These kwargs will be passed to _generate_examples
-            #     gen_kwargs={
-            #         "filepath": os.path.join(data_dir, "test.jsonl"),
-            #         "split": "test"
-            #     },
-            # ),
+            datasets.SplitGenerator(
+                name=datasets.Split.TEST,
+                # These kwargs will be passed to _generate_examples
+                gen_kwargs={
+                    "filepath": files[-1:],
+                    "split": "test"
+                },
+            ),
         ]
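
The head of _split_generators is outside this hunk, so how `files` is built is not shown; presumably it is the download of _URLS[self.config.name], so files[:-1] trains on all shards but the last while files[-1:] holds the final shard out as the test split. A hedged sketch of the surrounding method, reconstructed only from the gen_kwargs visible above:

    # Sketch (not shown in the diff): plausible shape of _split_generators.
    # dl_manager.download() fetches/caches the URLs and returns local paths.
    def _split_generators(self, dl_manager):
        files = dl_manager.download(_URLS[self.config.name])
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": files[:-1], "split": "train"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": files[-1:], "split": "test"},
            ),
        ]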
 
@@ -185,55 +184,55 @@ class QuakeFlow_NC(datasets.GeneratorBasedBuilder):
     def _generate_examples(self, filepath, split):
         # TODO: This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
         # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
-        num_stations = self.num_stations
+
         for file in filepath:
             with h5py.File(file, "r") as fp:
                 # for event_id in sorted(list(fp.keys())):
                 for event_id in fp.keys():
                     event = fp[event_id]
                     station_ids = list(event.keys())
-
-                    if self.config.name == "NCEDC":
-                        if len(station_ids) < num_stations:
-                            continue
-                        else:
-                            station_ids = np.random.choice(station_ids, num_stations, replace=False)
-
-                    waveforms = np.zeros([3, self.nt, len(station_ids)])
-                    phase_pick = np.zeros_like(waveforms)
-                    attrs = event.attrs
-                    event_location = [attrs["longitude"], attrs["latitude"], attrs["depth_km"], attrs["event_time_index"]]
-                    station_location = []
-
-                    for i, sta_id in enumerate(station_ids):
-                        # trace_id = event_id + "/" + sta_id
-                        waveforms[:, :, i] = event[sta_id][:, :self.nt]
-                        attrs = event[sta_id].attrs
-                        p_picks = attrs["phase_index"][attrs["phase_type"] == "P"]
-                        s_picks = attrs["phase_index"][attrs["phase_type"] == "S"]
-                        phase_pick[:, :, i] = generate_label([p_picks, s_picks], nt=self.nt)
-                        station_location.append([attrs["longitude"], attrs["latitude"], -attrs["elevation_m"] / 1e3])
-
-                    std = np.std(waveforms, axis=1, keepdims=True)
-                    std[std == 0] = 1.0
-                    waveforms = (waveforms - np.mean(waveforms, axis=1, keepdims=True)) / std
-                    waveforms = waveforms.astype(np.float32)
-
-                    if self.config.name == "NCEDC":
-                        yield event_id, {
-                            "waveform": torch.from_numpy(waveforms).float(),
-                            "phase_pick": torch.from_numpy(phase_pick).float(),
-                            "event_location": torch.from_numpy(np.array(event_location)).float(),
-                            "station_location": torch.from_numpy(np.array(station_location)).float(),
-                        }
-                    elif self.config.name == "NCEDC_full_size":
-                        yield event_id, {
-                            "waveform": torch.from_numpy(waveforms).float().permute(2, 0, 1),
-                            "phase_pick": torch.from_numpy(phase_pick).float().permute(2, 0, 1),
-                            "event_location": torch.from_numpy(np.array(event_location)).float(),
-                            "station_location": torch.from_numpy(np.array(station_location)).float(),
-                        }
+                    if self.config.name == "station":
+                        waveforms = np.zeros([3, self.nt], dtype="float32")
+                        phase_pick = np.zeros_like(waveforms)
+                        attrs = event.attrs
+                        event_location = [attrs["longitude"], attrs["latitude"], attrs["depth_km"], attrs["event_time_index"]]
+
+                        for i, sta_id in enumerate(station_ids):
+                            waveforms[:, :self.nt] = event[sta_id][:, :self.nt]
+                            attrs = event[sta_id].attrs
+                            p_picks = attrs["phase_index"][attrs["phase_type"] == "P"]
+                            s_picks = attrs["phase_index"][attrs["phase_type"] == "S"]
+                            phase_pick[:, :self.nt] = generate_label([p_picks, s_picks], nt=self.nt)
+                            station_location = [attrs["longitude"], attrs["latitude"], -attrs["elevation_m"] / 1e3]
+
+                            yield f"{event_id}/{sta_id}", {
+                                "waveform": torch.from_numpy(waveforms).float(),
+                                "phase_pick": torch.from_numpy(phase_pick).float(),
+                                "event_location": torch.from_numpy(np.array(event_location)).float(),
+                                "station_location": torch.from_numpy(np.array(station_location)).float(),
+                            }
+
+                    elif self.config.name == "event":
+                        waveforms = np.zeros([len(station_ids), 3, self.nt], dtype="float32")
+                        phase_pick = np.zeros_like(waveforms)
+                        attrs = event.attrs
+                        event_location = [attrs["longitude"], attrs["latitude"], attrs["depth_km"], attrs["event_time_index"]]
+                        station_location = []
+
+                        for i, sta_id in enumerate(station_ids):
+                            waveforms[i, :, :self.nt] = event[sta_id][:, :self.nt]
+                            attrs = event[sta_id].attrs
+                            p_picks = attrs["phase_index"][attrs["phase_type"] == "P"]
+                            s_picks = attrs["phase_index"][attrs["phase_type"] == "S"]
+                            phase_pick[i, :, :] = generate_label([p_picks, s_picks], nt=self.nt)
+                            station_location.append([attrs["longitude"], attrs["latitude"], -attrs["elevation_m"] / 1e3])
+
+                        yield event_id, {
+                            "waveform": torch.from_numpy(waveforms).float(),
+                            "phase_pick": torch.from_numpy(phase_pick).float(),
+                            "event_location": torch.from_numpy(np.array(event_location)).float(),
+                            "station_location": torch.from_numpy(np.array(station_location)).float(),
+                        }


 def generate_label(phase_list, label_width=[150, 150], nt=8192):
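
Only the signature of generate_label appears in this diff. For orientation, a hedged sketch of a typical implementation of such a labeler: a smooth target of width label_width around each P/S pick index plus a background row, giving the (3, nt) array that phase_pick expects above. The actual body may differ:

    # Sketch (not part of the commit): one background row plus one row per
    # phase type (P, S), so two pick lists yield shape (3, nt).
    import numpy as np

    def generate_label_sketch(phase_list, label_width=[150, 150], nt=8192):
        target = np.zeros([len(phase_list) + 1, nt], dtype="float32")
        for i, (picks, w) in enumerate(zip(phase_list, label_width)):
            for t in picks:
                t = int(t)
                lo, hi = max(t - w // 2, 0), min(t + w // 2, nt)
                if lo >= hi:
                    continue  # pick falls outside the window
                gaussian = np.exp(-((np.arange(lo, hi) - t) ** 2) / (2 * (w / 6) ** 2))
                target[i + 1, lo:hi] = np.maximum(target[i + 1, lo:hi], gaussian)
        # Background channel: whatever probability mass the phase rows leave over.
        target[0, :] = np.maximum(0.0, 1.0 - target[1:, :].sum(axis=0))
        return target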
 