kylewhy committed
Commit: 4fd86c8
Parent: fa3c8cd

remove batched operation and leave it to sampler

Files changed (2):
  1. README.md (+30, -17)
  2. quakeflow_nc.py (+29, -78)
README.md CHANGED
@@ -84,7 +84,21 @@ import torch
 from torch.utils.data import Dataset, IterableDataset, DataLoader
 from datasets import load_dataset
 ```
-We have 2 configurations for the dataset: `NCEDC` and `NCEDC_Batch`. `NCEDC` return event-based samples(with 10 stations each) one by one, while `NCEDC_Batch` return event-based samples in specified batch size and specified stations number(default:`batch_size=16`,`num_stations_list=[5, 10, 20]`). The default configuration is `NCEDC`. You can specify the configuration by argument `name`. For example:
+We have two configurations for the dataset: `NCEDC` and `NCEDC_full_size`. Both return event-based samples one by one, but `NCEDC` returns samples with 10 stations each, while `NCEDC_full_size` returns samples with all stations available in the original data.
+
+A sample of `NCEDC` is a dictionary with the following keys:
+- `waveform`: the waveform with shape `(3, nt, n_sta)`; the first dimension is the 3 components, the second is the number of time samples, and the third is the number of stations
+- `phase_pick`: the phase-pick probabilities with shape `(3, nt, n_sta)`; the first dimension is noise, P, and S
+- `event_location`: the event location with shape `(4,)`: latitude, longitude, depth, and time
+- `station_location`: the station locations with shape `(n_sta, 3)`: latitude, longitude, and depth
+
+Because Hugging Face datasets only support a dynamic size on the first dimension, a sample of `NCEDC_full_size` is a dictionary with the following keys:
+- `waveform`: the waveform with shape `(n_sta, 3, nt)`
+- `phase_pick`: the phase-pick probabilities with shape `(n_sta, 3, nt)`
+- `event_location`: the event location with shape `(4,)`
+- `station_location`: the station locations with shape `(n_sta, 3)`: latitude, longitude, and depth
+
+The default configuration is `NCEDC`. You can specify the configuration with the `name` argument. For example:
 ```python
 # load dataset
 # ATTENTION: Streaming(Iterable Dataset) is difficult to support because of the feature of HDF5
@@ -96,8 +110,8 @@ quakeflow_nc = load_dataset("AI4EPS/quakeflow_nc", split="train")
 # or
 quakeflow_nc = load_dataset("AI4EPS/quakeflow_nc", name="NCEDC", split="train")

-# to load "NCEDC_Batch"
-quakeflow_nc = load_dataset("AI4EPS/quakeflow_nc", name="NCEDC_Batch", split="train")
+# to load "NCEDC_full_size"
+quakeflow_nc = load_dataset("AI4EPS/quakeflow_nc", name="NCEDC_full_size", split="train")
 ```

 If you want to use the first several shards of the dataset, you can download the script `quakeflow_nc.py` and change the code as below:
@@ -145,38 +159,37 @@ for batch in dataloader:
     break
 ```

-#### Usage for `NCEDC_Batch`
-Then you can change the dataset into PyTorch format iterable dataset, and view the first sample:
+#### Usage for `NCEDC_full_size`
+
+Then you can convert the dataset into a PyTorch iterable dataset and view the first sample (don't forget to reorder the keys):
 ```python
-quakeflow_batch = datasets.load_dataset("AI4EPS/quakeflow_nc", split="train", name="NCEDC_Batch", batch_size=16, num_stations_list=[5,10,20])
+quakeflow_nc = datasets.load_dataset("AI4EPS/quakeflow_nc", split="train", name="NCEDC_full_size")

 # for PyTorch DataLoader, we need to divide the dataset into several shards
 num_workers=4
-quakeflow_batch = quakeflow_batch.to_iterable_dataset(num_shards=num_workers)
+quakeflow_nc = quakeflow_nc.to_iterable_dataset(num_shards=num_workers)
 # because add examples formatting to get tensors when using the "torch" format
 # has not been implemented yet, we need to manually add the formatting
-quakeflow_batch = quakeflow_batch.map(lambda x: {key: torch.from_numpy(np.array(value, dtype=np.float32)) for key, value in x.items()})
+quakeflow_nc = quakeflow_nc.map(lambda x: {key: torch.from_numpy(np.array(value, dtype=np.float32)) for key, value in x.items()})
 def reorder_keys(example):
-    example["waveform"] = example["waveform"].permute(1,2,3,0).contiguous()
-    example["phase_pick"] = example["phase_pick"].permute(1,2,3,0).contiguous()
-    example["station_location"] = example["station_location"].permute(1,0,2).contiguous()
+    example["waveform"] = example["waveform"].permute(1,2,0).contiguous()
+    example["phase_pick"] = example["phase_pick"].permute(1,2,0).contiguous()
     return example

-quakeflow_batch = quakeflow_batch.map(reorder_keys)
+quakeflow_nc = quakeflow_nc.map(reorder_keys)

 try:
-    isinstance(quakeflow_batch, torch.utils.data.IterableDataset)
+    isinstance(quakeflow_nc, torch.utils.data.IterableDataset)
 except:
     raise Exception("quakeflow_nc is not an IterableDataset")

 data_loader = DataLoader(
-    quakeflow_batch,
-    sampler=None,
-    collate_fn=None,
+    quakeflow_nc,
+    batch_size=1,
     num_workers=num_workers,
 )

-for batch in quakeflow_batch:
+for batch in quakeflow_nc:
     print("\nIterable test\n")
     print(batch.keys())
     for key in batch.keys():
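
Since `NCEDC_full_size` samples contain a variable number of stations, the updated README keeps `batch_size=1` and, per the commit message, leaves batching to the sampler/loader. Below is a minimal sketch, not part of this commit (the `pad_collate` helper and the `n_sta` key are illustrative), of a padding `collate_fn` that could batch the variable-station samples produced by the `NCEDC_full_size` usage above:

```python
import torch
from torch.nn.utils.rnn import pad_sequence


def pad_collate(batch):
    # batch: list of dicts with "waveform" (n_sta, 3, nt), "phase_pick" (n_sta, 3, nt),
    # "event_location" (4,), "station_location" (n_sta, 3); n_sta varies per event
    n_sta = torch.tensor([b["waveform"].shape[0] for b in batch])
    return {
        # pad along the station dimension so the tensors can be stacked: (B, max_n_sta, 3, nt)
        "waveform": pad_sequence([b["waveform"] for b in batch], batch_first=True),
        "phase_pick": pad_sequence([b["phase_pick"] for b in batch], batch_first=True),
        "event_location": torch.stack([b["event_location"] for b in batch]),
        "station_location": pad_sequence([b["station_location"] for b in batch], batch_first=True),
        "n_sta": n_sta,  # true station counts, so padded stations can be masked downstream
    }


# usage sketch:
# data_loader = DataLoader(quakeflow_nc, batch_size=8, collate_fn=pad_collate, num_workers=num_workers)
```

Padding keeps each batch rectangular; the returned `n_sta` lengths let a model mask out the padded stations.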
quakeflow_nc.py CHANGED
@@ -17,16 +17,10 @@
 """QuakeFlow_NC: A dataset of earthquake waveforms organized by earthquake events and based on the HDF5 format."""


-import csv
-import json
-import os
 import h5py
 import numpy as np
 import torch
-import fsspec
-from glob import glob
 from typing import Dict, List, Optional, Tuple, Union
-from collections import defaultdict

 import datasets

@@ -60,7 +54,7 @@ _LICENSE = ""
 _REPO = "https://huggingface.co/datasets/AI4EPS/quakeflow_nc/resolve/main/data"
 _URLS = {
     "NCEDC": [f"{_REPO}/ncedc_event_dataset_{i:03d}.h5" for i in range(37)],
-    "NCEDC_Batch": [f"{_REPO}/ncedc_event_dataset_{i:03d}.h5" for i in range(37)],
+    "NCEDC_full_size": [f"{_REPO}/ncedc_event_dataset_{i:03d}.h5" for i in range(37)],
 }

 class BatchBuilderConfig(datasets.BuilderConfig):
@@ -74,12 +68,13 @@ class BatchBuilderConfig(datasets.BuilderConfig):
         super().__init__(**kwargs)
         self.batch_size = batch_size
         self.num_stations_list = num_stations_list
-        #batch_size: int=None
-        #num_stations_list: List=None
+

 # TODO: Name of the dataset usually matches the script name with CamelCase instead of snake_case
 class QuakeFlow_NC(datasets.GeneratorBasedBuilder):
     """QuakeFlow_NC: A dataset of earthquake waveforms organized by earthquake events and based on the HDF5 format."""
+
+    VERSION = datasets.Version("1.1.0")

     degree2km = 111.32
     nt = 8192
@@ -88,8 +83,6 @@ class QuakeFlow_NC(datasets.GeneratorBasedBuilder):
     sampling_rate=100.0
     num_stations = 10

-    VERSION = datasets.Version("1.1.0")
-
     # This is an example of a dataset with multiple configurations.
     # If you don't want/need to define several sub-sets in your dataset,
     # just remove the BUILDER_CONFIG_CLASS and the BUILDER_CONFIGS attributes.
@@ -104,11 +97,11 @@ class QuakeFlow_NC(datasets.GeneratorBasedBuilder):

     # default config, you can change batch_size and num_stations_list when use `datasets.load_dataset`
     BUILDER_CONFIGS = [
-        BatchBuilderConfig(name="NCEDC", version=VERSION, description="yield event-based samples one by one, the", batch_size=1, num_stations_list=[num_stations]),
-        BatchBuilderConfig(name="NCEDC_Batch", version=VERSION, description="This part of my dataset covers a first domain", batch_size=16, num_stations_list=[5, 10, 20]),
+        datasets.BuilderConfig(name="NCEDC", version=VERSION, description="yield event-based samples one by one, the number of sample stations is fixed(default: 10)"),
+        datasets.BuilderConfig(name="NCEDC_full_size", version=VERSION, description="yield event-based samples one by one, the number of sample stations is the same as the number of stations in the event"),
     ]

-    DEFAULT_CONFIG_NAME = "NCEDC_Batch"  # It's not mandatory to have a default configuration. Just use one if it make sense.
+    DEFAULT_CONFIG_NAME = "NCEDC"  # It's not mandatory to have a default configuration. Just use one if it make sense.

     def _info(self):
         # TODO: This method specifies the datasets.DatasetInfo object which contains informations and typings for the dataset
@@ -121,13 +114,13 @@ class QuakeFlow_NC(datasets.GeneratorBasedBuilder):
                 "station_location": datasets.Array2D(shape=(self.num_stations, 3), dtype="float32"),
             })

-        elif self.config.name=="NCEDC_Batch":
+        elif self.config.name=="NCEDC_full_size":
             features=datasets.Features(
                 {
-                    "waveform": datasets.Array4D(shape=(None, self.config.batch_size, 3, self.nt), dtype='float32'),# datasets.Array4D(shape=(self.config.batch_size, 3, self.nt, self.num_stations), dtype='float32'),
-                    "phase_pick": datasets.Array4D(shape=(None, self.config.batch_size, 3, self.nt), dtype='float32'),
-                    "event_location": datasets.Array2D(shape=(self.config.batch_size, 4), dtype="float32"),
-                    "station_location": datasets.Array3D(shape=(None, self.config.batch_size, 3), dtype="float32"),
+                    "waveform": datasets.Array3D(shape=(None, 3, self.nt), dtype='float32'),
+                    "phase_pick": datasets.Array3D(shape=(None, 3, self.nt), dtype='float32'),
+                    "event_location": datasets.Sequence(datasets.Value("float32")),
+                    "station_location": datasets.Array2D(shape=(None, 3), dtype="float32"),
                 }
             )

@@ -166,8 +159,6 @@ class QuakeFlow_NC(datasets.GeneratorBasedBuilder):
                 gen_kwargs={
                     "filepath": files,
                     "split": "train",
-                    "batch_size": self.config.batch_size,
-                    "num_stations_list": self.config.num_stations_list,
                 },
             ),
             # datasets.SplitGenerator(
@@ -191,18 +182,10 @@ class QuakeFlow_NC(datasets.GeneratorBasedBuilder):


     # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
-    def _generate_examples(self, filepath, split, batch_size, num_stations_list):
+    def _generate_examples(self, filepath, split):
         # TODO: This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
         # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
-        num_batches = 0
-
-        num_stations_list = np.array(num_stations_list)
-        if self.config.name=="NCEDC_Batch":
-            waveform_buffer_per_group = defaultdict(list)
-            phase_pick_buffer_per_group = defaultdict(list)
-            event_location_buffer_per_group = defaultdict(list)
-            station_location_buffer_per_group = defaultdict(list)
-
+        num_stations = self.num_stations
         for file in filepath:
             with h5py.File(file, "r") as fp:
                 # for event_id in sorted(list(fp.keys())):
@@ -210,15 +193,11 @@ class QuakeFlow_NC(datasets.GeneratorBasedBuilder):
                     event = fp[event_id]
                     station_ids = list(event.keys())

-                    if len(station_ids) < num_stations_list[0]:
-                        continue
-
-                    if batch_size==1 and self.config.name=="NCEDC":
-                        num_stations = num_stations_list[0]
-                    elif self.config.name=="NCEDC_Batch":
-                        num_stations=num_stations_list[num_stations_list<=len(station_ids)][-1]
-
-                    station_ids = np.random.choice(station_ids, num_stations, replace=False)
+                    if self.config.name=="NCEDC":
+                        if len(station_ids) < num_stations:
+                            continue
+                        else:
+                            station_ids = np.random.choice(station_ids, num_stations, replace=False)

                     waveforms = np.zeros([3, self.nt, len(station_ids)])
                     phase_pick = np.zeros_like(waveforms)
@@ -240,49 +219,21 @@
                     waveforms = (waveforms - np.mean(waveforms, axis=1, keepdims=True)) / std
                     waveforms = waveforms.astype(np.float32)

-                    if batch_size==1 and self.config.name=="NCEDC":
+                    if self.config.name=="NCEDC":
                         yield event_id, {
                             "waveform": torch.from_numpy(waveforms).float(),
                             "phase_pick": torch.from_numpy(phase_pick).float(),
-                            "event_location": event_location,
-                            "station_location": station_location,
+                            "event_location": torch.from_numpy(np.array(event_location)).float(),
+                            "station_location": torch.from_numpy(np.array(station_location)).float(),
                         }
-                    elif self.config.name=="NCEDC_Batch":
-
-                        waveform_buffer_per_group[num_stations].append(waveforms)
-                        phase_pick_buffer_per_group[num_stations].append(phase_pick)
-                        event_location_buffer_per_group[num_stations].append(event_location)
-                        station_location_buffer_per_group[num_stations].append(station_location)
+                    elif self.config.name=="NCEDC_full_size":

-                        if len(waveform_buffer_per_group[num_stations])==batch_size:
-                            yield num_batches, {
-                                "waveform": torch.from_numpy(np.stack(waveform_buffer_per_group[num_stations], axis=0)).float().permute(3,0,1,2),
-                                "phase_pick": torch.from_numpy(np.stack(phase_pick_buffer_per_group[num_stations], axis=0)).float().permute(3,0,1,2),
-                                "event_location": torch.from_numpy(np.stack(event_location_buffer_per_group[num_stations], axis=0)).float(),
-                                "station_location": torch.from_numpy(np.stack(station_location_buffer_per_group[num_stations], axis=0)).float().permute(1,0,2),
-                            }
-                            del waveform_buffer_per_group[num_stations]
-                            del phase_pick_buffer_per_group[num_stations]
-                            del event_location_buffer_per_group[num_stations]
-                            del station_location_buffer_per_group[num_stations]
-                            num_batches += 1
-                            assert len(waveform_buffer_per_group[num_stations])<batch_size, "batch size is not correct"
-
-        '''
-        # drop_last=False
-        if self.config.name=="NCEDC_Batch":
-            for num_stations in waveform_buffer_per_group:
-                yield event_id, {
-                    "waveform": torch.from_numpy(np.stack(waveform_buffer_per_group, axis=0)).float(),
-                    "phase_pick": torch.from_numpy(np.stack(phase_pick_buffer_per_group, axis=0)).float(),
-                    "event_location": np.stack(event_location_buffer_per_group, axis=0),
-                    "station_location": np.stack(station_location_buffer_per_group, axis=0),
-                }
-                del waveform_buffer_per_group[num_stations]
-                del phase_pick_buffer_per_group[num_stations]
-                del event_location_buffer_per_group[num_stations]
-                del station_location_buffer_per_group[num_stations]
-        '''
+                        yield event_id, {
+                            "waveform": torch.from_numpy(waveforms).float().permute(2,0,1),
+                            "phase_pick": torch.from_numpy(phase_pick).float().permute(2,0,1),
+                            "event_location": torch.from_numpy(np.array(event_location)).float(),
+                            "station_location": torch.from_numpy(np.array(station_location)).float(),
+                        }


 def generate_label(phase_list, label_width=[150, 150], nt=8192):
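
The station-count bucketing that this commit removes from `_generate_examples` can still be reproduced downstream, which is what the commit message means by leaving it to the sampler. The sketch below is hypothetical and not part of the repository (the `bucket_by_num_stations` helper is illustrative); it groups `NCEDC_full_size` samples by station count and yields stacked batches, mirroring the deleted `NCEDC_Batch` behavior:

```python
from collections import defaultdict

import torch


def bucket_by_num_stations(examples, batch_size=16, num_stations_list=(5, 10, 20)):
    """Group samples into batches with a uniform station count, mirroring the removed NCEDC_Batch logic."""
    buffers = defaultdict(list)
    thresholds = sorted(num_stations_list)
    for ex in examples:  # each ex: dict of tensors, "waveform" with shape (n_sta, 3, nt), etc.
        n_sta = ex["waveform"].shape[0]
        eligible = [t for t in thresholds if t <= n_sta]
        if not eligible:
            continue  # skip events with fewer stations than the smallest group
        k = eligible[-1]
        # randomly keep k stations so every sample in this bucket has the same shape
        idx = torch.randperm(n_sta)[:k]
        buffers[k].append({
            "waveform": ex["waveform"][idx],
            "phase_pick": ex["phase_pick"][idx],
            "event_location": ex["event_location"],
            "station_location": ex["station_location"][idx],
        })
        if len(buffers[k]) == batch_size:
            batch = buffers.pop(k)
            yield {key: torch.stack([b[key] for b in batch]) for key in batch[0]}


# usage sketch:
# for batch in bucket_by_num_stations(quakeflow_nc):
#     ...
```

Unlike padding, bucketing keeps every batch at a uniform station count, at the cost of discarding stations above the chosen group size and holding partially filled buckets until they reach `batch_size`.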