dataset_script.py → quakeflow_nc.py RENAMED
@@ -11,8 +11,10 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+
 # TODO: Address all TODOs and remove all explanatory comments
-"""TODO: Add a description here."""
+# Lint as: python3
+"""QuakeFlow_NC: A dataset of earthquake waveforms organized by earthquake events and based on the HDF5 format."""


 import csv
@@ -38,7 +40,7 @@ year={2020}
 # TODO: Add description of the dataset here
 # You can copy an official description
 _DESCRIPTION = """\
-This new dataset is designed to solve this great NLP task and is crafted with a lot of care.
+A dataset of earthquake waveforms organized by earthquake events and based on the HDF5 format.
 """

 # TODO: Add a link to an official homepage for the dataset here
@@ -52,13 +54,13 @@ _LICENSE = ""
 # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
 _REPO = "https://huggingface.co/datasets/AI4EPS/QuakeFlow_NC/resolve/main/data"
 _URLS = {
-    "NCEDC": [f"{_REPO}/ncedc_event_dataset_{i:03d}.h5" for i in range(36)]
+    "NCEDC": [f"{_REPO}/ncedc_event_dataset_{i:03d}.h5" for i in range(37)]
 }


 # TODO: Name of the dataset usually matches the script name with CamelCase instead of snake_case
-class NewDataset(datasets.GeneratorBasedBuilder):
-    """TODO: Short description of my dataset."""
+class QuakeFlow_NC(datasets.GeneratorBasedBuilder):
+    """QuakeFlow_NC: A dataset of earthquake waveforms organized by earthquake events and based on the HDF5 format."""

     VERSION = datasets.Version("1.1.0")

@@ -85,7 +87,7 @@ class NewDataset(datasets.GeneratorBasedBuilder):
             {
                 "event_id": datasets.Value("string"),
                 "station_id": datasets.Value("string"),
-                "waveform": datasets.Array2D(shape=(3, 1200), dtype="float32"),
+                "waveform": datasets.Array2D(shape=(3, 12000), dtype="float32"),
             }
         )
         return datasets.DatasetInfo(
@@ -114,6 +116,7 @@ class NewDataset(datasets.GeneratorBasedBuilder):
         urls = _URLS[self.config.name]
         # files = dl_manager.download(urls)
         files = dl_manager.download_and_extract(urls)
+        # files = ["./data/ncedc_event_dataset_000.h5"]

         return [
             datasets.SplitGenerator(
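
For context, a minimal usage sketch of how the renamed quakeflow_nc.py script could be consumed with the Hugging Face datasets library. The repository id AI4EPS/QuakeFlow_NC, the "NCEDC" configuration, and the event_id / station_id / waveform features all come from the diff above; the "train" split name is an assumption, since the SplitGenerator definitions are truncated here.

# Minimal usage sketch (not part of the diff). Repo id, config name, and
# feature names are taken from the script above; the "train" split is assumed.
from datasets import load_dataset

ds = load_dataset(
    "AI4EPS/QuakeFlow_NC",  # resolves quakeflow_nc.py in the dataset repo
    name="NCEDC",           # the only config defined in _URLS (37 HDF5 shards)
    split="train",          # assumed split name; not shown in the truncated diff
)

example = ds[0]
print(example["event_id"], example["station_id"])
# Each waveform is declared as Array2D(shape=(3, 12000), dtype="float32"),
# i.e. 3 channels x 12000 samples per station trace.
print(len(example["waveform"]), len(example["waveform"][0]))  # 3 12000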