Commit ed36017 by kylewhy
Parent(s): 5247110

fix the bugs

Files changed (1):
  QuakeFlow_NC.py (+164, -0)

QuakeFlow_NC.py ADDED
@@ -0,0 +1,164 @@
+# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Lint as: python3
+"""QuakeFlow_NC: a dataset of earthquake waveforms organized by earthquake event and stored in HDF5 format."""
+
+
+import os
+
+import h5py
+
+import datasets
+
+
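+# NOTE: h5py is required by this loading script but is not a dependency of the
+# datasets library itself; install it separately (e.g., pip install h5py).
+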
+# TODO: Add BibTeX citation
+# Find, for instance, the citation on arXiv or on the dataset repo/website
+_CITATION = """\
+@InProceedings{huggingface:dataset,
+  title  = {A great new dataset},
+  author = {huggingface, Inc.},
+  year   = {2020}
+}
+"""
+
+_DESCRIPTION = """\
+A dataset of earthquake waveforms organized by earthquake event and stored in HDF5 format.
+"""
+
+# TODO: Add a link to an official homepage for the dataset here
+_HOMEPAGE = ""
+
+# TODO: Add the license for the dataset here if you can find it
+_LICENSE = ""
+
+# The HuggingFace Datasets library doesn't host datasets; it only points to the
+# original files. This can be an arbitrary nested dict/list of URLs (see the
+# `_split_generators` method below).
+_REPO = "https://huggingface.co/datasets/AI4EPS/QuakeFlow_NC/resolve/main/data"
+_URLS = {
+    "NCEDC": [f"{_REPO}/ncedc_event_dataset_{i:03d}.h5" for i in range(37)],
+}
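+# NOTE: the NCEDC configuration is sharded into 37 HDF5 files
+# (ncedc_event_dataset_000.h5 through ncedc_event_dataset_036.h5); each file holds
+# a set of earthquake events, with one 3-component trace per recording station.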
+
+
+class QuakeFlow_NC(datasets.GeneratorBasedBuilder):
+    """QuakeFlow_NC: a dataset of earthquake waveforms organized by earthquake event and stored in HDF5 format."""
+
+    VERSION = datasets.Version("1.1.0")
+
+    # This dataset ships a single configuration. To expose several sub-sets with
+    # configurable options, define a custom datasets.BuilderConfig subclass, set it
+    # as BUILDER_CONFIG_CLASS, and list one entry per sub-set in BUILDER_CONFIGS;
+    # users then pick one via datasets.load_dataset('AI4EPS/QuakeFlow_NC', '<config_name>').
+    BUILDER_CONFIGS = [
+        datasets.BuilderConfig(
+            name="NCEDC",
+            version=VERSION,
+            description="Earthquake event waveforms from the Northern California Earthquake Data Center (NCEDC)",
+        ),
+    ]
+
+    DEFAULT_CONFIG_NAME = "NCEDC"  # Optional; set a default when it makes sense.
+
+    def _info(self):
+        # Defines the datasets.DatasetInfo object, which holds the description and
+        # the feature types (columns) of the dataset.
+        features = datasets.Features(
+            {
+                "event_id": datasets.Value("string"),
+                "station_id": datasets.Value("string"),
+                "waveform": datasets.Array2D(shape=(3, 12000), dtype="float32"),
+            }
+        )
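+        # NOTE: each waveform is a fixed-shape (3, 12000) float32 array, i.e. three
+        # channels by 12000 samples. The channel ordering and sampling rate are
+        # properties of the source HDF5 files (at an assumed 100 Hz, each trace
+        # would span 120 s) and should be verified against the data.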
+        return datasets.DatasetInfo(
+            # This is the description that will appear on the datasets page.
+            description=_DESCRIPTION,
+            # This defines the different columns of the dataset and their types.
+            features=features,
+            # If there is a common (input, target) pair among the features, set
+            # supervised_keys; it is used when as_supervised=True in builder.as_dataset.
+            # supervised_keys=("sentence", "label"),
+            # Homepage of the dataset for documentation
+            homepage=_HOMEPAGE,
+            # License for the dataset if available
+            license=_LICENSE,
+            # Citation for the dataset
+            citation=_CITATION,
+        )
+
+    def _split_generators(self, dl_manager):
+        # Downloads the data and defines the splits. When several configurations are
+        # listed in BUILDER_CONFIGS, the one selected by the user is self.config.name.
+        #
+        # dl_manager is a datasets.download.DownloadManager that downloads and extracts
+        # URLs: it accepts any nested list/dict and returns the same structure with each
+        # URL replaced by the path to the local, cached file (extracted if it is an archive).
+        urls = _URLS[self.config.name]
+        files = dl_manager.download_and_extract(urls)
+
+        return [
+            datasets.SplitGenerator(
+                name=datasets.Split.TRAIN,
+                # These kwargs will be passed to _generate_examples
+                gen_kwargs={
+                    "filepath": files,
+                    "split": "train",
+                },
+            ),
+            # Validation and test splits are not defined yet; when they are, add
+            # SplitGenerators for datasets.Split.VALIDATION and datasets.Split.TEST here.
+        ]
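+    # NOTE: download_and_extract caches files under the datasets cache directory
+    # (~/.cache/huggingface/datasets by default, overridable via HF_DATASETS_CACHE),
+    # so repeated loads reuse the downloaded shards.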
+
+    # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
+    def _generate_examples(self, filepath, split):
+        # Yields (key, example) tuples. The `key` exists for legacy reasons (tfds)
+        # and is not important in itself, but it must be unique for each example.
+        for file in filepath:
+            with h5py.File(file, "r") as fp:
+                # Each HDF5 file holds one group per earthquake event, and each event
+                # group holds one waveform dataset per recording station.
+                for event_id in sorted(fp.keys()):
+                    event = fp[event_id]
+                    for station_id in sorted(event.keys()):
+                        station = event[station_id]
+                        yield event_id + "_" + station_id, {
+                            "event_id": event_id,
+                            "station_id": station_id,
+                            "waveform": station[:],
+                        }
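
A minimal usage sketch, assuming the script is published on the Hub under the repo id AI4EPS/QuakeFlow_NC (matching _REPO above); depending on the datasets library version, trust_remote_code=True may also be required for script-based datasets:

    import datasets

    # Load the "NCEDC" configuration defined in BUILDER_CONFIGS; this downloads
    # and caches all 37 HDF5 shards on first use.
    ds = datasets.load_dataset("AI4EPS/QuakeFlow_NC", "NCEDC", split="train")

    # Each example follows the features declared in _info().
    example = ds[0]
    print(example["event_id"], example["station_id"])
    waveform = example["waveform"]  # nested lists with shape (3, 12000)
    print(len(waveform), len(waveform[0]))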