Commit e9dd25b (1 parent: eaede39)
zhuwq0 committed

update quakeflow_nc.py

Files changed (1)
  quakeflow_nc.py  +42 -22
quakeflow_nc.py CHANGED
@@ -17,14 +17,13 @@
 """QuakeFlow_NC: A dataset of earthquake waveforms organized by earthquake events and based on the HDF5 format."""
 
 
-import h5py
-import numpy as np
-import torch
 from typing import Dict, List, Optional, Tuple, Union
-import fsspec
 
 import datasets
-
+import fsspec
+import h5py
+import numpy as np
+import torch
 
 # TODO: Add BibTeX citation
 # Find for instance the citation on arxiv or on the dataset repo/website
@@ -51,24 +50,45 @@ _LICENSE = ""
 # TODO: Add link to the official dataset URLs here
 # The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
 # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
-_REPO = "https://huggingface.co/datasets/AI4EPS/quakeflow_nc/resolve/main/data"
+_REPO = "https://huggingface.co/datasets/AI4EPS/quakeflow_nc/resolve/main/waveform_ps_h5"
 _FILES = [
-    "NC1970-1989.h5",
-    "NC1990-1994.h5",
-    "NC1995-1999.h5",
-    "NC2000-2004.h5",
-    "NC2005-2009.h5",
-    "NC2010.h5",
-    "NC2011.h5",
-    "NC2012.h5",
-    "NC2013.h5",
-    "NC2014.h5",
-    "NC2015.h5",
-    "NC2016.h5",
-    "NC2017.h5",
-    "NC2018.h5",
-    "NC2019.h5",
-    "NC2020.h5",
+    "1987.h5",
+    "1988.h5",
+    "1989.h5",
+    "1990.h5",
+    "1991.h5",
+    "1992.h5",
+    "1993.h5",
+    "1994.h5",
+    "1995.h5",
+    "1996.h5",
+    "1997.h5",
+    "1998.h5",
+    "1999.h5",
+    "2000.h5",
+    "2001.h5",
+    "2002.h5",
+    "2003.h5",
+    "2004.h5",
+    "2005.h5",
+    "2006.h5",
+    "2007.h5",
+    "2008.h5",
+    "2009.h5",
+    "2010.h5",
+    "2011.h5",
+    "2012.h5",
+    "2013.h5",
+    "2014.h5",
+    "2015.h5",
+    "2016.h5",
+    "2017.h5",
+    "2018.h5",
+    "2019.h5",
+    "2020.h5",
+    "2021.h5",
+    "2022.h5",
+    "2023.h5",
 ]
 _URLS = {
     "station": [f"{_REPO}/{x}" for x in _FILES],