|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
"""Archival MRMS precipitation rate radar data covering most of 2016-2022."""
|
|
|
|
|
import xarray as xr |
|
|
|
import datasets |
|
|
|
|
|
|
|
_CITATION = """\ |
|
@InProceedings{ocf:gfs, |
|
title = {GFS Forecast Dataset}, |
|
author={Jacob Bieker |
|
}, |
|
year={2022} |
|
} |
|
""" |
|
|
|
|
|
_DESCRIPTION = """\ |
|
This dataset consists of various NOAA datasets related to operational forecasts, including FNL Analysis files, |
|
GFS operational forecasts, and the raw observations used to initialize the grid. |
|
""" |
|
|
|
_HOMEPAGE = "https://mtarchive.geol.iastate.edu/" |
|
|
|
_LICENSE = "US Government data, Open license, no restrictions" |
|
|
|
|
|
|
|
_URLS = { |
|
"2021": "https://huggingface.co/datasets/openclimatefix/mrms/resolve/main/data/2021/2021.zarr.zip", |
|
"2017": "https://huggingface.co/datasets/openclimatefix/mrms/resolve/main/data/2017/2017.zarr.zip", |
|
"2016": "https://huggingface.co/datasets/openclimatefix/mrms/resolve/main/data/2016/2016.zarr.zip", |
|
"2018": "https://huggingface.co/datasets/openclimatefix/mrms/resolve/main/data/2018/2018.zarr.zip", |
|
"2019": "https://huggingface.co/datasets/openclimatefix/mrms/resolve/main/data/2019/2019.zarr.zip", |
|
"2022": "https://huggingface.co/datasets/openclimatefix/mrms/resolve/main/data/2022/2022.zarr.zip", |
|
} |
|
|
|
_URLS["default"] = {"train": [_URLS["2016"], _URLS["2017"], _URLS["2018"], _URLS["2019"]], "valid": [_URLS["2021"]], "test": [_URLS["2022"]]} |
|
_URLS["default_sequence"] = _URLS["default"] |
|
|
|
class MRMS(datasets.GeneratorBasedBuilder):
    """Archival MRMS Precipitation Rate Radar data for the continental US, covering most of 2016-2022.

    Two configurations are exposed:

    * ``analysis`` (default): one 3500x7000 precipitation-rate frame per example.
    * ``default_sequence``: 24 consecutive timesteps per example, trained on
      2016-2019, validated on 2021 and tested on 2022.
    """

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="analysis",
            version=VERSION,
            description="FNL 0.25 degree Analysis files",
        ),
        datasets.BuilderConfig(
            name="default_sequence",
            version=VERSION,
            description=(
                "Train on 2016-2020, validate on 2021, test on 2022, "
                "with 24 timesteps per example"
            ),
        ),
    ]

    DEFAULT_CONFIG_NAME = "analysis"

    def _info(self):
        """Build the :class:`datasets.DatasetInfo` (feature schema) for the active config."""
        if "sequence" in self.config.name:
            # BUG FIX: Array4D was previously given a 3-element shape
            # (3500, 7000, 2), which is invalid for a 4D feature.  Per the
            # config description each example holds 24 timesteps, so the
            # leading axis is 24.
            # NOTE(review): the trailing (channel) axis is assumed to be 1 to
            # match the single-frame schema below -- confirm against the
            # actual Zarr array layout.
            features = datasets.Features(
                {
                    "precipitation_rate": datasets.Array4D((24, 3500, 7000, 1), dtype="float16"),
                    "timestamp": datasets.Sequence(datasets.Value("timestamp[ns]")),
                    "latitude": datasets.Sequence(datasets.Value("float32")),
                    "longitude": datasets.Sequence(datasets.Value("float32")),
                }
            )
        else:
            # Single 3500x7000 frame with one channel per example.
            features = datasets.Features(
                {
                    "precipitation_rate": datasets.Array3D((3500, 7000, 1), dtype="float16"),
                    "timestamp": datasets.Value("timestamp[ns]"),
                    "latitude": datasets.Sequence(datasets.Value("float32")),
                    "longitude": datasets.Sequence(datasets.Value("float32")),
                }
            )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Return train/validation/test split generators.

        The Zarr archives are opened lazily over fsspec in
        ``_generate_examples``, so ``dl_manager`` is intentionally unused.
        """
        # BUG FIX: the default config name is "analysis", which has no entry
        # in _URLS and previously raised KeyError.  Fall back to the
        # "default" split mapping for config names without their own entry.
        urls = _URLS.get(self.config.name, _URLS["default"])
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": urls, "split": "train"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": urls, "split": "test"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filepath": urls, "split": "valid"},
            ),
        ]

    @staticmethod
    def _open_zarr(path):
        """Open a zipped Zarr store via fsspec's chained ``zip://`` protocol,
        with the time axis sorted and de-duplicated."""
        return (
            xr.open_dataset("zip:///::" + path, engine="zarr", chunks={})
            .sortby("time")
            .drop_duplicates("time")
        )

    def _generate_examples(self, filepath, split):
        """Yield ``(key, example)`` pairs for the requested split.

        ``filepath`` is either a mapping of split name -> list of archive
        URLs (the "default*" configs) or a single archive URL.
        """
        if isinstance(filepath, dict):
            filepaths = filepath[split]
        else:
            filepaths = [filepath]
        sequence_length = 24  # timesteps per example in the sequence config
        if "sequence" in self.config.name:
            for file_idx, f in enumerate(filepaths):
                dataset = self._open_zarr(f)
                n_times = len(dataset["time"].values)
                for idx in range(0, n_times, sequence_length):
                    try:
                        data = dataset.isel(time=slice(idx, idx + sequence_length))
                        value = {
                            "precipitation_rate": data["unknown"].values,
                            "timestamp": data["time"].values,
                            "latitude": data["latitude"].values,
                            "longitude": data["longitude"].values,
                        }
                        # BUG FIX: keys previously restarted at 0 for every
                        # file, producing duplicate keys across year archives;
                        # prefix with the file index to keep them unique.
                        yield f"{file_idx}_{idx}", value
                    except Exception:
                        # Best-effort: skip unreadable/incomplete chunks
                        # instead of aborting generation (was a bare except,
                        # which also swallowed KeyboardInterrupt/SystemExit).
                        continue
        else:
            for file_idx, f in enumerate(filepaths):
                dataset = self._open_zarr(f)
                for key, row in enumerate(dataset["time"].values):
                    try:
                        data = dataset.sel(time=row)
                        value = {
                            "precipitation_rate": data["unknown"].values,
                            "timestamp": data["time"].values,
                            "latitude": data["latitude"].values,
                            "longitude": data["longitude"].values,
                        }
                        # Same uniqueness fix as the sequence branch.
                        yield f"{file_idx}_{key}", value
                    except Exception:
                        # Skip timestamps that fail to load; best-effort.
                        continue
|
|