Datasets:

Size:
n<1K
ArXiv:
DOI:
License:
california_burned_areas / california_burned_areas.py
DarthReca
:hammer: Corrected config
4851a70
raw
history blame
6.64 kB
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List
import datasets
import h5py
# BibTeX citation for the paper introducing the dataset (IEEE GRSM 2023).
_CITATION = """\
@article{cabuar,
title={Ca{B}u{A}r: California {B}urned {A}reas dataset for delineation},
author={Rege Cambrin, Daniele and Colomba, Luca and Garza, Paolo},
journal={IEEE Geoscience and Remote Sensing Magazine},
doi={10.1109/MGRS.2023.3292467},
year={2023}
}
"""
# Human-readable summary shown on the dataset page.
_DESCRIPTION = """\
CaBuAr dataset contains images from Sentinel-2 satellites taken before and after a wildfire.
The ground truth masks are provided by the California Department of Forestry and Fire Protection and they are mapped on the images.
"""
_HOMEPAGE = "https://huggingface.co/datasets/DarthReca/california_burned_areas"
_LICENSE = "OPENRAIL"
# Path of the HDF5 archive, relative to the dataset repository root;
# resolved by the download manager when the script runs from the Hub.
_URLS = "raw/patched/512x512.hdf5"
class CaBuArConfig(datasets.BuilderConfig):
    """BuilderConfig for the CaBuAr dataset.

    Parameters
    ----------
    load_prefire: bool
        whether pre-fire acquisitions are loaded alongside post-fire ones
    train_folds: List[int]
        fold indices assigned to the training split
    validation_folds: List[int]
        fold indices assigned to the validation split
    test_folds: List[int]
        fold indices assigned to the test split
    **kwargs
        keyword arguments forwarded to datasets.BuilderConfig.
    """

    def __init__(
        self,
        load_prefire: bool,
        train_folds: List[int],
        validation_folds: List[int],
        test_folds: List[int],
        **kwargs
    ):
        super().__init__(**kwargs)
        self.load_prefire = load_prefire
        self.train_folds = train_folds
        self.validation_folds = validation_folds
        self.test_folds = test_folds
class CaBuAr(datasets.GeneratorBasedBuilder):
    """California Burned Areas dataset.

    512x512 Sentinel-2 patches (12 bands, uint16) paired with single-channel
    burned-area masks, stored in one HDF5 file. Two configurations exist:
    "post-fire" (post-fire image + mask) and "pre-post-fire" (adds the
    pre-fire image). Fold assignments must be provided on the config before
    the dataset is loaded.
    """

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        CaBuArConfig(
            name="post-fire",
            version=VERSION,
            description="Post-fire only version of the dataset",
            load_prefire=False,
            train_folds=None,
            validation_folds=None,
            test_folds=None,
        ),
        CaBuArConfig(
            name="pre-post-fire",
            version=VERSION,
            description="Pre-fire and post-fire version of the dataset",
            load_prefire=True,
            train_folds=None,
            validation_folds=None,
            test_folds=None,
        ),
    ]
    DEFAULT_CONFIG_NAME = "post-fire"
    BUILDER_CONFIG_CLASS = CaBuArConfig

    def _info(self):
        """Return dataset metadata; the feature schema depends on the config."""
        # Every patch is 512x512 with 12 Sentinel-2 bands.
        image_feature = datasets.Array3D((512, 512, 12), dtype="uint16")
        # FIX: the mask is single-channel in both configurations; the
        # post-fire branch previously declared (512, 512, 12), inconsistent
        # with the (512, 512, 1) used by the pre-post-fire branch for the
        # same "mask" key.
        mask_feature = datasets.Array3D((512, 512, 1), dtype="uint16")
        if self.config.name == "pre-post-fire":
            features = datasets.Features(
                {
                    "post_fire": image_feature,
                    "pre_fire": image_feature,
                    "mask": mask_feature,
                }
            )
        else:
            features = datasets.Features(
                {
                    "post_fire": image_feature,
                    "mask": mask_feature,
                }
            )
        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=_DESCRIPTION,
            # This defines the different columns of the dataset and their types
            features=features,
            # Homepage of the dataset for documentation
            homepage=_HOMEPAGE,
            # License for the dataset if available
            license=_LICENSE,
            # Citation for the dataset
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the HDF5 archive and build the train/val/test splits.

        Raises
        ------
        ValueError
            If any of train_folds, validation_folds or test_folds is unset.
        """
        # Validate the config BEFORE downloading: previously the (large)
        # HDF5 file was fetched even when the folds were missing and the
        # call was doomed to fail.
        if (
            self.config.train_folds is None
            or self.config.validation_folds is None
            or self.config.test_folds is None
        ):
            raise ValueError("train_folds, validation_folds and test_folds must be set")
        h5_file = dl_manager.download(_URLS)
        split_to_folds = [
            (datasets.Split.TRAIN, self.config.train_folds),
            (datasets.Split.VALIDATION, self.config.validation_folds),
            (datasets.Split.TEST, self.config.test_folds),
        ]
        return [
            datasets.SplitGenerator(
                name=split,
                # These kwargs will be passed to _generate_examples.
                # FIX: the validation entry previously used the key
                # "folds:" (stray trailing colon), so _generate_examples
                # received an unexpected keyword argument and the
                # validation split could never be generated.
                gen_kwargs={
                    "folds": folds,
                    "load_prefire": self.config.load_prefire,
                    "filepath": h5_file,
                },
            )
            for split, folds in split_to_folds
        ]

    # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
    def _generate_examples(self, folds: List[int], load_prefire: bool, filepath):
        """Yield (uuid, sample) pairs for every patch in the requested folds.

        Patches whose "fold" attribute is not in `folds` are skipped, as are
        patches lacking a "pre_fire" acquisition when `load_prefire` is True.
        """
        with h5py.File(filepath, "r") as f:
            for uuid, values in f.items():
                if values.attrs["fold"] not in folds:
                    continue
                # Some events have no pre-fire acquisition; they cannot be
                # used in the pre-post-fire configuration.
                if load_prefire and "pre_fire" not in values:
                    continue
                sample = {
                    "post_fire": values["post_fire"][...],
                    "mask": values["mask"][...],
                }
                if load_prefire:
                    sample["pre_fire"] = values["pre_fire"][...]
                yield uuid, sample