Datasets:
File size: 18,370 Bytes
9453ae9 062684d 9453ae9 4063393 b0a124e 9453ae9 b0a124e 9453ae9 4a0e081 9453ae9 4a0e081 b0a124e 9453ae9 4a0e081 b0a124e 9453ae9 4a0e081 b0a124e 9453ae9 4a0e081 9453ae9 b0a124e 4a0e081 9453ae9 b0a124e 9453ae9 4a0e081 9453ae9 4a0e081 9453ae9 b0a124e 4a0e081 9453ae9 4a0e081 9453ae9 4063393 9453ae9 b0a124e 9453ae9 b0a124e 9453ae9 b0a124e 9453ae9 b0a124e 4a0e081 9453ae9 4063393 9453ae9 4063393 9453ae9 4a0e081 9453ae9 4063393 9453ae9 4a0e081 9453ae9 4063393 9453ae9 4063393 4a0e081 4063393 4a0e081 4063393 4a0e081 4063393 4a0e081 4063393 4a0e081 4063393 4a0e081 4063393 b0a124e |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 |
# Copyright 2025 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This file includes code adapted from the original work by EPFL and Apple Inc.,
# licensed under the Apache License, Version 2.0.
# Source: https://github.com/apple/ml-4m/
import os
import io
import re
import zarr
import torch
import fsspec
import braceexpand
import numpy as np
import albumentations
import warnings
import webdataset as wds
from collections.abc import Callable, Iterable
from torch.utils.data._utils.collate import default_collate
from webdataset.handlers import warn_and_continue
# Definition of all shard files in TerraMesh.
# Maps subset name -> split ("train"/"val") -> list of tar shard name patterns.
# Patterns like {000001..000793} are brace-expanded by webdataset/braceexpand.
split_files = {
    "ssl4eos12": {
        "train": ["ssl4eos12_shard_{000794..000889}.tar"],
        "val": ["ssl4eos12_shard_000009.tar"],
    },
    "majortom": {
        "train": ["majortom_shard_{000001..000793}.tar"],
        "val": ["majortom_shard_{000001..000008}.tar"],
    },
    "combined": {
        # Union of the MajorTOM and SSL4EOS12 shards
        "train": ["majortom_shard_{000001..000793}.tar", "ssl4eos12_shard_{000794..000889}.tar"],
        "val": ["majortom_shard_{000001..000008}.tar", "ssl4eos12_shard_000009.tar"],
    }
}
# Per-band normalization statistics for each modality. Each list holds one
# value per channel (e.g. 13 bands for S2L1C, 12 for S2L2A, 2 for S1GRD/S1RTC).
# NOTE(review): presumably computed over the TerraMesh training split — the
# provenance is not visible in this file.
statistics = {
    "mean": {
        "S2L1C": [2357.090, 2137.398, 2018.799, 2082.998, 2295.663, 2854.548, 3122.860, 3040.571, 3306.491, 1473.849,
                  506.072, 2472.840, 1838.943],
        "S2L2A": [1390.461, 1503.332, 1718.211, 1853.926, 2199.116, 2779.989, 2987.025, 3083.248, 3132.235, 3162.989,
                  2424.902, 1857.665],
        "S2RGB": [110.349, 99.507, 75.843],
        "S1GRD": [-12.577, -20.265],
        "S1RTC": [-10.93, -17.329],
        "NDVI": [0.327],
        "DEM": [651.663],
    },
    "std": {
        "S2L1C": [1673.639, 1722.641, 1602.205, 1873.138, 1866.055, 1779.839, 1776.496, 1724.114, 1771.041, 1079.786,
                  512.404, 1340.879, 1172.435],
        "S2L2A": [2131.157, 2163.666, 2059.311, 2152.477, 2105.179, 1912.773, 1842.326, 1893.568, 1775.656, 1814.907,
                  1436.282, 1336.155],
        "S2RGB": [69.905, 53.708, 53.378],
        "S1GRD": [5.179, 5.872],
        "S1RTC": [4.391, 4.459],
        "NDVI": [0.322],
        "DEM": [928.168]
    }
}
def build_terramesh_dataset(
    path: str = "https://huggingface.co/datasets/ibm-esa-geospatial/TerraMesh/resolve/main/",
    modalities: list | str | None = None,
    split: str = "val",
    urls: str | None = None,
    batch_size: int = 8,
    return_metadata: bool = False,
    shuffle: bool = True,
    *args, **kwargs,
):
    """
    Build a TerraMesh dataset.

    Dispatches to a plain single-modality WebDataset when exactly one modality
    is requested, and to the custom multi-modal pipeline otherwise.

    Args:
        path: Base URL or directory containing the TerraMesh shards.
        modalities: Modality name(s), e.g. "S2L2A" or ["S2L2A", "S1RTC"].
            None selects the multi-modal pipeline with its default modalities.
        split: Dataset split, "train" or "val".
        urls: Optional explicit shard URL string; overrides path/split selection.
        batch_size: Batch size; None disables batching.
        return_metadata: If True, also decode location/time/cloud-mask metadata.
        shuffle: Whether to shuffle shards and samples.

    Returns:
        An iterable WebDataset or DataPipeline yielding (batched) samples.
    """
    # Accept a single modality passed as a plain string. The original code
    # applied len() directly, which crashed on the default None and mis-routed
    # strings (len("S2L2A") == 5) to the multi-modal path.
    if isinstance(modalities, str):
        modalities = [modalities]

    if modalities is not None and len(modalities) == 1:
        # Build standard WebDataset for single modality
        return build_wds_dataset(
            path=path,
            modality=modalities[0],
            split=split,
            urls=urls,
            batch_size=batch_size,
            return_metadata=return_metadata,
            shuffle=shuffle,
            *args, **kwargs
        )

    # Build custom multi-modal dataset (None selects all default modalities)
    return build_multimodal_dataset(
        path=path,
        modalities=modalities,
        split=split,
        urls=urls,
        batch_size=batch_size,
        return_metadata=return_metadata,
        shuffle=shuffle,
        *args, **kwargs,
    )
def zarr_decoder(key, value):
    """
    Webdataset decoder for zipped zarr archives.

    Returns the "bands" array for "*.zarr.zip" entries and None for any other
    key, so webdataset falls back to its default decoders.
    """
    if not (key == "zarr.zip" or key.endswith(".zarr.zip")):
        return None
    # Open the zip archive directly from the in-memory bytes
    zip_fs = fsspec.filesystem("zip", fo=io.BytesIO(value), block_size=None)
    store = zarr.open_consolidated(zip_fs.get_mapper(""), mode="r")
    return store["bands"][...]
def zarr_metadata_decoder(sample: dict) -> dict:
    """
    Decode zipped zarr archives in a webdataset sample and attach metadata.

    Replaces each "*.zarr.zip" entry with its "bands" array and adds the
    sample's center coordinates, the S-2 cloud mask (if present), and the
    acquisition timestamp. Non-zarr entries are left for webdataset's
    autodecode to handle.
    """
    for key, value in list(sample.items()):
        if key == "zarr.zip" or key.endswith(".zarr.zip"):
            # Open the zipped zarr store directly from the in-memory bytes
            mapper = fsspec.filesystem("zip", fo=io.BytesIO(value), block_size=None).get_mapper("")
            data = zarr.open_consolidated(mapper, mode="r")
            sample[key] = data["bands"][...]
            # Add metadata
            if "center_lon" not in sample.keys():  # Same center point for all modalities
                sample["center_lon"] = data["center_lon"][...]
                sample["center_lat"] = data["center_lat"][...]
            if "cloud_mask" in data and "cloud_mask" not in sample.keys():  # Same S2 mask in all optical modalities
                sample["cloud_mask"] = data["cloud_mask"][...][np.newaxis, ...]  # Add channel dim to mask
            if data["time"][...] > 1e6:  # DEM has no valid timestamp (value = 0)
                # Per-modality time key ("time_<modality>.zarr.zip") in the multi-modal case
                time_key = "time" if key == "zarr.zip" else "time_" + key
                sample[time_key] = data["time"][...]  # Integer values of type "datetime64[ns]"
    # TODO Other types are currently not decoded, fall back to autodecode
    return sample
def identity(sample):
    """Return the input unchanged; used as a no-op pipeline stage."""
    return sample
def drop_time_dim(value, dim: int = 0):
    """
    Squeeze the time dimension out of array/tensor data.

    Accepts a single array/tensor or a dict whose array/tensor values are
    squeezed in place; other dict values are left untouched. Inputs that are
    neither arrays/tensors nor dicts yield None (original behavior).
    """
    array_types = (np.ndarray, torch.Tensor)
    if isinstance(value, array_types):
        return value.squeeze(dim)
    if isinstance(value, dict):
        for key in value:
            entry = value[key]
            if isinstance(entry, array_types):
                value[key] = entry.squeeze(dim)
        return value
def build_wds_dataset(
    path: str = "https://huggingface.co/datasets/ibm-esa-geospatial/TerraMesh/resolve/main/",
    modality: str = "S2L2A",
    split: str = "val",
    urls: str | None = None,
    batch_size: int = 8,
    transform: Callable = None,
    shuffle: bool = True,
    return_metadata: bool = False,
    *args, **kwargs
):
    """
    Build a single-modality WebDataset.

    Args:
        path: Base URL or directory containing the TerraMesh shards.
        modality: Modality name, e.g. "S2L2A" or "S1GRD".
        split: Dataset split, "train" or "val".
        urls: Optional explicit shard URLs ("::"-joined); overrides selection.
        batch_size: Batch size; None disables batching.
        transform: Optional callable applied to each sample dict.
        shuffle: Whether to shuffle shards and samples.
        return_metadata: If True, also decode location/time/cloud-mask metadata.

    Returns:
        A wds.WebDataset yielding (batched) samples with the band data under "image".
    """
    if urls is None:
        # Select split files. S1GRD only exists in the SSL4EOS12 subset and
        # S1RTC only in the MajorTOM subset (see build_multimodal_dataset);
        # every other modality is available in the combined subset.
        if modality == "S1GRD":
            files = split_files["ssl4eos12"][split]
        elif modality == "S1RTC":  # BUGFIX: was a duplicated "S1GRD" check, making this branch unreachable
            files = split_files["majortom"][split]
        else:
            files = split_files["combined"][split]
        # Joins majortom and ssl4eos12 shard files with "::" (except for S-1 modalities)
        urls = "::".join(
            [os.path.join(path, split, modality, f) for f in files]
        )

    # Shuffle shards; multiplying by the bool disables shuffling (buffer size 0)
    kwargs["shardshuffle"] = kwargs.get("shardshuffle", 100) * shuffle

    # Build dataset
    dataset = wds.WebDataset(urls, *args, **kwargs)

    # Decode from bytes to numpy arrays, etc.
    dataset = dataset.map(zarr_metadata_decoder) if return_metadata else dataset.decode(zarr_decoder)

    # Rename modality to "image" and remove temporal dimension
    dataset = (dataset
               .rename(image="zarr.zip")
               .map(drop_time_dim)
               )

    if transform is not None:
        dataset = dataset.map(transform)

    # Create batches
    if batch_size is not None:
        dataset = dataset.batched(batch_size)

    return dataset
def build_multimodal_dataset(
    path: str = "https://huggingface.co/datasets/ibm-esa-geospatial/TerraMesh/resolve/main/",
    modalities: list = None,
    split: str = "val",
    urls: str | None = None,
    batch_size: int = 8,
    transform: Callable = None,
    shuffle: bool = True,
    return_metadata: bool = False,
    *args, **kwargs
):
    """
    Build a multi-modal TerraMesh data pipeline.

    Constructs the "::"-separated shard URLs (with square-brace modality
    selectors) for the MajorTOM and SSL4EOS12 subsets and hands them to
    build_datapipeline.

    Args:
        path: Base URL or directory containing the TerraMesh shards.
        modalities: Modality names; None selects all available modalities.
        split: Dataset split, "train" or "val".
        urls: Optional explicit shard URLs; overrides path/split selection.
        batch_size: Batch size; None disables batching.
        transform: Optional callable applied to each decoded sample dict.
        shuffle: Whether to shuffle shards and samples.
        return_metadata: If True, also decode location/time/cloud-mask metadata.

    Returns:
        A wds.DataPipeline yielding (batched) multi-modal sample dicts.
    """
    if modalities is None:
        # Default: every available modality
        modalities = ["S2L2A", "S2L1C", "S2RGB", "S1GRD", "S1RTC", "DEM", "NDVI", "LULC"]

    if urls is None:
        # S1GRD is missing from MajorTOM and S1RTC from SSL4EOS12, so each
        # subset gets the modality list with its unavailable S-1 product removed.
        def drop_first(names, unavailable):
            remaining = list(names)
            if unavailable in remaining:
                remaining.remove(unavailable)
            return remaining

        majortom_mod = "[" + ",".join(drop_first(modalities, "S1GRD")) + "]"
        ssl4eos12_mod = "[" + ",".join(drop_first(modalities, "S1RTC")) + "]"

        # Join the majortom and ssl4eos12 shard patterns with "::"
        urls = "::".join([
            os.path.join(path, split, majortom_mod, split_files["majortom"][split][0]),
            os.path.join(path, split, ssl4eos12_mod, split_files["ssl4eos12"][split][0]),
        ])

    return build_datapipeline(urls, transform, batch_size, shuffle, return_metadata, *args, **kwargs)
def build_datapipeline(urls, transform, batch_size, shuffle, return_metadata, *args, **kwargs):
    """
    Assemble the multi-modal webdataset pipeline.

    Args:
        urls: "::"-separated shard URL patterns; square braces select modalities
            and are expanded by multi_tarfile_samples.
        transform: Optional callable applied to each decoded sample dict.
        batch_size: Batch size; None disables batching.
        shuffle: If True, resample shards with replacement and shuffle samples.
        return_metadata: If True, decode metadata alongside the band arrays.

    Keyword Args:
        shardshuffle: Sample shuffle buffer size (default 100).
        deterministic: Whether shard resampling is seeded deterministically (default False).
        seed: Random seed for resampling/shuffling (default 0).

    Returns:
        A wds.DataPipeline yielding (batched) multi-modal sample dicts.
    """
    # Multiplying by the bool turns the buffer size into 0 when shuffle=False
    shardshuffle = kwargs.get("shardshuffle", 100) * shuffle  # Shuffle shard
    deterministic = kwargs.get("deterministic", False)
    seed = kwargs.get("seed", 0)

    datapipeline = wds.DataPipeline(
        # Infinitely sample shards from the shard list with replacement. Each worker is seeded independently.
        (
            wds.ResampledShards(urls, deterministic=deterministic, seed=seed)
            if shuffle else wds.SimpleShardList(urls)
        ),
        multi_tarfile_samples,  # Extract individual samples from multi-modal tar files
        wds.shuffle(shardshuffle, seed=seed),  # Shuffle with a buffer of given size
        (
            wds.map(zarr_metadata_decoder)
            if return_metadata
            else wds.decode(zarr_decoder)  # Decode from bytes to numpy arrays, etc.
        ),
        wds.map(drop_time_dim),  # Remove time dimension from tensors
        wds.map(remove_extensions),  # Remove "file extensions" from dictionary keys
        (  # Apply transformation
            wds.map(transform)
            if transform is not None
            else wds.map(identity)
        ),
        (  # Batching
            wds.batched(batch_size, collation_fn=default_collate, partial=False)
            if batch_size is not None
            else wds.map(identity)
        ),
    )
    return datapipeline
def extract_modality_names(s):
    """
    Return the comma-separated names inside the first "{...}" group of s,
    or an empty list if no braces are present.

    Function from https://github.com/apple/ml-4m/blob/main/fourm/data/unified_datasets.py.
    """
    # Match the content of the first brace-enclosed group
    found = re.search(r"\{([^}]*)\}", s)
    if found is None:
        return []
    return found.group(1).split(",")
def remove_ext_with_gz(s):
    """
    Strip a trailing ".gz"/".zip" compression suffix, then the file extension.

    E.g. "caption.json.gz" -> "caption", "S2L2A.zarr.zip" -> "S2L2A".

    Function from https://github.com/apple/ml-4m/blob/main/fourm/data/unified_datasets.py.
    """
    # BUGFIX: str.replace removed *every* occurrence of ".gz"/".zip" in the
    # name (e.g. "data.gz.v1.json.gz" -> "data.v1.json"); only the trailing
    # suffix should be stripped, so use removesuffix instead.
    if s.endswith(".gz"):
        s = s.removesuffix(".gz")
    if s.endswith(".zip"):
        s = s.removesuffix(".zip")
    return os.path.splitext(s)[0]
def remove_extensions(sample):
    """
    Strip the type extensions from a sample's keys.

    Function from https://github.com/apple/ml-4m/blob/main/fourm/data/unified_datasets.py.
    In webdatasets, we identify the type of a given modality by adding an extension
    in the form f"{modality_name}.{modality_extension}", e.g. "rgb.jpg" or "caption.json".
    This function removes them and returns a dictionary of {f"{modality_name}": modality}.
    """
    cleaned = {}
    for key, value in sample.items():
        cleaned[remove_ext_with_gz(key)] = value
    return cleaned
def multi_tarfile_samples(
    src_iter: Iterable[dict],
    handler: Callable[[Exception], bool] = warn_and_continue,
):
    """
    This function is adapted from https://github.com/apple/ml-4m/blob/main/fourm/data/unified_datasets.py.
    Webdataset does not support splitting up shards by modality, so we need to do this manually.
    Usually, we would need to save all modalities in the same tar file, e.g. shard_root_train/{00000..12345}.tar,
    where each shard contains 1000 samples and each sample contains all modalities.
    This is not flexible when adding new modalities, so we instead save each modality in a separate tar file,
    e.g. shard_root_train_rgb/{00000..12345}.tar, shard_root_train_caption/{00000..12345}.tar, etc., where each shard contains
    again 1000 samples, but each sample contains only one modality. All samples in all shards have to be aligned.
    This function takes an iterator over shard URLs, where we use brace expansion to specify multiple tar files per modality.
    E.g. shard_root_train_[rgb,caption]/00123.tar will be expanded to shard_root_train_rgb/00123.tar and shard_root_train_caption/00123.tar,
    and the samples from these two tar files will be combined into a single sample.
    Args:
        src_iter: Iterator over shards that *already brace expanded the shard numbers*,
            e.g. {"url": "shard_root_train_[rgb,caption]/00000.tar"}, {"url": "shard_root_train_[rgb,caption]/00001.tar"}, ...
            This function will also work when no square braces for multiple modalities are used, e.g. {"url": "shard_root_train/00000.tar"}, ...
            It can be a drop-in replacement for wds.tarfile_samples.
        handler: Function that handles exceptions. If it returns True, the shard is skipped. If it returns False, the function exits.
    Yields:
        Dictionary of aligned samples from all modalities.
    """
    for src in src_iter:
        # Multi tar file URLs use brace expansion with square braces;
        # rewrite [..] to {..} so braceexpand can expand them.
        multi_tar_urls = src["url"].translate(str.maketrans("[]", "{}"))
        modality_names = extract_modality_names(multi_tar_urls)
        multi_tar_urls = list(braceexpand.braceexpand(multi_tar_urls))

        # Create tar iterators for shards of all modalities
        tar_iters = [
            wds.tarfile_samples([{"url": tar_url}]) for tar_url in multi_tar_urls
        ]

        try:
            # Loop over these iterators in parallel and combine the tar files from different modalities
            for multi_tar_files in zip(*tar_iters):
                merged_dict = {}
                merged_dict["__key__"] = multi_tar_files[0]["__key__"]
                merged_dict["__url__"] = src["url"]

                # NOTE(review): with the current extract_modality_names, a URL
                # without square braces yields modality_names == [] and this
                # loop merges no fields — verify the single-tar path if used.
                for modality_name, modality_dict in zip(
                    modality_names, multi_tar_files
                ):
                    _key = modality_dict.pop("__key__")
                    _url = modality_dict.pop("__url__")
                    # All modality shards must stay sample-aligned by key
                    if _key != merged_dict["__key__"]:
                        raise ValueError(
                            f"Divergence detected! Trying to merge keys {_key} of {modality_name} and {merged_dict['__key__']} of merged_dict with modalities {merged_dict.keys()}."
                        )
                    # Prefix each field with its modality name, e.g. "rgb.jpg"
                    for k, v in modality_dict.items():
                        if modality_name is None:
                            merged_dict[k] = v
                        else:
                            merged_dict[f"{modality_name}.{k}"] = v

                yield merged_dict

        except Exception as e:
            print(e)
            print(f"Exception occurred while processing {src['url']}.")
            if handler(e):
                print("Skipping shard...")
                continue
            else:
                break
class Transpose(albumentations.ImageOnlyTransform):
    """
    Transpose the axes of an image array via numpy.transpose.

    Useful e.g. for switching between channel-first and channel-last layouts
    inside an albumentations pipeline. Always applied (p=1).
    (The original docstring was copied from an unrelated "Rearrange" transform.)
    """

    def __init__(self, axis: list):
        """
        Initialize the Transpose transform.
        Args:
            axis (list): Axes permutation passed to numpy.transpose.
        """
        super().__init__(p=1)
        self.axis = axis

    def apply(self, img, **params):
        # Reorder the array axes according to the configured permutation
        return np.transpose(img, self.axis)

    def get_transform_init_args_names(self):
        # BUGFIX: albumentations expects the tuple of __init__ argument names
        # (used for serialization/repr); the original returned the unrelated
        # string "transpose".
        return ("axis",)
def default_non_image_transform(array):
    """
    Convert plain float/int numpy arrays to torch tensors; return anything
    else (including arrays of other dtypes) unchanged.
    """
    dtype = getattr(array, "dtype", None)
    if dtype is not None and (dtype == float or dtype == int):
        return torch.from_numpy(array)
    return array
class MultimodalTransforms:
    """
    Apply albumentations transforms to multiple image modalities at once.

    Supports a shared transform pipeline across image modalities plus a
    separate transform for non-image (sequence) modalities, which
    albumentations would otherwise ignore.
    This code is adapted from https://github.com/IBM/terratorch/blob/main/terratorch/datasets/transforms.py.
    """

    def __init__(
        self,
        transforms: dict | albumentations.Compose,
        non_image_modalities: list[str] | None = None,
        non_image_transforms: object | None = None,
    ):
        """
        Initialize the MultimodalTransforms.
        Args:
            transforms (dict or A.Compose): The transformation(s) to apply to the data.
            non_image_modalities (list[str] | None): Keys of non-image modalities.
            non_image_transforms (object | None): Transform applied to non-image
                modalities; defaults to converting numeric arrays to tensors.
        """
        self.transforms = transforms
        self.non_image_modalities = non_image_modalities or []
        self.non_image_transforms = non_image_transforms or default_non_image_transform

    def __call__(self, data: dict):
        # albumentations requires a key "image" and treats every other key as
        # an additional target, so temporarily rename one image modality.
        if "image" in data:
            image_modality = "image"
        else:
            # Pick the first key that is not a non-image modality
            image_modality = [k for k in data if k not in self.non_image_modalities][0]

        data["image"] = data.pop(image_modality)
        data = self.transforms(**data)
        data[image_modality] = data.pop("image")

        # Sequence data is ignored by albumentations; transform it separately.
        for modality in self.non_image_modalities:
            if modality in data:
                data[modality] = self.non_image_transforms(data[modality])

        return data
class MultimodalNormalize(Callable):
    """
    Channel-wise standardization for a batch of named modalities.

    Modalities present in the statistics but missing from the batch are
    skipped; batch entries without statistics pass through unchanged.
    NOTE(review): the per-channel lists broadcast over the trailing axis of
    each array — confirm the data layout matches before relying on this.
    """

    def __init__(self, mean: dict[str, list[float]], std: dict[str, list[float]]):
        super().__init__()
        self.mean = mean
        self.std = std

    def __call__(self, **batch):
        for modality, channel_mean in self.mean.items():
            if modality in batch:
                batch[modality] = (batch[modality] - channel_mean) / self.std[modality]
        return batch

    def add_targets(self, targets):
        """No-op; required by the albumentations API."""
        pass
|