# wanfall/omnifall.py -- uploaded via huggingface_hub (commit ee47bb1, verified)
"""OmniFall: A Unified Benchmark for Staged-to-Wild Fall Detection
This dataset builder provides unified access to the OmniFall benchmark, which integrates:
- OF-Staged (OF-Sta): 8 public staged fall detection datasets (~14h single-view)
- OF-In-the-Wild (OF-ItW): Curated genuine accident videos from OOPS (~2.7h)
- OF-Synthetic (OF-Syn): 12,000 synthetic videos generated with Wan 2.2 (~17h)
All components share a 16-class activity taxonomy. Staged datasets use classes 0-9,
while OF-ItW and OF-Syn use the full 0-15 range.
"""
import warnings
import pandas as pd
import datasets
from datasets import (
BuilderConfig,
GeneratorBasedBuilder,
Features,
Value,
ClassLabel,
Sequence,
SplitGenerator,
Split,
Video,
)
# BibTeX entry for citing the OmniFall paper (arXiv:2505.19889).
_CITATION = """\
@misc{omnifall,
title={OmniFall: A Unified Staged-to-Wild Benchmark for Human Fall Detection},
author={David Schneider and Zdravko Marinov and Rafael Baur and Zeyun Zhong and Rodi D\\\"uger and Rainer Stiefelhagen},
year={2025},
eprint={2505.19889},
archivePrefix={arXiv},
primaryClass={cs.CV},
url={https://arxiv.org/abs/2505.19889},
}
"""
_DESCRIPTION = """\
OmniFall is a comprehensive benchmark that unifies staged, in-the-wild, and synthetic
fall detection datasets under a common 16-class activity taxonomy.
"""
_HOMEPAGE = "https://huggingface.co/datasets/simplexsigil2/omnifall"
_LICENSE = "cc-by-nc-4.0"
# 16 activity classes shared across all components.
# The list index is the ClassLabel id; per the module docstring, staged
# datasets only use ids 0-9 while OF-ItW and OF-Syn use the full 0-15 range.
_ACTIVITY_LABELS = [
    "walk",  # 0
    "fall",  # 1
    "fallen",  # 2
    "sit_down",  # 3
    "sitting",  # 4
    "lie_down",  # 5
    "lying",  # 6
    "stand_up",  # 7
    "standing",  # 8
    "other",  # 9
    "kneel_down",  # 10
    "kneeling",  # 11
    "squat_down",  # 12
    "squatting",  # 13
    "crawl",  # 14
    "jump",  # 15
]
# Demographic and scene metadata categories (OF-Syn only).
_AGE_GROUPS = [
    "toddlers_1_4", "children_5_12", "teenagers_13_17",
    "young_adults_18_34", "middle_aged_35_64", "elderly_65_plus",
]
_GENDERS = ["male", "female"]
_SKIN_TONES = [f"mst{i}" for i in range(1, 11)]  # mst1 .. mst10 (Monk Skin Tone scale)
_ETHNICITIES = ["white", "black", "asian", "hispanic_latino", "aian", "nhpi", "mena"]
_BMI_BANDS = ["underweight", "normal", "overweight", "obese"]
_HEIGHT_BANDS = ["short", "avg", "tall"]
_ENVIRONMENTS = ["indoor", "outdoor"]
_CAMERA_ELEVATIONS = ["eye", "low", "high", "top"]
_CAMERA_AZIMUTHS = ["front", "rear", "left", "right"]
_CAMERA_DISTANCES = ["medium", "far"]
_CAMERA_SHOTS = ["static_wide", "static_medium_wide"]
_SPEEDS = ["24fps_rt", "25fps_rt", "30fps_rt", "std_rt"]
# The 8 staged datasets (lowercase dataset ids used in configs/splits).
_STAGED_DATASETS = [
    "caucafall", "cmdfall", "edf", "gmdcsa24",
    "le2i", "mcfd", "occu", "up_fall",
]
# Label CSV file paths (relative to repo root).
# NOTE: "GMDCSA24" is capitalized here (unlike the "gmdcsa24" dataset id)
# to match the hosted CSV filename.
_STAGED_LABEL_FILES = [f"labels/{name}.csv" for name in [
    "caucafall", "cmdfall", "edf", "GMDCSA24",
    "le2i", "mcfd", "occu", "up_fall",
]]
_ITW_LABEL_FILE = "labels/OOPS.csv"
_SYN_LABEL_FILE = "labels/of-syn.csv"
# Tar archive containing the OF-Syn videos ("av1" per the hosted filename).
_SYN_VIDEO_ARCHIVE = "data_files/omnifall-synthetic_av1.tar"
# ---- Feature schema definitions ----
def _core_features():
    """7-column schema for staged/OOPS data: one temporal segment per row."""
    schema = {
        "path": Value("string"),  # video identifier relative to dataset root
        "label": ClassLabel(num_classes=16, names=_ACTIVITY_LABELS),
        "start": Value("float32"),  # segment start (seconds)
        "end": Value("float32"),  # segment end (seconds)
        "subject": Value("int32"),
        "cam": Value("int32"),
        "dataset": Value("string"),  # source dataset name
    }
    return Features(schema)
def _syn_features():
    """19-column schema for synthetic data (core + demographic/scene metadata)."""
    # Start from the 7 core columns shared with staged/OOPS data.
    schema = {
        "path": Value("string"),
        "label": ClassLabel(num_classes=16, names=_ACTIVITY_LABELS),
        "start": Value("float32"),
        "end": Value("float32"),
        "subject": Value("int32"),
        "cam": Value("int32"),
        "dataset": Value("string"),
    }
    # Append demographic and scene metadata columns in schema order;
    # num_classes always equals the vocabulary length.
    for column, choices in [
        ("age_group", _AGE_GROUPS),
        ("gender_presentation", _GENDERS),
        ("monk_skin_tone", _SKIN_TONES),
        ("race_ethnicity_omb", _ETHNICITIES),
        ("bmi_band", _BMI_BANDS),
        ("height_band", _HEIGHT_BANDS),
        ("environment_category", _ENVIRONMENTS),
        ("camera_shot", _CAMERA_SHOTS),
        ("speed", _SPEEDS),
        ("camera_elevation", _CAMERA_ELEVATIONS),
        ("camera_azimuth", _CAMERA_AZIMUTHS),
        ("camera_distance", _CAMERA_DISTANCES),
    ]:
        schema[column] = ClassLabel(num_classes=len(choices), names=choices)
    return Features(schema)
def _syn_metadata_features():
    """Feature schema for the OF-Syn metadata config (video-level, no temporal segments)."""
    schema = {
        "path": Value("string"),
        "dataset": Value("string"),
    }
    # Demographic and scene metadata columns, in schema order.
    for column, choices in [
        ("age_group", _AGE_GROUPS),
        ("gender_presentation", _GENDERS),
        ("monk_skin_tone", _SKIN_TONES),
        ("race_ethnicity_omb", _ETHNICITIES),
        ("bmi_band", _BMI_BANDS),
        ("height_band", _HEIGHT_BANDS),
        ("environment_category", _ENVIRONMENTS),
        ("camera_shot", _CAMERA_SHOTS),
        ("speed", _SPEEDS),
        ("camera_elevation", _CAMERA_ELEVATIONS),
        ("camera_azimuth", _CAMERA_AZIMUTHS),
        ("camera_distance", _CAMERA_DISTANCES),
    ]:
        schema[column] = ClassLabel(num_classes=len(choices), names=choices)
    return Features(schema)
def _syn_framewise_features():
    """Feature schema for OF-Syn frame-wise labels (exactly 81 labels per video)."""
    schema = {
        "path": Value("string"),
        "dataset": Value("string"),
        # Fixed-length sequence: one activity label per frame.
        "frame_labels": Sequence(
            ClassLabel(num_classes=16, names=_ACTIVITY_LABELS), length=81
        ),
    }
    # Demographic and scene metadata columns, in schema order.
    for column, choices in [
        ("age_group", _AGE_GROUPS),
        ("gender_presentation", _GENDERS),
        ("monk_skin_tone", _SKIN_TONES),
        ("race_ethnicity_omb", _ETHNICITIES),
        ("bmi_band", _BMI_BANDS),
        ("height_band", _HEIGHT_BANDS),
        ("environment_category", _ENVIRONMENTS),
        ("camera_shot", _CAMERA_SHOTS),
        ("speed", _SPEEDS),
        ("camera_elevation", _CAMERA_ELEVATIONS),
        ("camera_azimuth", _CAMERA_AZIMUTHS),
        ("camera_distance", _CAMERA_DISTANCES),
    ]:
        schema[column] = ClassLabel(num_classes=len(choices), names=choices)
    return Features(schema)
def _paths_only_features():
    """Minimal feature schema used when only video paths are requested."""
    schema = {"path": Value("string")}
    return Features(schema)
# ---- Config ----
class OmniFallConfig(BuilderConfig):
    """BuilderConfig for the OmniFall dataset.

    Args:
        config_type: What kind of data to load.
            "labels" - All labels in a single split (no train/val/test).
            "split" - Train/val/test splits from split CSV files.
            "metadata" - Video-level metadata (OF-Syn only).
            "framewise" - Frame-wise HDF5 labels (OF-Syn only).
        data_source: Which component(s) to load: "staged", "itw", "syn",
            "staged+itw", or an individual dataset name (e.g. "cmdfall").
        split_type: Split strategy ("cs"/"cv" for staged/OOPS; "random",
            "cross_age", etc. for synthetic).
        train_source: For cross-domain configs, overrides data_source for
            the train/val splits.
        test_source: For cross-domain configs, overrides data_source for test.
        test_split_type: For cross-domain configs, overrides split_type for test.
        paths_only: If True, only return video paths (no label merging).
        framewise: If True, load frame-wise labels from HDF5 (OF-Syn only).
        include_video: If True, download and include video files (OF-Syn only).
        decode_video: If True (default), use Video() for auto-decoding;
            otherwise return the absolute file path as a string.
        deprecated_alias_for: If set, this config is a deprecated alias for
            the named config.
    """

    def __init__(
        self,
        config_type="labels",
        data_source="staged+itw",
        split_type=None,
        train_source=None,
        test_source=None,
        test_split_type=None,
        paths_only=False,
        framewise=False,
        include_video=False,
        decode_video=True,
        deprecated_alias_for=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        # Store every OmniFall-specific option verbatim on the config object.
        options = {
            "config_type": config_type,
            "data_source": data_source,
            "split_type": split_type,
            "train_source": train_source,
            "test_source": test_source,
            "test_split_type": test_split_type,
            "paths_only": paths_only,
            "framewise": framewise,
            "include_video": include_video,
            "decode_video": decode_video,
            "deprecated_alias_for": deprecated_alias_for,
        }
        for attr, value in options.items():
            setattr(self, attr, value)

    @property
    def is_crossdomain(self):
        """True when train/val and test come from different data sources."""
        return self.train_source is not None
def _make_config(name, description, **kwargs):
    """Build an OmniFallConfig pinned to dataset version 2.0.0."""
    version = datasets.Version("2.0.0")
    return OmniFallConfig(name=name, description=description, version=version, **kwargs)
# ---- Config definitions ----
# Single-split configs: all rows land in the "train" split (no held-out data).
_LABELS_CONFIGS = [
    _make_config(
        "labels",
        "All staged + OOPS labels (52k segments, 7 columns). Default config.",
        config_type="labels",
        data_source="staged+itw",
    ),
    _make_config(
        "labels-syn",
        "OF-Syn labels with demographic metadata (19k segments, 19 columns).",
        config_type="labels",
        data_source="syn",
    ),
    _make_config(
        "metadata-syn",
        "OF-Syn video-level metadata (12k videos, no temporal segments).",
        config_type="metadata",
        data_source="syn",
    ),
    _make_config(
        "framewise-syn",
        "OF-Syn frame-wise labels from HDF5 (81 labels per video).",
        config_type="framewise",
        data_source="syn",
        framewise=True,
    ),
]
# Aggregate train/val/test configs over all staged datasets plus OOPS.
_AGGREGATE_CONFIGS = [
    _make_config(
        "cs",
        "Cross-subject splits for all staged + OOPS datasets combined.",
        config_type="split",
        data_source="staged+itw",
        split_type="cs",
    ),
    _make_config(
        "cv",
        "Cross-view splits for all staged + OOPS datasets combined.",
        config_type="split",
        data_source="staged+itw",
        split_type="cv",
    ),
]
# Primary benchmark configs: OF-Staged, OF-ItW, and the OF-Syn split variants.
_PRIMARY_CONFIGS = [
    _make_config(
        "of-sta-cs",
        "OF-Staged: 8 staged datasets, cross-subject splits.",
        config_type="split",
        data_source="staged",
        split_type="cs",
    ),
    _make_config(
        "of-sta-cv",
        "OF-Staged: 8 staged datasets, cross-view splits.",
        config_type="split",
        data_source="staged",
        split_type="cv",
    ),
    _make_config(
        "of-itw",
        "OF-ItW: OOPS-Fall in-the-wild genuine accidents.",
        config_type="split",
        data_source="itw",
        split_type="cs",
    ),
    _make_config(
        "of-syn",
        "OF-Syn: synthetic, random 80/10/10 split.",
        config_type="split",
        data_source="syn",
        split_type="random",
    ),
    _make_config(
        "of-syn-cross-age",
        "OF-Syn: cross-age split (train: adults, test: children/elderly).",
        config_type="split",
        data_source="syn",
        split_type="cross_age",
    ),
    _make_config(
        "of-syn-cross-ethnicity",
        "OF-Syn: cross-ethnicity split.",
        config_type="split",
        data_source="syn",
        split_type="cross_ethnicity",
    ),
    _make_config(
        "of-syn-cross-bmi",
        "OF-Syn: cross-BMI split (train: normal/underweight, test: obese).",
        config_type="split",
        data_source="syn",
        split_type="cross_bmi",
    ),
]
# Cross-domain configs: train/val from one component, test from another.
_CROSSDOMAIN_CONFIGS = [
    _make_config(
        "of-sta-itw-cs",
        "Cross-domain: train/val on staged CS, test on OOPS.",
        config_type="split",
        data_source="staged",
        split_type="cs",
        train_source="staged",
        test_source="itw",
        test_split_type="cs",
    ),
    _make_config(
        "of-sta-itw-cv",
        "Cross-domain: train/val on staged CV, test on OOPS.",
        config_type="split",
        data_source="staged",
        split_type="cv",
        train_source="staged",
        test_source="itw",
        test_split_type="cv",
    ),
    _make_config(
        "of-syn-itw",
        "Cross-domain: train/val on OF-Syn random, test on OOPS.",
        config_type="split",
        data_source="syn",
        split_type="random",
        train_source="syn",
        test_source="itw",
        test_split_type="cs",
    ),
]
# One cross-subject config per individual staged dataset (e.g. "cmdfall").
_INDIVIDUAL_CONFIGS = [
    _make_config(
        name,
        f"{name} dataset with cross-subject splits.",
        config_type="split",
        data_source=name,
        split_type="cs",
    )
    for name in _STAGED_DATASETS
]
# Deprecated aliases: defined with full correct attributes so _info() works
# immediately (HF calls _info() during __init__, before any custom init code).
_DEPRECATED_ALIASES = {
    "cs-staged": "of-sta-cs",
    "cv-staged": "of-sta-cv",
    "cs-staged-wild": "of-sta-itw-cs",
    "cv-staged-wild": "of-sta-itw-cv",
    "OOPS": "of-itw",
}
# Build a lookup from config name to config object.
_ALL_NAMED_CONFIGS = {
    cfg.name: cfg
    for cfg in (
        _LABELS_CONFIGS + _AGGREGATE_CONFIGS + _PRIMARY_CONFIGS
        + _CROSSDOMAIN_CONFIGS + _INDIVIDUAL_CONFIGS
    )
}
# Materialize each alias as a real config mirroring its replacement's
# attributes, tagged with deprecated_alias_for so _split_generators can warn.
_DEPRECATED_CONFIGS = []
for _old_name, _new_name in _DEPRECATED_ALIASES.items():
    _target = _ALL_NAMED_CONFIGS[_new_name]
    _DEPRECATED_CONFIGS.append(
        _make_config(
            _old_name,
            f"DEPRECATED: Use '{_new_name}' instead.",
            config_type=_target.config_type,
            data_source=_target.data_source,
            split_type=_target.split_type,
            train_source=_target.train_source,
            test_source=_target.test_source,
            test_split_type=_target.test_split_type,
            paths_only=_target.paths_only,
            framewise=_target.framewise,
            include_video=_target.include_video,
            decode_video=_target.decode_video,
            deprecated_alias_for=_new_name,
        )
    )
# ---- Builder ----
class OmniFall(GeneratorBasedBuilder):
    """OmniFall unified fall detection benchmark builder.

    The active :class:`OmniFallConfig` determines both what is loaded
    (label CSVs, train/val/test splits, video-level metadata, or frame-wise
    HDF5 labels) and from which component (staged lab datasets, OOPS
    in-the-wild, OF-Syn synthetic, or combinations thereof).
    """

    VERSION = datasets.Version("2.0.0")
    BUILDER_CONFIG_CLASS = OmniFallConfig
    BUILDER_CONFIGS = (
        _LABELS_CONFIGS
        + _AGGREGATE_CONFIGS
        + _PRIMARY_CONFIGS
        + _CROSSDOMAIN_CONFIGS
        + _INDIVIDUAL_CONFIGS
        + _DEPRECATED_CONFIGS
    )
    DEFAULT_CONFIG_NAME = "labels"

    # Demographic and scene metadata columns (OF-Syn only), in schema order.
    # Shared by _gen_metadata, _gen_framewise, and _row_to_example so the
    # generators cannot drift out of sync with each other.
    _SYN_METADATA_FIELDS = [
        "age_group", "gender_presentation", "monk_skin_tone",
        "race_ethnicity_omb", "bmi_band", "height_band",
        "environment_category", "camera_shot", "speed",
        "camera_elevation", "camera_azimuth", "camera_distance",
    ]

    # (HF split enum, split-CSV stem) pairs used by every split-type config.
    _SPLIT_NAMES = [
        (Split.TRAIN, "train"),
        (Split.VALIDATION, "val"),
        (Split.TEST, "test"),
    ]

    def _info(self):
        """Return dataset metadata and the feature schema for the active config."""
        cfg = self.config
        if cfg.config_type == "metadata":
            features = _syn_metadata_features()
        elif cfg.framewise:
            features = _syn_framewise_features()
        elif cfg.paths_only:
            features = _paths_only_features()
        elif cfg.is_crossdomain:
            # Cross-domain configs mix sources, so use the common 7-col schema.
            features = _core_features()
        elif cfg.data_source == "syn":
            features = _syn_features()
        else:
            features = _core_features()
        if cfg.include_video:
            features["video"] = Video() if cfg.decode_video else Value("string")
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    # ---- Split generators ----
    def _split_generators(self, dl_manager):
        """Dispatch on config_type to build the list of SplitGenerators."""
        cfg = self.config
        # Emit deprecation warning for alias configs.
        if cfg.deprecated_alias_for:
            warnings.warn(
                f"Config '{cfg.name}' is deprecated. "
                f"Use '{cfg.deprecated_alias_for}' instead.",
                DeprecationWarning,
                stacklevel=2,
            )
        # Labels configs: all data in a single "train" split.
        if cfg.config_type == "labels":
            return self._labels_splits(cfg, dl_manager)
        # Metadata config: one row per video, no temporal segments.
        if cfg.config_type == "metadata":
            metadata_path = dl_manager.download("videos/metadata.csv")
            return [
                SplitGenerator(
                    name=Split.TRAIN,
                    gen_kwargs={"mode": "metadata", "metadata_path": metadata_path},
                ),
            ]
        # Framewise config (no split, all data).
        if cfg.config_type == "framewise":
            archive_path = dl_manager.download_and_extract(
                "data_files/syn_frame_wise_labels.tar.zst"
            )
            metadata_path = dl_manager.download("videos/metadata.csv")
            return [
                SplitGenerator(
                    name=Split.TRAIN,
                    gen_kwargs={
                        "mode": "framewise",
                        "hdf5_dir": archive_path,
                        "metadata_path": metadata_path,
                        "split_file": None,
                    },
                ),
            ]
        # Split configs (train/val/test).
        if cfg.config_type == "split":
            return self._split_config_generators(cfg, dl_manager)
        raise ValueError(f"Unknown config_type: {cfg.config_type}")

    def _labels_splits(self, cfg, dl_manager):
        """Generate split generators for labels-type configs (single split)."""
        if cfg.data_source == "syn":
            filepath = dl_manager.download(_SYN_LABEL_FILE)
            return [
                SplitGenerator(
                    name=Split.TRAIN,
                    gen_kwargs={"mode": "csv_direct", "filepath": filepath},
                ),
            ]
        elif cfg.data_source == "staged+itw":
            filepaths = dl_manager.download(_STAGED_LABEL_FILES + [_ITW_LABEL_FILE])
            return [
                SplitGenerator(
                    name=Split.TRAIN,
                    gen_kwargs={"mode": "csv_multi", "filepaths": filepaths},
                ),
            ]
        else:
            raise ValueError(f"Unsupported data_source for labels: {cfg.data_source}")

    def _split_config_generators(self, cfg, dl_manager):
        """Generate split generators for train/val/test split configs."""
        if cfg.is_crossdomain:
            return self._crossdomain_splits(cfg, dl_manager)
        if cfg.data_source == "syn":
            return self._syn_splits(cfg, dl_manager)
        elif cfg.data_source == "staged":
            return self._staged_splits(cfg, dl_manager)
        elif cfg.data_source == "itw":
            return self._itw_splits(cfg, dl_manager)
        elif cfg.data_source == "staged+itw":
            return self._aggregate_splits(cfg, dl_manager)
        elif cfg.data_source in _STAGED_DATASETS:
            return self._individual_splits(cfg, dl_manager)
        else:
            raise ValueError(f"Unknown data_source: {cfg.data_source}")

    def _staged_split_files(self, split_type, split_name):
        """Return list of split CSV paths for all 8 staged datasets."""
        return [f"splits/{split_type}/{ds}/{split_name}.csv" for ds in _STAGED_DATASETS]

    def _make_split_merge_generators(self, split_files_per_split, label_files,
                                     dl_manager, video_dir=None):
        """Create train/val/test SplitGenerators for split_merge mode.

        Args:
            split_files_per_split: dict mapping split name ("train"/"val"/"test")
                to a list of relative split-CSV paths.
            label_files: list of relative label file paths.
            dl_manager: download manager for resolving paths.
            video_dir: path to extracted video directory, or None.
        """
        resolved_labels = dl_manager.download(label_files)
        return [
            SplitGenerator(
                name=split_enum,
                gen_kwargs={
                    "mode": "split_merge",
                    "split_files": dl_manager.download(split_files_per_split[csv_name]),
                    "label_files": resolved_labels,
                    "video_dir": video_dir,
                },
            )
            for split_enum, csv_name in self._SPLIT_NAMES
        ]

    def _staged_splits(self, cfg, dl_manager):
        """OF-Staged: 8 datasets combined with CS or CV splits."""
        st = cfg.split_type
        return self._make_split_merge_generators(
            {sn: self._staged_split_files(st, sn) for sn in ("train", "val", "test")},
            _STAGED_LABEL_FILES,
            dl_manager,
        )

    def _itw_splits(self, cfg, dl_manager):
        """OF-ItW: OOPS-Fall (CS=CV identical)."""
        st = cfg.split_type
        return self._make_split_merge_generators(
            {sn: [f"splits/{st}/OOPS/{sn}.csv"] for sn in ("train", "val", "test")},
            [_ITW_LABEL_FILE],
            dl_manager,
        )

    def _aggregate_splits(self, cfg, dl_manager):
        """All staged + OOPS combined (cs or cv)."""
        st = cfg.split_type
        all_labels = _STAGED_LABEL_FILES + [_ITW_LABEL_FILE]
        return self._make_split_merge_generators(
            {sn: self._staged_split_files(st, sn) + [f"splits/{st}/OOPS/{sn}.csv"]
             for sn in ("train", "val", "test")},
            all_labels,
            dl_manager,
        )

    def _individual_splits(self, cfg, dl_manager):
        """Individual staged dataset with CS splits."""
        ds_name = cfg.data_source
        # Maps the lowercase dataset id to its hosted label CSV
        # (note the "GMDCSA24" capitalization of the hosted file).
        label_file_map = {
            "caucafall": "labels/caucafall.csv",
            "cmdfall": "labels/cmdfall.csv",
            "edf": "labels/edf.csv",
            "gmdcsa24": "labels/GMDCSA24.csv",
            "le2i": "labels/le2i.csv",
            "mcfd": "labels/mcfd.csv",
            "occu": "labels/occu.csv",
            "up_fall": "labels/up_fall.csv",
        }
        label_file = label_file_map[ds_name]
        st = cfg.split_type
        return self._make_split_merge_generators(
            {sn: [f"splits/{st}/{ds_name}/{sn}.csv"] for sn in ("train", "val", "test")},
            [label_file],
            dl_manager,
        )

    def _syn_splits(self, cfg, dl_manager):
        """OF-Syn split strategies (random / cross-age / cross-ethnicity / cross-BMI)."""
        st = cfg.split_type
        split_dir = f"splits/syn/{st}"
        # Download video archive if requested.
        video_dir = None
        if cfg.include_video:
            video_dir = dl_manager.download_and_extract(_SYN_VIDEO_ARCHIVE)
        if cfg.framewise:
            archive_path = dl_manager.download_and_extract(
                "data_files/syn_frame_wise_labels.tar.zst"
            )
            metadata_path = dl_manager.download("videos/metadata.csv")
            split_files = dl_manager.download(
                {sn: f"{split_dir}/{sn}.csv" for sn in ("train", "val", "test")}
            )
            return [
                SplitGenerator(
                    name=split_enum,
                    gen_kwargs={
                        "mode": "framewise",
                        "hdf5_dir": archive_path,
                        "metadata_path": metadata_path,
                        "split_file": split_files[csv_name],
                    },
                )
                for split_enum, csv_name in self._SPLIT_NAMES
            ]
        if cfg.paths_only:
            split_files = dl_manager.download(
                {sn: f"{split_dir}/{sn}.csv" for sn in ("train", "val", "test")}
            )
            return [
                SplitGenerator(
                    name=split_enum,
                    gen_kwargs={
                        "mode": "paths_only",
                        "split_file": split_files[csv_name],
                    },
                )
                for split_enum, csv_name in self._SPLIT_NAMES
            ]
        return self._make_split_merge_generators(
            {sn: [f"{split_dir}/{sn}.csv"] for sn in ("train", "val", "test")},
            [_SYN_LABEL_FILE],
            dl_manager,
            video_dir=video_dir,
        )

    def _crossdomain_splits(self, cfg, dl_manager):
        """Cross-domain configs: train/val from one source, test from another."""
        train_st = cfg.split_type
        test_st = cfg.test_split_type or "cs"
        # Download video archive if requested and train source is syn.
        video_dir = None
        if cfg.include_video and cfg.train_source == "syn":
            video_dir = dl_manager.download_and_extract(_SYN_VIDEO_ARCHIVE)
        # Determine train/val files and labels.
        if cfg.train_source == "staged":
            train_split_files = {
                sn: self._staged_split_files(train_st, sn)
                for sn in ("train", "val")
            }
            train_labels = _STAGED_LABEL_FILES
        elif cfg.train_source == "syn":
            train_split_files = {
                sn: [f"splits/syn/{train_st}/{sn}.csv"]
                for sn in ("train", "val")
            }
            train_labels = [_SYN_LABEL_FILE]
        else:
            raise ValueError(f"Unsupported train_source: {cfg.train_source}")
        # Determine test files and labels.
        if cfg.test_source == "itw":
            test_split_files = [f"splits/{test_st}/OOPS/test.csv"]
            test_labels = [_ITW_LABEL_FILE]
        else:
            raise ValueError(f"Unsupported test_source: {cfg.test_source}")
        # Resolve all remote paths up front.
        resolved_train_labels = dl_manager.download(train_labels)
        resolved_test_labels = dl_manager.download(test_labels)
        resolved_test_splits = dl_manager.download(test_split_files)
        return [
            SplitGenerator(
                name=Split.TRAIN,
                gen_kwargs={
                    "mode": "split_merge",
                    "split_files": dl_manager.download(train_split_files["train"]),
                    "label_files": resolved_train_labels,
                    "video_dir": video_dir,
                },
            ),
            SplitGenerator(
                name=Split.VALIDATION,
                gen_kwargs={
                    "mode": "split_merge",
                    "split_files": dl_manager.download(train_split_files["val"]),
                    "label_files": resolved_train_labels,
                    "video_dir": video_dir,
                },
            ),
            SplitGenerator(
                name=Split.TEST,
                gen_kwargs={
                    "mode": "split_merge",
                    "split_files": resolved_test_splits,
                    "label_files": resolved_test_labels,
                    "video_dir": None,  # test source (itw) has no hosted videos
                },
            ),
        ]

    # ---- Example generators ----
    def _generate_examples(self, mode, **kwargs):
        """Dispatch to the appropriate generator based on mode."""
        if mode == "csv_direct":
            yield from self._gen_csv_direct(**kwargs)
        elif mode == "csv_multi":
            yield from self._gen_csv_multi(**kwargs)
        elif mode == "split_merge":
            yield from self._gen_split_merge(**kwargs)
        elif mode == "metadata":
            yield from self._gen_metadata(**kwargs)
        elif mode == "framewise":
            yield from self._gen_framewise(**kwargs)
        elif mode == "paths_only":
            yield from self._gen_paths_only(**kwargs)
        else:
            raise ValueError(f"Unknown generation mode: {mode}")

    def _gen_csv_direct(self, filepath):
        """Yield examples from a single label CSV."""
        df = pd.read_csv(filepath)
        for idx, row in df.iterrows():
            yield idx, self._row_to_example(row)

    def _gen_csv_multi(self, filepaths):
        """Yield examples from the concatenation of multiple label CSVs."""
        dfs = [pd.read_csv(fp) for fp in filepaths]
        df = pd.concat(dfs, ignore_index=True)
        for idx, row in df.iterrows():
            yield idx, self._row_to_example(row)

    def _gen_split_merge(self, split_files, label_files, video_dir=None):
        """Load split paths, left-join them with labels, and yield examples.

        A path may match several label rows (one per temporal segment); the
        merge intentionally fans out one split row into multiple examples.
        """
        import os
        split_dfs = [pd.read_csv(sf) for sf in split_files]
        split_df = pd.concat(split_dfs, ignore_index=True)
        if self.config.paths_only:
            for idx, row in split_df.iterrows():
                yield idx, {"path": row["path"]}
            return
        label_dfs = [pd.read_csv(lf) for lf in label_files]
        labels_df = pd.concat(label_dfs, ignore_index=True)
        merged_df = pd.merge(split_df, labels_df, on="path", how="left")
        for idx, row in merged_df.iterrows():
            example = self._row_to_example(row)
            if video_dir is not None:
                example["video"] = os.path.join(video_dir, row["path"] + ".mp4")
            yield idx, example

    def _gen_metadata(self, metadata_path):
        """Yield one video-level metadata example per unique video path."""
        df = pd.read_csv(metadata_path)
        metadata_cols = ["path"] + self._SYN_METADATA_FIELDS
        # Tolerate missing columns in the hosted metadata CSV.
        available_cols = [c for c in metadata_cols if c in df.columns]
        df = df[available_cols].drop_duplicates(subset=["path"]).reset_index(drop=True)
        df["dataset"] = "of-syn"
        for idx, row in df.iterrows():
            yield idx, self._row_to_example(row)

    @staticmethod
    def _h5_member_to_video_path(name):
        """Map an archive member name like './a/b.h5' to the video path 'a/b'.

        Uses explicit prefix/suffix checks: str.lstrip("./") would also eat
        the leading dot of hidden directories (".hidden/x.h5" -> "hidden/..."),
        and str.replace(".h5", "") would corrupt names that contain ".h5"
        anywhere other than the suffix.
        """
        if name.startswith("./"):
            name = name[2:]
        if name.endswith(".h5"):
            name = name[: -len(".h5")]
        return name

    def _framewise_example(self, video_path, frame_labels, metadata_df):
        """Join frame labels with video metadata; return None if no metadata row."""
        rows = metadata_df[metadata_df["path"] == video_path]
        if len(rows) == 0:
            return None
        meta = rows.iloc[0]
        example = {
            "path": video_path,
            "dataset": "of-syn",
            "frame_labels": frame_labels,
        }
        for field in self._SYN_METADATA_FIELDS:
            if field in meta and pd.notna(meta[field]):
                example[field] = str(meta[field])
        return example

    def _gen_framewise(self, hdf5_dir, metadata_path, split_file=None):
        """Yield frame-wise examples from HDF5 files in a tar archive or directory.

        Args:
            hdf5_dir: path to a tar archive of .h5 files or to an extracted
                directory containing them.
            metadata_path: CSV with one metadata row per video path.
            split_file: optional split CSV restricting which videos to yield.
        """
        import tarfile
        from pathlib import Path
        metadata_df = pd.read_csv(metadata_path)
        valid_paths = None
        if split_file is not None:
            split_df = pd.read_csv(split_file)
            valid_paths = set(split_df["path"].tolist())
        hdf5_path = Path(hdf5_dir)
        if hdf5_path.is_file() and (
            hdf5_path.suffix == ".tar" or tarfile.is_tarfile(str(hdf5_path))
        ):
            yield from self._gen_framewise_from_tar(hdf5_path, metadata_df, valid_paths)
        else:
            yield from self._gen_framewise_from_dir(hdf5_path, metadata_df, valid_paths)

    def _gen_framewise_from_tar(self, tar_path, metadata_df, valid_paths):
        """Yield frame-wise examples from .h5 members inside a tar archive."""
        import h5py
        import tarfile
        import tempfile
        idx = 0
        with tarfile.open(tar_path, "r") as tar:
            for member in tar.getmembers():
                if not member.name.endswith(".h5"):
                    continue
                video_path = self._h5_member_to_video_path(member.name)
                if valid_paths is not None and video_path not in valid_paths:
                    continue
                try:
                    h5_file = tar.extractfile(member)
                    if h5_file is None:
                        continue
                    # h5py needs a seekable real file, so spool the member out.
                    # NOTE(review): delete=True prevents reopening the temp
                    # file on Windows; assumed to run on POSIX.
                    with tempfile.NamedTemporaryFile(suffix=".h5", delete=True) as tmp:
                        tmp.write(h5_file.read())
                        tmp.flush()
                        with h5py.File(tmp.name, "r") as f:
                            frame_labels = f["label_indices"][:].tolist()
                    example = self._framewise_example(
                        video_path, frame_labels, metadata_df
                    )
                    if example is None:
                        continue
                    yield idx, example
                    idx += 1
                except Exception as e:
                    # Best-effort: skip unreadable members but surface the cause.
                    warnings.warn(f"Failed to process {member.name}: {e}")
                    continue

    def _gen_framewise_from_dir(self, hdf5_path, metadata_df, valid_paths):
        """Yield frame-wise examples from .h5 files under an extracted directory."""
        import h5py
        idx = 0
        for h5_file_path in sorted(hdf5_path.glob("**/*.h5")):
            video_path = str(h5_file_path.relative_to(hdf5_path).with_suffix(""))
            if valid_paths is not None and video_path not in valid_paths:
                continue
            try:
                with h5py.File(h5_file_path, "r") as f:
                    frame_labels = f["label_indices"][:].tolist()
                example = self._framewise_example(video_path, frame_labels, metadata_df)
                if example is None:
                    continue
                yield idx, example
                idx += 1
            except Exception as e:
                # Best-effort: skip unreadable files but surface the cause.
                warnings.warn(f"Failed to process {h5_file_path}: {e}")
                continue

    def _gen_paths_only(self, split_file):
        """Yield path-only examples from a split CSV."""
        df = pd.read_csv(split_file)
        for idx, row in df.iterrows():
            yield idx, {"path": row["path"]}

    def _row_to_example(self, row):
        """Convert a DataFrame row to a typed example dict.

        Only includes fields present in the row. HuggingFace's
        Features.encode_example() ignores extra fields and fills missing
        optional fields.
        """
        example = {"path": str(row["path"])}
        # Core temporal/annotation fields, cast to their schema types.
        for field, dtype in [
            ("label", int), ("start", float), ("end", float),
            ("subject", int), ("cam", int),
        ]:
            if field in row.index and pd.notna(row[field]):
                example[field] = dtype(row[field])
        if "dataset" in row.index and pd.notna(row["dataset"]):
            example["dataset"] = str(row["dataset"])
        # Demographic and scene metadata (present only for syn data).
        for field in self._SYN_METADATA_FIELDS:
            if field in row.index and pd.notna(row[field]):
                example[field] = str(row[field])
        return example