{
  "@context": {
    "@language": "en",
    "@vocab": "https://schema.org/",
    "citeAs": "cr:citeAs",
    "column": "cr:column",
    "conformsTo": "dct:conformsTo",
    "cr": "http://mlcommons.org/croissant/",
    "rai": "http://mlcommons.org/croissant/RAI/",
    "data": {
      "@id": "cr:data",
      "@type": "@json"
    },
    "dataType": {
      "@id": "cr:dataType",
      "@type": "@vocab"
    },
    "dct": "http://purl.org/dc/terms/",
    "examples": {
      "@id": "cr:examples",
      "@type": "@json"
    },
    "extract": "cr:extract",
    "field": "cr:field",
    "fileProperty": "cr:fileProperty",
    "fileObject": "cr:fileObject",
    "fileSet": "cr:fileSet",
    "format": "cr:format",
    "includes": "cr:includes",
    "isLiveDataset": "cr:isLiveDataset",
    "jsonPath": "cr:jsonPath",
    "key": "cr:key",
    "md5": "cr:md5",
    "parentField": "cr:parentField",
    "path": "cr:path",
    "recordSet": "cr:recordSet",
    "references": "cr:references",
    "regex": "cr:regex",
    "repeated": "cr:repeated",
    "replace": "cr:replace",
    "sc": "https://schema.org/",
    "separator": "cr:separator",
    "source": "cr:source",
    "subField": "cr:subField",
    "transform": "cr:transform"
  },
| "@type": "sc:Dataset", |
| "name": "VBVR-MultiStep-Bench", |
| "conformsTo": "http://mlcommons.org/croissant/1.0", |
| "description": "The frozen 180-instance public evaluation split of the VBVR-MultiStep benchmark for long-horizon multi-step image-to-video reasoning. 36 parameterized tasks across six reasoning families (Navigation, Planning, CSP, Execution, Geometry, Physics). Each instance follows a five-artifact contract: first_frame.png, prompt.txt, final_frame.png, ground_truth.mp4, question_metadata.json.", |
| "alternateName": ["VBVR-MultiStep Evaluation Split"], |
| "creator": { |
| "@type": "sc:Organization", |
| "name": "Video-Reason", |
| "url": "https://video-reason.com" |
| }, |
| "datePublished": "2026-05-06", |
| "keywords": ["video reasoning", "multi-step reasoning", "long-horizon", "image-to-video", "benchmark", "synthetic"], |
| "license": "https://creativecommons.org/licenses/by/4.0/", |
| "url": "https://huggingface.co/datasets/Video-Reason/VBVR-MultiStep-Bench", |
| "version": "1.0.0", |
| "isLiveDataset": false, |
| "rai:dataCollection": "Fully synthetic. Every instance is procedurally produced by a deterministic per-task generator that consumes only released task definitions and a seed. There is no scraping, no human subjects, no third-party media, and no annotation of pre-existing content. Generators run as released code; instance manifests record the seed, the generator version, and per-task tolerances for exact reproduction.", |
| "rai:dataCollectionType": ["Synthetic"], |
| "rai:hasSyntheticData": true, |
| "rai:dataPreprocessingProtocol": "No post-collection preprocessing is applied. Each task's generator emits the five-artifact contract (first_frame.png, prompt.txt, final_frame.png, ground_truth.mp4, question_metadata.json) directly. Sample index, seed, and version are recorded in question_metadata.json so any instance is regenerable from the released code.", |
| "rai:dataAnnotationProtocol": "No human annotation. The reference rollout (ground_truth.mp4) is computed by the generator's deterministic ground-truth solver and is not produced by humans.", |
| "rai:dataAnnotationPlatform": "N/A (no annotation).", |
| "rai:dataReleaseMaintenancePlan": "Versioned releases on the Hugging Face Hub. The Croissant file in this repository is the canonical long-term record. Issues and breaking changes will be documented in repository commits and the dataset version field.", |
| "rai:dataLimitations": [ |
| "Tasks are stylized and synthetic; transfer to unconstrained open-world video is not validated.", |
| "Visual style is generator-controlled and intentionally simplified to keep symbolic state recoverable; this is not a photorealism benchmark.", |
| "The CSP family ships 6 tasks but only 3 (Multi-13/14/15 excluded) are used in the human-judging pool described in the companion paper.", |
| "Reference rollouts encode one valid trajectory per instance; alternative valid trajectories are not enumerated." |
| ], |
| "rai:dataBiases": [ |
| "Distribution biases inherited from generator parameter ranges: e.g., maze sizes, planning horizons, physics regimes are drawn from fixed bounded distributions and do not span the long tail of real-world settings.", |
| "Family balance is uniform (6 tasks per family) by design; this is a deliberate evaluation choice and does not reflect natural prevalence of these reasoning patterns.", |
| "Visual rendering is monocular, planar, and stylized; appearance distribution does not approximate any real-world video corpus.", |
| "No demographic content is generated; bias along human demographic axes does not apply to this dataset." |
| ], |
| "rai:personalSensitiveInformation": "None. The dataset contains no personal information, no biometric data, no demographic information, and no human subjects. All visual content is procedurally generated geometric / symbolic / physical scenes.", |
| "rai:dataUseCases": "Trajectory-level evaluation of image-to-video systems on long-horizon, rule-grounded tasks. Suitable for: (a) blind human pairwise comparison across model families on process correctness, fidelity, and render-quality axes; (b) automated reference-trajectory comparison once parsers for dense video state become reliable; (c) family-localized failure-mode diagnosis. The companion paper validates use case (a). Use cases (b) and (c) are not validated by this release.", |
| "rai:dataSocialImpact": "Intended for research on reasoning evaluation in video generation. Risks are minimal: the dataset is synthetic, free of personal content, and designed for evaluation rather than deployment. Potential for misuse is low; the most plausible negative impact is overfitting research on stylized rule-checked tasks at the expense of unconstrained video understanding, which we mitigate by clearly scoping the contribution as a complement to (not a replacement for) appearance-centric evaluation." |
| } |
|
|