Mark7121983123 committed on
Commit
2322ac4
·
verified ·
1 Parent(s): 3235399

Add Croissant + RAI metadata

Browse files
Files changed (1) hide show
  1. croissant.json +84 -0
croissant.json ADDED
@@ -0,0 +1,84 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "@context": {
3
+ "@language": "en",
4
+ "@vocab": "https://schema.org/",
5
+ "citeAs": "cr:citeAs",
6
+ "column": "cr:column",
7
+ "conformsTo": "dct:conformsTo",
8
+ "cr": "http://mlcommons.org/croissant/",
9
+ "rai": "http://mlcommons.org/croissant/RAI/",
10
+ "data": {
11
+ "@id": "cr:data",
12
+ "@type": "@json"
13
+ },
14
+ "dataType": {
15
+ "@id": "cr:dataType",
16
+ "@type": "@vocab"
17
+ },
18
+ "dct": "http://purl.org/dc/terms/",
19
+ "examples": {
20
+ "@id": "cr:examples",
21
+ "@type": "@json"
22
+ },
23
+ "extract": "cr:extract",
24
+ "field": "cr:field",
25
+ "fileProperty": "cr:fileProperty",
26
+ "fileObject": "cr:fileObject",
27
+ "fileSet": "cr:fileSet",
28
+ "format": "cr:format",
29
+ "includes": "cr:includes",
30
+ "isLiveDataset": "cr:isLiveDataset",
31
+ "jsonPath": "cr:jsonPath",
32
+ "key": "cr:key",
33
+ "md5": "cr:md5",
34
+ "parentField": "cr:parentField",
35
+ "path": "cr:path",
36
+ "recordSet": "cr:recordSet",
37
+ "references": "cr:references",
38
+ "regex": "cr:regex",
39
+ "repeated": "cr:repeated",
40
+ "replace": "cr:replace",
41
+ "sc": "https://schema.org/",
42
+ "separator": "cr:separator",
43
+ "source": "cr:source",
44
+ "subField": "cr:subField",
45
+ "transform": "cr:transform"
46
+ },
47
+ "@type": "sc:Dataset",
48
+ "name": "VBVR-MultiStep-Bench",
49
+ "conformsTo": "http://mlcommons.org/croissant/1.0",
50
+ "description": "The frozen 180-instance public evaluation split of the VBVR-MultiStep benchmark for long-horizon multi-step image-to-video reasoning. 36 parameterized tasks across six reasoning families (Navigation, Planning, CSP, Execution, Geometry, Physics). Each instance follows a five-artifact contract: first_frame.png, prompt.txt, final_frame.png, ground_truth.mp4, question_metadata.json.",
51
+ "alternateName": ["VBVR-MultiStep Evaluation Split"],
52
+ "creator": {
53
+ "@type": "sc:Organization",
54
+ "name": "Video-Reason",
55
+ "url": "https://video-reason.com"
56
+ },
57
+ "datePublished": "2026-05-06",
58
+ "keywords": ["video reasoning", "multi-step reasoning", "long-horizon", "image-to-video", "benchmark", "synthetic"],
59
+ "license": "https://creativecommons.org/licenses/by/4.0/",
60
+ "url": "https://huggingface.co/datasets/Video-Reason/VBVR-MultiStep-Bench",
61
+ "version": "1.0.0",
62
+ "isLiveDataset": false,
63
+ "rai:dataCollection": "Fully synthetic. Every instance is procedurally produced by a deterministic per-task generator that consumes only released task definitions and a seed. There is no scraping, no human subjects, no third-party media, and no annotation of pre-existing content. Generators run as released code; instance manifests record the seed, the generator version, and per-task tolerances for exact reproduction.",
64
+ "rai:dataCollectionType": ["Synthetic"],
65
+ "rai:dataPreprocessingProtocol": "No post-collection preprocessing is applied. Each task's generator emits the five-artifact contract (first_frame.png, prompt.txt, final_frame.png, ground_truth.mp4, question_metadata.json) directly. Sample index, seed, and version are recorded in question_metadata.json so any instance is regenerable from the released code.",
66
+ "rai:dataAnnotationProtocol": "No human annotation. The reference rollout (ground_truth.mp4) is computed by the generator's deterministic ground-truth solver and is not produced by humans.",
67
+ "rai:dataAnnotationPlatform": "N/A (no annotation).",
68
+ "rai:dataReleaseMaintenancePlan": "Versioned releases on the Hugging Face Hub. The Croissant file in this repository is the canonical long-term record. Issues and breaking changes will be documented in repository commits and the dataset version field.",
69
+ "rai:dataLimitations": [
70
+ "Tasks are stylized and synthetic; transfer to unconstrained open-world video is not validated.",
71
+ "Visual style is generator-controlled and intentionally simplified to keep symbolic state recoverable; this is not a photorealism benchmark.",
72
+ "The CSP family ships 6 tasks but only 3 (Multi-13/14/15 excluded) are used in the human-judging pool described in the companion paper.",
73
+ "Reference rollouts encode one valid trajectory per instance; alternative valid trajectories are not enumerated."
74
+ ],
75
+ "rai:dataBiases": [
76
+ "Distribution biases inherited from generator parameter ranges: e.g., maze sizes, planning horizons, physics regimes are drawn from fixed bounded distributions and do not span the long tail of real-world settings.",
77
+ "Family balance is uniform (6 tasks per family) by design; this is a deliberate evaluation choice and does not reflect natural prevalence of these reasoning patterns.",
78
+ "Visual rendering is monocular, planar, and stylized; appearance distribution does not approximate any real-world video corpus.",
79
+ "No demographic content is generated; bias along human demographic axes does not apply to this dataset."
80
+ ],
81
+ "rai:personalSensitiveInformation": "None. The dataset contains no personal information, no biometric data, no demographic information, and no human subjects. All visual content is procedurally generated geometric / symbolic / physical scenes.",
82
+ "rai:dataUseCases": "Trajectory-level evaluation of image-to-video systems on long-horizon, rule-grounded tasks. Suitable for: (a) blind human pairwise comparison across model families on process correctness, fidelity, and render-quality axes; (b) automated reference-trajectory comparison once parsers for dense video state become reliable; (c) family-localized failure-mode diagnosis. The companion paper validates use case (a). Use cases (b) and (c) are not validated by this release.",
83
+ "rai:dataSocialImpact": "Intended for research on reasoning evaluation in video generation. Risks are minimal: the dataset is synthetic, free of personal content, and designed for evaluation rather than deployment. Potential for misuse is low; the most plausible negative impact is overfitting research on stylized rule-checked tasks at the expense of unconstrained video understanding, which we mitigate by clearly scoping the contribution as a complement to (not a replacement for) appearance-centric evaluation."
84
+ }