EvalData committed on
Commit
b4b782e
·
verified ·
1 Parent(s): 285f7ca

Fix Croissant: rename rai:dataPersonalSensitiveInformation -> rai:personalSensitiveInformation, add rai:hasSyntheticData=false, switch contentUrls to absolute HF URLs, drop nested-JSON vision recordSet

Browse files
Files changed (1) hide show
  1. data/croissant.json +8 -67
data/croissant.json CHANGED
@@ -82,7 +82,9 @@
82
 
83
  "rai:dataBiases": "The 88-paper meta-analysis (data/meta_analysis.csv) is biased toward vision (51/88) over language (16) and audio (3), reflecting publication frequency in representational similarity research up to 2026. The vision atlas is biased toward English-language web/image-corpus pretraining (CLIP, DINOv2 trained on LVD-142M; ImageNet-trained supervised baselines). The LLM atlas is English-only. Family selection is biased toward openly available checkpoints; closed-source models (GPT-4 class) are excluded by necessity, and recent permissively licensed code-generation or multilingual models may be under-represented.",
84
 
85
- "rai:dataPersonalSensitiveInformation": "The release contains no personal or sensitive information. All four atlases are statistical summaries (similarity scores, regime labels) over public benchmark stimuli (CIFAR-100, STL-10, WikiText-103, LibriSpeech test-clean), and the released feature tensors are deterministic activations of public pretrained models on these public benchmarks. CIFAR-100 and STL-10 contain no personal data. LibriSpeech test-clean contains read public-domain audiobook recordings with speaker IDs released by the LibriVox/LibriSpeech project; we use only model activations, not raw audio. WikiText-103 is derived from Wikipedia featured/good articles. No demographic, health, financial, political, or religious information is collected, inferred, or released.",
 
 
86
 
87
  "rai:dataUseCases": "Intended uses (validated in the accompanying paper): (a) compatibility screening between candidate model pairs prior to model-stitching or knowledge-transfer experiments; (b) reproducing and auditing the headline statistical findings of the paper (mixed-effects family beta=0.20, block-bootstrap rho=-0.70, retrieval rho=0.76); (c) extending the atlas with new encoders by re-running the extraction pipeline; (d) regression testing for representation-similarity research that builds on CKA, mutual k-NN, Procrustes-based transport, or effective-rank proxies. Out-of-scope uses (NOT validated and explicitly not recommended): (i) deployment-time decisions about whether two production models are interchangeable in a downstream application; (ii) safety or fairness certification of any individual encoder; (iii) inference about model training data, intellectual property, or copyright provenance from feature-space similarity.",
88
 
@@ -112,7 +114,7 @@
112
  "@id": "atlas-vision",
113
  "name": "atlas.json",
114
  "description": "Vision compatibility atlas: 190 pairs across 20 pretrained encoders on CIFAR-100 test (5000 images). Each pair has six BCCT metrics and a regime label.",
115
- "contentUrl": "data/atlas.json",
116
  "encodingFormat": "application/json",
117
  "sha256": "c8826acc8352017e5f3e0c599666f71ee367fdf1677fa9cd4d6c6b64aa9a6a77"
118
  },
@@ -121,7 +123,7 @@
121
  "@id": "atlas-llm",
122
  "name": "llm_atlas.json",
123
  "description": "Language compatibility atlas: 36 pairs across 9 base LLMs on WikiText-103 (2000 passages of 128 tokens).",
124
- "contentUrl": "data/llm_atlas.json",
125
  "encodingFormat": "application/json",
126
  "sha256": "56543b9b58b2855a64749aa9a158cdcb9a06fcf7f1bfc662ebe7afb1026b28dd"
127
  },
@@ -130,7 +132,7 @@
130
  "@id": "atlas-audio",
131
  "name": "audio_atlas.json",
132
  "description": "Preliminary audio compatibility atlas: 15 pairs across 6 audio encoders on LibriSpeech test-clean.",
133
- "contentUrl": "data/audio_atlas.json",
134
  "encodingFormat": "application/json",
135
  "sha256": "f6c09f96fe301fc8156421a18d10534ea704cea0c598aac87bb9f4e0142c20a5"
136
  },
@@ -139,7 +141,7 @@
139
  "@id": "atlas-video",
140
  "name": "video_atlas.json",
141
  "description": "Exploratory video compatibility atlas: 15 pairs across 6 video encoders on STL-10 pseudo-clips.",
142
- "contentUrl": "data/video_atlas.json",
143
  "encodingFormat": "application/json",
144
  "sha256": "189e11c2a70eb7ec96a3dd112d86b9ffa0651745c8d6ddfcec67a041f4b6e752"
145
  },
@@ -148,7 +150,7 @@
148
  "@id": "meta-analysis-csv",
149
  "name": "meta_analysis.csv",
150
  "description": "88-paper meta-analysis extraction (cite key, year, thread, domain, scale tier, metric type, reported value, inferred BCCT regime, key finding).",
151
- "contentUrl": "data/meta_analysis.csv",
152
  "encodingFormat": "text/csv",
153
  "sha256": "d9e1c0d0eb70ec4d9d4036465643696a0b03222ae0783084c82fda40ed43b0d8"
154
  },
@@ -191,67 +193,6 @@
191
  ],
192
 
193
  "recordSet": [
194
- {
195
- "@type": "cr:RecordSet",
196
- "@id": "vision-pair-records",
197
- "name": "vision-pairwise",
198
- "description": "Per-pair records of the vision compatibility atlas. Each record is a unique pair of two of the 20 atlas encoders.",
199
- "field": [
200
- {
201
- "@type": "cr:Field",
202
- "@id": "vision-pair-records/pair_key",
203
- "name": "pair_key",
204
- "description": "Human-readable pair name in the form 'encoder_a <-> encoder_b'.",
205
- "dataType": "sc:Text",
206
- "source": {
207
- "fileObject": {"@id": "atlas-vision"},
208
- "extract": {"jsonPath": "$.pairwise[*]"}
209
- }
210
- },
211
- {
212
- "@type": "cr:Field",
213
- "@id": "vision-pair-records/s_local",
214
- "name": "s_local",
215
- "description": "Null-calibrated mutual k-NN overlap (k=10, 1000 permutations).",
216
- "dataType": "sc:Float"
217
- },
218
- {
219
- "@type": "cr:Field",
220
- "@id": "vision-pair-records/s_global",
221
- "name": "s_global",
222
- "description": "Linear centered kernel alignment (CKA) on Gram matrices.",
223
- "dataType": "sc:Float"
224
- },
225
- {
226
- "@type": "cr:Field",
227
- "@id": "vision-pair-records/tau",
228
- "name": "tau",
229
- "description": "Bidirectional conservative transport linearity score: min(tau_AB, tau_BA), per-dimension R^2 ratio of orthogonal Procrustes to a standardized 2-layer MLP probe on 80/20 train/test splits.",
230
- "dataType": "sc:Float"
231
- },
232
- {
233
- "@type": "cr:Field",
234
- "@id": "vision-pair-records/tai",
235
- "name": "tai",
236
- "description": "Transport asymmetry index: |perf(A->B) - perf(B->A)| / max.",
237
- "dataType": "sc:Float"
238
- },
239
- {
240
- "@type": "cr:Field",
241
- "@id": "vision-pair-records/delta",
242
- "name": "delta",
243
- "description": "Bottleneck mismatch: absolute difference of effective-rank bitrate proxies.",
244
- "dataType": "sc:Float"
245
- },
246
- {
247
- "@type": "cr:Field",
248
- "@id": "vision-pair-records/regime",
249
- "name": "regime",
250
- "description": "BCCT regime classification: one of {Collapsed, Local-Only, Convergent, Divergent}.",
251
- "dataType": "sc:Text"
252
- }
253
- ]
254
- },
255
  {
256
  "@type": "cr:RecordSet",
257
  "@id": "meta-analysis-records",
 
82
 
83
  "rai:dataBiases": "The 88-paper meta-analysis (data/meta_analysis.csv) is biased toward vision (51/88) over language (16) and audio (3), reflecting publication frequency in representational similarity research up to 2026. The vision atlas is biased toward English-language web/image-corpus pretraining (CLIP, DINOv2 trained on LVD-142M; ImageNet-trained supervised baselines). The LLM atlas is English-only. Family selection is biased toward openly available checkpoints; closed-source models (GPT-4 class) are excluded by necessity, and recent permissively licensed code-generation or multilingual models may be under-represented.",
84
 
85
+ "rai:personalSensitiveInformation": "The release contains no personal or sensitive information. All four atlases are statistical summaries (similarity scores, regime labels) over public benchmark stimuli (CIFAR-100, STL-10, WikiText-103, LibriSpeech test-clean), and the released feature tensors are deterministic activations of public pretrained models on these public benchmarks. CIFAR-100 and STL-10 contain no personal data. LibriSpeech test-clean contains read public-domain audiobook recordings with speaker IDs released by the LibriVox/LibriSpeech project; we use only model activations, not raw audio. WikiText-103 is derived from Wikipedia featured/good articles. No demographic, health, financial, political, or religious information is collected, inferred, or released.",
86
+ "rai:hasSyntheticData": false,
87
+ "rai:syntheticDataDescription": "No synthetic data is used or released. All numerical artifacts are deterministic outputs of public pretrained models evaluated on public real-world benchmarks (CIFAR-100, STL-10, WikiText-103, LibriSpeech test-clean).",
88
 
89
  "rai:dataUseCases": "Intended uses (validated in the accompanying paper): (a) compatibility screening between candidate model pairs prior to model-stitching or knowledge-transfer experiments; (b) reproducing and auditing the headline statistical findings of the paper (mixed-effects family beta=0.20, block-bootstrap rho=-0.70, retrieval rho=0.76); (c) extending the atlas with new encoders by re-running the extraction pipeline; (d) regression testing for representation-similarity research that builds on CKA, mutual k-NN, Procrustes-based transport, or effective-rank proxies. Out-of-scope uses (NOT validated and explicitly not recommended): (i) deployment-time decisions about whether two production models are interchangeable in a downstream application; (ii) safety or fairness certification of any individual encoder; (iii) inference about model training data, intellectual property, or copyright provenance from feature-space similarity.",
90
 
 
114
  "@id": "atlas-vision",
115
  "name": "atlas.json",
116
  "description": "Vision compatibility atlas: 190 pairs across 20 pretrained encoders on CIFAR-100 test (5000 images). Each pair has six BCCT metrics and a regime label.",
117
+ "contentUrl": "https://huggingface.co/datasets/EvalData/BCCT-Hub/resolve/main/data/atlas.json",
118
  "encodingFormat": "application/json",
119
  "sha256": "c8826acc8352017e5f3e0c599666f71ee367fdf1677fa9cd4d6c6b64aa9a6a77"
120
  },
 
123
  "@id": "atlas-llm",
124
  "name": "llm_atlas.json",
125
  "description": "Language compatibility atlas: 36 pairs across 9 base LLMs on WikiText-103 (2000 passages of 128 tokens).",
126
+ "contentUrl": "https://huggingface.co/datasets/EvalData/BCCT-Hub/resolve/main/data/llm_atlas.json",
127
  "encodingFormat": "application/json",
128
  "sha256": "56543b9b58b2855a64749aa9a158cdcb9a06fcf7f1bfc662ebe7afb1026b28dd"
129
  },
 
132
  "@id": "atlas-audio",
133
  "name": "audio_atlas.json",
134
  "description": "Preliminary audio compatibility atlas: 15 pairs across 6 audio encoders on LibriSpeech test-clean.",
135
+ "contentUrl": "https://huggingface.co/datasets/EvalData/BCCT-Hub/resolve/main/data/audio_atlas.json",
136
  "encodingFormat": "application/json",
137
  "sha256": "f6c09f96fe301fc8156421a18d10534ea704cea0c598aac87bb9f4e0142c20a5"
138
  },
 
141
  "@id": "atlas-video",
142
  "name": "video_atlas.json",
143
  "description": "Exploratory video compatibility atlas: 15 pairs across 6 video encoders on STL-10 pseudo-clips.",
144
+ "contentUrl": "https://huggingface.co/datasets/EvalData/BCCT-Hub/resolve/main/data/video_atlas.json",
145
  "encodingFormat": "application/json",
146
  "sha256": "189e11c2a70eb7ec96a3dd112d86b9ffa0651745c8d6ddfcec67a041f4b6e752"
147
  },
 
150
  "@id": "meta-analysis-csv",
151
  "name": "meta_analysis.csv",
152
  "description": "88-paper meta-analysis extraction (cite key, year, thread, domain, scale tier, metric type, reported value, inferred BCCT regime, key finding).",
153
+ "contentUrl": "https://huggingface.co/datasets/EvalData/BCCT-Hub/resolve/main/data/meta_analysis.csv",
154
  "encodingFormat": "text/csv",
155
  "sha256": "d9e1c0d0eb70ec4d9d4036465643696a0b03222ae0783084c82fda40ed43b0d8"
156
  },
 
193
  ],
194
 
195
  "recordSet": [
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
196
  {
197
  "@type": "cr:RecordSet",
198
  "@id": "meta-analysis-records",