orionweller committed on
Commit
a2125ec
1 Parent(s): 0272ec3

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. .gitattributes +32 -0
  2. train/arxiv/arxiv_0007-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds +3 -0
  3. train/arxiv/arxiv_0007-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds +3 -0
  4. train/arxiv/arxiv_0007-tokenized-chunked-1024-512-128-backfill-nodups/shard.00002.mds +3 -0
  5. train/arxiv/arxiv_0007-tokenized-chunked-1024-512-128-backfill-nodups/shard.00003.mds +3 -0
  6. train/arxiv/arxiv_0007-tokenized-chunked-1024-512-128-backfill-nodups/shard.00005.mds +3 -0
  7. train/arxiv/arxiv_0007-tokenized-chunked-1024-512-128-backfill-nodups/shard.00006.mds +3 -0
  8. train/arxiv/arxiv_0007-tokenized-chunked-1024-512-128-backfill-nodups/shard.00007.mds +3 -0
  9. train/arxiv/arxiv_0007-tokenized-chunked-1024-512-128-backfill-nodups/shard.00008.mds +3 -0
  10. train/arxiv/arxiv_0016-tokenized-chunked-1024-512-128-backfill-nodups/shard.00005.mds +3 -0
  11. train/arxiv/arxiv_0076-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds +3 -0
  12. train/arxiv/arxiv_0097-tokenized-chunked-1024-512-128-backfill-nodups/shard.00005.mds +3 -0
  13. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_13496-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  14. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_13496-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  15. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_14337-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds +3 -0
  16. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_15363-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds +3 -0
  17. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_15363-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds +3 -0
  18. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_16981-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  19. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_16981-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  20. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_18026-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  21. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_18026-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  22. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_22135-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  23. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_22135-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  24. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_23382-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  25. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_23382-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  26. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_25486-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  27. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_25486-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  28. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_28376-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  29. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_28376-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  30. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_35645-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  31. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_35645-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  32. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_35839-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  33. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_35839-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  34. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_36763-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds +3 -0
  35. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_36763-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds +3 -0
  36. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_36914-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  37. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_36914-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  38. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_3898-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds +3 -0
  39. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_41726-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds +3 -0
  40. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_44195-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  41. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_44195-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  42. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_45207-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds +3 -0
  43. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_45207-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds +3 -0
  44. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_45283-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  45. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_45283-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  46. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_45859-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  47. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_45859-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  48. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_45860-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  49. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_45860-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  50. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_49187-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
.gitattributes CHANGED
@@ -13102,3 +13102,35 @@ train/arxiv/arxiv_0097-tokenized-chunked-1024-512-128-backfill-nodups/shard.0000
 train/arxiv/arxiv_0097-tokenized-chunked-1024-512-128-backfill-nodups/shard.00007.mds filter=lfs diff=lfs merge=lfs -text
 train/arxiv/arxiv_0097-tokenized-chunked-1024-512-128-backfill-nodups/shard.00006.mds filter=lfs diff=lfs merge=lfs -text
 train/arxiv/arxiv_0097-tokenized-chunked-1024-512-128-backfill-nodups/shard.00002.mds filter=lfs diff=lfs merge=lfs -text
+ train/arxiv/arxiv_0007-tokenized-chunked-1024-512-128-backfill-nodups/shard.00005.mds filter=lfs diff=lfs merge=lfs -text
+ train/arxiv/arxiv_0007-tokenized-chunked-1024-512-128-backfill-nodups/shard.00003.mds filter=lfs diff=lfs merge=lfs -text
+ train/arxiv/arxiv_0007-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/arxiv/arxiv_0007-tokenized-chunked-1024-512-128-backfill-nodups/shard.00002.mds filter=lfs diff=lfs merge=lfs -text
+ train/arxiv/arxiv_0007-tokenized-chunked-1024-512-128-backfill-nodups/shard.00006.mds filter=lfs diff=lfs merge=lfs -text
+ train/arxiv/arxiv_0007-tokenized-chunked-1024-512-128-backfill-nodups/shard.00008.mds filter=lfs diff=lfs merge=lfs -text
+ train/arxiv/arxiv_0007-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/arxiv/arxiv_0016-tokenized-chunked-1024-512-128-backfill-nodups/shard.00005.mds filter=lfs diff=lfs merge=lfs -text
+ train/arxiv/arxiv_0076-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/arxiv/arxiv_0007-tokenized-chunked-1024-512-128-backfill-nodups/shard.00007.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_41726-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_60210-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_90315-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_3898-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_14337-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_45207-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_60210-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_9744-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_36763-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_36763-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/arxiv/arxiv_0097-tokenized-chunked-1024-512-128-backfill-nodups/shard.00005.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_51335-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_51335-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_15363-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_9744-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_15363-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_76394-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_89202-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_89202-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_95891-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_84696-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_45207-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
train/arxiv/arxiv_0007-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:701c6e3389334ceef495d69a755b30b675b64a8ce84279231eb81bc8a12ec688
+ size 67108043
train/arxiv/arxiv_0007-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a26d0fb97b9f50712984ff92ed800ed8806cdd8079d3e2695f772ad576e66893
+ size 67106974
train/arxiv/arxiv_0007-tokenized-chunked-1024-512-128-backfill-nodups/shard.00002.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4196810a310f7b1e5ce4e340c4b8a563fa6eb52ded5be19a4fbac1321cfcea9c
+ size 67107965
train/arxiv/arxiv_0007-tokenized-chunked-1024-512-128-backfill-nodups/shard.00003.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a3016cc6d575bd3fc3e3f0be99db141d398ec4f159c993fe99471f5271174bf2
+ size 67107931
train/arxiv/arxiv_0007-tokenized-chunked-1024-512-128-backfill-nodups/shard.00005.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6e9109626102391b81974260a2f508c00e4d121ab85217deefd2b012a139f862
+ size 67106829
train/arxiv/arxiv_0007-tokenized-chunked-1024-512-128-backfill-nodups/shard.00006.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a7cf35a01d60c88175d32aacae679939b194ebef47de82d8dbc0b73918b4f312
+ size 67108625
train/arxiv/arxiv_0007-tokenized-chunked-1024-512-128-backfill-nodups/shard.00007.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:60d9e5dec775fc23d059bc5730068706917572d6c9fbee36f31bc1b5665352f8
+ size 67108225
train/arxiv/arxiv_0007-tokenized-chunked-1024-512-128-backfill-nodups/shard.00008.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:65c00a2fcaab9c8fcf50f2ac85284eba8d959d6f6a021f28e7b78ad049587fee
+ size 54091457
train/arxiv/arxiv_0016-tokenized-chunked-1024-512-128-backfill-nodups/shard.00005.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ca3bcca616a68e710db7ea3d1e65888a1b992198be264a33ec32ad14e313f976
+ size 67108070
train/arxiv/arxiv_0076-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:aaf31a8c9c3c16ffff0375584eb66ad890a80c7142ab034d7208e388f1f595c2
+ size 67107745
train/arxiv/arxiv_0097-tokenized-chunked-1024-512-128-backfill-nodups/shard.00005.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cf7cc98ff986dedda0a0f14d1116cc4e34479fb3bcbb4c51eb6bd19ba37ac8b5
+ size 67107371
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_13496-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108478, "hashes": {}}, "samples": 43195, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 48040003, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 15023690, "hashes": {}}, "samples": 9953, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 10735492, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_13496-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 39420900,
+ "num_truncated_tokens": 39388891
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_14337-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:113890bc5678ae0ff921f76b8d6130535bfe8fd0eac61f239afc4993698db276
+ size 10773709
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_15363-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:85c5cc26bcc54089c8b57cda27d643becd77566fe96780fce0d4266caa047d46
+ size 67107917
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_15363-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:56e82c255802c363925800341bfb33a24ff3415ce896b5c93c9bc4e61396d791
+ size 16154156
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_16981-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107968, "hashes": {}}, "samples": 43987, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47814380, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 12035575, "hashes": {}}, "samples": 7878, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 8513101, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_16981-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 37966586,
+ "num_truncated_tokens": 37936701
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_18026-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108692, "hashes": {}}, "samples": 42886, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47489575, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 18671501, "hashes": {}}, "samples": 12165, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 13252395, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_18026-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 41186154,
+ "num_truncated_tokens": 41151104
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_22135-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107980, "hashes": {}}, "samples": 43589, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47639051, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 17081457, "hashes": {}}, "samples": 10903, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 12167977, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_22135-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 40408188,
+ "num_truncated_tokens": 40374406
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_23382-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108307, "hashes": {}}, "samples": 44511, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47709740, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 8879950, "hashes": {}}, "samples": 5870, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 6349454, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_23382-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 36435006,
+ "num_truncated_tokens": 36407359
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_25486-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107582, "hashes": {}}, "samples": 43856, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 48025368, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 11849648, "hashes": {}}, "samples": 7941, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 8503312, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_25486-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 37875452,
+ "num_truncated_tokens": 37845273
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_28376-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108209, "hashes": {}}, "samples": 43871, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47572874, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 12088710, "hashes": {}}, "samples": 7901, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 8598860, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_28376-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 37996116,
+ "num_truncated_tokens": 37966635
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_35645-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107407, "hashes": {}}, "samples": 43235, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47891873, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 18028863, "hashes": {}}, "samples": 11490, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 12752205, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_35645-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 40874261,
+ "num_truncated_tokens": 40839464
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_35839-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108335, "hashes": {}}, "samples": 43912, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47592932, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 12041568, "hashes": {}}, "samples": 7816, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 8547792, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_35839-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 37973951,
+ "num_truncated_tokens": 37945019
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_36763-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:961b423483e75e5841dc63059f07bbcc7c4b0e663a6b237093545b382ad677d2
+ size 67107622
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_36763-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:702525587bb54294c3857559da221fd575a7a1727bbe67a957fb19cff2616cbc
+ size 9278421
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_36914-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107244, "hashes": {}}, "samples": 42912, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47675625, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 19618172, "hashes": {}}, "samples": 12476, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 13846486, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_36914-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 41648232,
+ "num_truncated_tokens": 41612709
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_3898-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:861d89bcb282cca42a439293aed6c0df53831ac0c5dc12c7d90ac4b7874cee11
+ size 67107577
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_41726-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a2f431fbb0750c1b91eb46d6af81c4cb1940bb5d0ed65b5f1011d403896c2683
+ size 67108787
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_44195-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108329, "hashes": {}}, "samples": 44090, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47731494, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 11508256, "hashes": {}}, "samples": 7433, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 8211137, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_44195-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 37713623,
+ "num_truncated_tokens": 37684414
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_45207-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:312a3f5baec49131d5481a3dead31ec8b49bd98a996ab35afd14b42139910fd6
+ size 67108160
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_45207-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:973809be160fceb9035da9777f1b96962414097e853900958904de91434d16a1
+ size 20175623
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_45283-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108836, "hashes": {}}, "samples": 44491, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47870797, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 9863014, "hashes": {}}, "samples": 6511, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 7015263, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_45283-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 36907615,
+ "num_truncated_tokens": 36879068
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_45859-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108774, "hashes": {}}, "samples": 44014, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47850922, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 12567956, "hashes": {}}, "samples": 8219, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 8937686, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_45859-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 38221927,
+ "num_truncated_tokens": 38190914
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_45860-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107561, "hashes": {}}, "samples": 44252, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47721277, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 10022187, "hashes": {}}, "samples": 6555, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 7169983, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_45860-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 36992437,
+ "num_truncated_tokens": 36964441
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_49187-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107968, "hashes": {}}, "samples": 42439, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47649857, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 22824607, "hashes": {}}, "samples": 14434, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 16141511, "hashes": {}}}], "version": 2}