orionweller committed
Commit 28680b3
1 Parent(s): 9f538c2

Add files using upload-large-folder tool
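The commit message references Hugging Face's upload-large-folder tool. A minimal sketch of the equivalent call through the huggingface_hub Python client; the repo ID and folder path below are illustrative, not taken from this commit:

from huggingface_hub import HfApi

api = HfApi()

# Hypothetical target repo and local folder. upload_large_folder chunks the
# work, resumes after interruption, and pushes files as a series of commits,
# which is how large drops like this one typically land.
api.upload_large_folder(
    repo_id="your-org/your-dataset",
    repo_type="dataset",  # required argument for upload_large_folder
    folder_path="./local-dataset-folder",
)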

This view is limited to 50 files because it contains too many changes; see the raw diff for the full change set.
Files changed (50)
  1. .gitattributes +28 -0
  2. train/algebraic-stack/algebraic_stack_train_0014-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds +3 -0
  3. train/algebraic-stack/algebraic_stack_train_0014-tokenized-chunked-1024-512-128-backfill-nodups/shard.00002.mds +3 -0
  4. train/algebraic-stack/algebraic_stack_train_0014-tokenized-chunked-1024-512-128-backfill-nodups/shard.00003.mds +3 -0
  5. train/algebraic-stack/algebraic_stack_train_0014-tokenized-chunked-1024-512-128-backfill-nodups/shard.00006.mds +3 -0
  6. train/algebraic-stack/algebraic_stack_train_0014-tokenized-chunked-1024-512-128-backfill-nodups/shard.00010.mds +3 -0
  7. train/algebraic-stack/algebraic_stack_train_0014-tokenized-chunked-1024-512-128-backfill-nodups/shard.00014.mds +3 -0
  8. train/algebraic-stack/algebraic_stack_train_0014-tokenized-chunked-1024-512-128-backfill-nodups/shard.00015.mds +3 -0
  9. train/algebraic-stack/algebraic_stack_train_0014-tokenized-chunked-1024-512-128-backfill-nodups/shard.00020.mds +3 -0
  10. train/algebraic-stack/algebraic_stack_train_0014-tokenized-chunked-1024-512-128-backfill-nodups/shard.00022.mds +3 -0
  11. train/algebraic-stack/algebraic_stack_train_0014-tokenized-chunked-1024-512-128-backfill-nodups/shard.00023.mds +3 -0
  12. train/algebraic-stack/algebraic_stack_train_0014-tokenized-chunked-1024-512-128-backfill-nodups/shard.00024.mds +3 -0
  13. train/algebraic-stack/algebraic_stack_train_0014-tokenized-chunked-1024-512-128-backfill-nodups/shard.00026.mds +3 -0
  14. train/algebraic-stack/algebraic_stack_train_0014-tokenized-chunked-1024-512-128-backfill-nodups/shard.00027.mds +3 -0
  15. train/algebraic-stack/algebraic_stack_train_0014-tokenized-chunked-1024-512-128-backfill-nodups/shard.00028.mds +3 -0
  16. train/algebraic-stack/algebraic_stack_train_0014-tokenized-chunked-1024-512-128-backfill-nodups/shard.00031.mds +3 -0
  17. train/algebraic-stack/algebraic_stack_train_0014-tokenized-chunked-1024-512-128-backfill-nodups/shard.00033.mds +3 -0
  18. train/algebraic-stack/algebraic_stack_train_0014-tokenized-chunked-1024-512-128-backfill-nodups/shard.00038.mds +3 -0
  19. train/algebraic-stack/algebraic_stack_train_0014-tokenized-chunked-1024-512-128-backfill-nodups/shard.00039.mds +3 -0
  20. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_13494-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  21. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_13494-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  22. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_17276-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  23. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_17276-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  24. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_22186-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  25. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_22186-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  26. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_23005-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  27. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_23005-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  28. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_30710-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  29. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_30710-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  30. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_31621-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  31. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_31621-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  32. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_31754-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  33. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_31754-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  34. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_35392-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  35. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_35392-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  36. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_39428-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  37. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_39428-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  38. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_40600-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  39. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_40600-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  40. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_4656-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  41. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_4656-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  42. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_4680-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  43. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_4680-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  44. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_47344-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  45. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_47344-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  46. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_60963-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  47. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_60963-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  48. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_62859-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  49. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_62859-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  50. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_64241-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
.gitattributes CHANGED
@@ -27172,3 +27172,31 @@ train/algebraic-stack/algebraic_stack_train_0014-tokenized-chunked-1024-512-128-
  train/algebraic-stack/algebraic_stack_train_0014-tokenized-chunked-1024-512-128-backfill-nodups/shard.00009.mds filter=lfs diff=lfs merge=lfs -text
  train/algebraic-stack/algebraic_stack_train_0014-tokenized-chunked-1024-512-128-backfill-nodups/shard.00021.mds filter=lfs diff=lfs merge=lfs -text
  train/algebraic-stack/algebraic_stack_train_0014-tokenized-chunked-1024-512-128-backfill-nodups/shard.00016.mds filter=lfs diff=lfs merge=lfs -text
+ train/algebraic-stack/algebraic_stack_train_0014-tokenized-chunked-1024-512-128-backfill-nodups/shard.00015.mds filter=lfs diff=lfs merge=lfs -text
+ train/algebraic-stack/algebraic_stack_train_0014-tokenized-chunked-1024-512-128-backfill-nodups/shard.00033.mds filter=lfs diff=lfs merge=lfs -text
+ train/algebraic-stack/algebraic_stack_train_0014-tokenized-chunked-1024-512-128-backfill-nodups/shard.00020.mds filter=lfs diff=lfs merge=lfs -text
+ train/algebraic-stack/algebraic_stack_train_0014-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/algebraic-stack/algebraic_stack_train_0014-tokenized-chunked-1024-512-128-backfill-nodups/shard.00014.mds filter=lfs diff=lfs merge=lfs -text
+ train/algebraic-stack/algebraic_stack_train_0014-tokenized-chunked-1024-512-128-backfill-nodups/shard.00039.mds filter=lfs diff=lfs merge=lfs -text
+ train/algebraic-stack/algebraic_stack_train_0014-tokenized-chunked-1024-512-128-backfill-nodups/shard.00038.mds filter=lfs diff=lfs merge=lfs -text
+ train/algebraic-stack/algebraic_stack_train_0014-tokenized-chunked-1024-512-128-backfill-nodups/shard.00006.mds filter=lfs diff=lfs merge=lfs -text
+ train/algebraic-stack/algebraic_stack_train_0014-tokenized-chunked-1024-512-128-backfill-nodups/shard.00023.mds filter=lfs diff=lfs merge=lfs -text
+ train/algebraic-stack/algebraic_stack_train_0014-tokenized-chunked-1024-512-128-backfill-nodups/shard.00026.mds filter=lfs diff=lfs merge=lfs -text
+ train/algebraic-stack/algebraic_stack_train_0014-tokenized-chunked-1024-512-128-backfill-nodups/shard.00022.mds filter=lfs diff=lfs merge=lfs -text
+ train/algebraic-stack/algebraic_stack_train_0014-tokenized-chunked-1024-512-128-backfill-nodups/shard.00002.mds filter=lfs diff=lfs merge=lfs -text
+ train/algebraic-stack/algebraic_stack_train_0014-tokenized-chunked-1024-512-128-backfill-nodups/shard.00031.mds filter=lfs diff=lfs merge=lfs -text
+ train/algebraic-stack/algebraic_stack_train_0014-tokenized-chunked-1024-512-128-backfill-nodups/shard.00003.mds filter=lfs diff=lfs merge=lfs -text
+ train/algebraic-stack/algebraic_stack_train_0014-tokenized-chunked-1024-512-128-backfill-nodups/shard.00028.mds filter=lfs diff=lfs merge=lfs -text
+ train/algebraic-stack/algebraic_stack_train_0014-tokenized-chunked-1024-512-128-backfill-nodups/shard.00027.mds filter=lfs diff=lfs merge=lfs -text
+ train/algebraic-stack/algebraic_stack_train_0014-tokenized-chunked-1024-512-128-backfill-nodups/shard.00024.mds filter=lfs diff=lfs merge=lfs -text
+ train/algebraic-stack/algebraic_stack_train_0014-tokenized-chunked-1024-512-128-backfill-nodups/shard.00010.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_43549-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_30945-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_94870-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_65310-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_94870-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_41248-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_41248-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_41958-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_39432-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_41958-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
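Each added .gitattributes rule routes one shard through Git LFS, so Git history stores only a small pointer stub (version/oid/size, as in the file diffs below) while the actual bytes live in LFS storage. A minimal sketch of generating rules in this format for a folder of .mds shards, assuming a local checkout; the folder path is illustrative:

from pathlib import Path

# Hypothetical checkout-relative folder containing new MDS shards.
root = Path("train/algebraic-stack")

# One LFS rule per shard, matching the format of the lines added above.
rules = [
    f"{p.as_posix()} filter=lfs diff=lfs merge=lfs -text"
    for p in sorted(root.rglob("*.mds"))
]

# Append to .gitattributes (deduplication against existing rules omitted).
with open(".gitattributes", "a", encoding="utf-8") as f:
    f.writelines(rule + "\n" for rule in rules)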
train/algebraic-stack/algebraic_stack_train_0014-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f79008edf7d6bbdb3f475055b7ff16f08684e94b6752d03f769ca1a2ffaa22e8
+ size 67108488
train/algebraic-stack/algebraic_stack_train_0014-tokenized-chunked-1024-512-128-backfill-nodups/shard.00002.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7f2e71fc92aedc5f22e0cb27e2dc0b3a272e3d77c0e4420f0a52b92080ed81ef
+ size 67108272
train/algebraic-stack/algebraic_stack_train_0014-tokenized-chunked-1024-512-128-backfill-nodups/shard.00003.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5eda895f78517f590a50f594cdb7d138655a20ea16710745d1fcd2255ee3cf3a
+ size 67107387
train/algebraic-stack/algebraic_stack_train_0014-tokenized-chunked-1024-512-128-backfill-nodups/shard.00006.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8e52af447533b29938ee2f2861dbeac6b276c01eb1eabce1c0a36c08b9b9b3d0
+ size 67108536
train/algebraic-stack/algebraic_stack_train_0014-tokenized-chunked-1024-512-128-backfill-nodups/shard.00010.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b168f0ef70fd19bb2858e40152f01c4d22a366301dce6dff63d1f8dcf17bc5c4
+ size 67108288
train/algebraic-stack/algebraic_stack_train_0014-tokenized-chunked-1024-512-128-backfill-nodups/shard.00014.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f2c3ac9209e60c9523160c9cad1ae532cb902a588e6346ba723dd676f0779f86
+ size 67107093
train/algebraic-stack/algebraic_stack_train_0014-tokenized-chunked-1024-512-128-backfill-nodups/shard.00015.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8b0679fc855fed08a0cb45e0436e00e321f39f94c2578a9b60077bfd15e48260
+ size 67108787
train/algebraic-stack/algebraic_stack_train_0014-tokenized-chunked-1024-512-128-backfill-nodups/shard.00020.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4ead43de53d4d7b2bab4efc9bd699cc6592261fc398df8e6dcf74abfc5808282
+ size 67107723
train/algebraic-stack/algebraic_stack_train_0014-tokenized-chunked-1024-512-128-backfill-nodups/shard.00022.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1a70e5042b2e4ae87a629d88b279adc460a74f85eb9e60326e48533456ea4a5c
+ size 67106924
train/algebraic-stack/algebraic_stack_train_0014-tokenized-chunked-1024-512-128-backfill-nodups/shard.00023.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:415f2c84c250e2b33d52d856a9d596a1ab51c6fa14278df7f9cfb8f0e878b2fc
+ size 67108019
train/algebraic-stack/algebraic_stack_train_0014-tokenized-chunked-1024-512-128-backfill-nodups/shard.00024.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c7be792d6b4244901fbe4c9a7dd667ad1a917428964cdc65c8dce9b71a280d82
+ size 67108346
train/algebraic-stack/algebraic_stack_train_0014-tokenized-chunked-1024-512-128-backfill-nodups/shard.00026.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7996d637e6c19a30478db48a6f9fa2cbed1d477bb2f8a43ff571a45954301a47
+ size 67107712
train/algebraic-stack/algebraic_stack_train_0014-tokenized-chunked-1024-512-128-backfill-nodups/shard.00027.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8b788059203d1e33a252f54d6500f45216bb1d74c26c77f4d446e014fd734774
+ size 67108807
train/algebraic-stack/algebraic_stack_train_0014-tokenized-chunked-1024-512-128-backfill-nodups/shard.00028.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a9bbfd3641c883db0be30a3e0871bf1f5f0e5697cbeb068c66dbe345519e3604
+ size 67107201
train/algebraic-stack/algebraic_stack_train_0014-tokenized-chunked-1024-512-128-backfill-nodups/shard.00031.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b8718b0103fddf66ec45d0db79f8a3ff76b0057b8c96e0e03602dca6dd7e8442
+ size 67106794
train/algebraic-stack/algebraic_stack_train_0014-tokenized-chunked-1024-512-128-backfill-nodups/shard.00033.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b63bbb571e937cfbeecef0406fedcf7b0d9aeb462a21fbbb5dbf5fc9b87e9b6b
+ size 67108272
train/algebraic-stack/algebraic_stack_train_0014-tokenized-chunked-1024-512-128-backfill-nodups/shard.00038.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:40824f6a9562a2c25817a3d40de8b5ff1e3ef56b6037439efe8b6a03dfa880ed
+ size 67106980
train/algebraic-stack/algebraic_stack_train_0014-tokenized-chunked-1024-512-128-backfill-nodups/shard.00039.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ce388e607d8164a36992a77b18aafc964983f4e82150b5f816973af31d24d46f
+ size 67108241
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_13494-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108571, "hashes": {}}, "samples": 42502, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47613243, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 22373219, "hashes": {}}, "samples": 14223, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 15875673, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_13494-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 42984914,
+ "num_truncated_tokens": 42947393
+ }
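num_tokens.json carries the split-level token counts alongside the index. A small stdlib-only sketch that loads both files and cross-checks them (path illustrative; the sample total comes from summing the index's per-shard counts):

import json
from pathlib import Path

split = Path(
    "train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/"
    "split_13494-tokenized-chunked-1024-512-128-backfill-nodups"
)

index = json.loads((split / "index.json").read_text())
counts = json.loads((split / "num_tokens.json").read_text())

total_samples = sum(shard["samples"] for shard in index["shards"])
print(total_samples)                   # 42502 + 14223 = 56725 for this split
print(counts["num_tokens"])            # 42984914
print(counts["num_truncated_tokens"])  # 42947393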
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_17276-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108657, "hashes": {}}, "samples": 44536, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47775987, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 11299490, "hashes": {}}, "samples": 7385, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 8053454, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_17276-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 37597385,
+ "num_truncated_tokens": 37567496
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_22186-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107293, "hashes": {}}, "samples": 42694, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47459596, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 21560787, "hashes": {}}, "samples": 13610, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 15417486, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_22186-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 42591112,
+ "num_truncated_tokens": 42553681
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_23005-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108310, "hashes": {}}, "samples": 43595, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47648198, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 14571023, "hashes": {}}, "samples": 9513, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 10386373, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_23005-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 39196003,
+ "num_truncated_tokens": 39164306
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_30710-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108697, "hashes": {}}, "samples": 44115, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47697653, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 12205603, "hashes": {}}, "samples": 8026, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 8665085, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_30710-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 38043528,
+ "num_truncated_tokens": 38013122
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_31621-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107258, "hashes": {}}, "samples": 44186, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47928915, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 11006124, "hashes": {}}, "samples": 7256, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 7813022, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_31621-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 37464668,
+ "num_truncated_tokens": 37435340
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_31754-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107855, "hashes": {}}, "samples": 44135, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47569704, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 10603070, "hashes": {}}, "samples": 6819, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 7458837, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_31754-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 37278335,
+ "num_truncated_tokens": 37250776
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_35392-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67106954, "hashes": {}}, "samples": 42684, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47654545, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 21476040, "hashes": {}}, "samples": 13657, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 15254014, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_35392-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 42547454,
+ "num_truncated_tokens": 42510731
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_39428-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107819, "hashes": {}}, "samples": 43887, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47683735, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 13125250, "hashes": {}}, "samples": 8610, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 9387391, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_39428-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 38491870,
+ "num_truncated_tokens": 38460913
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_40600-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107964, "hashes": {}}, "samples": 44219, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 48042440, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 10331594, "hashes": {}}, "samples": 6924, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 7423429, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_40600-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 37137023,
+ "num_truncated_tokens": 37107829
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_4656-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67106892, "hashes": {}}, "samples": 44397, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47836023, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 10155753, "hashes": {}}, "samples": 6750, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 7243286, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_4656-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 37048527,
+ "num_truncated_tokens": 37019412
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_4680-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107277, "hashes": {}}, "samples": 44079, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47653136, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 10320953, "hashes": {}}, "samples": 6865, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 7320688, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_4680-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 37137408,
+ "num_truncated_tokens": 37109571
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_47344-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107670, "hashes": {}}, "samples": 44486, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47786948, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 9839336, "hashes": {}}, "samples": 6427, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 6949318, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_47344-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 36897874,
+ "num_truncated_tokens": 36869498
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_60963-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107886, "hashes": {}}, "samples": 45077, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 48055819, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 6158207, "hashes": {}}, "samples": 4243, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 4438934, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_60963-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 35106919,
+ "num_truncated_tokens": 35080710
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_62859-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107887, "hashes": {}}, "samples": 43123, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47635870, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 19136923, "hashes": {}}, "samples": 12166, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 13505502, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_62859-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 41411110,
+ "num_truncated_tokens": 41375528
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_64241-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108188, "hashes": {}}, "samples": 43723, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47793542, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 14659891, "hashes": {}}, "samples": 9489, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 10417717, "hashes": {}}}], "version": 2}