orionweller committed
Commit
739ac0a
1 Parent(s): 9728d91

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See raw diff
Files changed (50)
  1. .gitattributes +27 -0
  2. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_10397-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  3. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_1207-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  4. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_1207-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  5. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_13021-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  6. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_13021-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  7. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_15589-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  8. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_15589-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  9. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_17269-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  10. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_17269-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  11. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_18153-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  12. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_18153-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  13. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_24500-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  14. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_24500-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  15. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_26403-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  16. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_26403-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  17. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_28536-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  18. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_28536-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  19. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_28905-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  20. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_28905-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  21. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_31747-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  22. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_31747-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  23. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_33049-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  24. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_33049-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  25. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_35848-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  26. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_35848-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  27. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_3991-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  28. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_3991-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  29. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_40775-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  30. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_40775-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  31. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_44177-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  32. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_44177-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  33. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_49106-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  34. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_49106-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  35. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_5143-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  36. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_5143-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  37. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_54064-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  38. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_54064-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  39. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_56833-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  40. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_57086-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  41. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_57086-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  42. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_60240-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  43. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_60240-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  44. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_62383-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  45. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_62383-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  46. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_63271-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  47. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_63271-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  48. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_63978-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  49. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_63978-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  50. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_64738-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
.gitattributes CHANGED
@@ -22578,3 +22578,30 @@ train/stackexchange/stackexchange_0024-tokenized-chunked-1024-512-128-backfill-n
  train/stackexchange/stackexchange_0024-tokenized-chunked-1024-512-128-backfill-nodups/shard.00012.mds filter=lfs diff=lfs merge=lfs -text
  train/stackexchange/stackexchange_0024-tokenized-chunked-1024-512-128-backfill-nodups/shard.00002.mds filter=lfs diff=lfs merge=lfs -text
  train/stackexchange/stackexchange_0024-tokenized-chunked-1024-512-128-backfill-nodups/shard.00009.mds filter=lfs diff=lfs merge=lfs -text
+ train/stackexchange/stackexchange_0024-tokenized-chunked-1024-512-128-backfill-nodups/shard.00022.mds filter=lfs diff=lfs merge=lfs -text
+ train/stackexchange/stackexchange_0024-tokenized-chunked-1024-512-128-backfill-nodups/shard.00016.mds filter=lfs diff=lfs merge=lfs -text
+ train/stackexchange/stackexchange_0024-tokenized-chunked-1024-512-128-backfill-nodups/shard.00020.mds filter=lfs diff=lfs merge=lfs -text
+ train/stackexchange/stackexchange_0024-tokenized-chunked-1024-512-128-backfill-nodups/shard.00015.mds filter=lfs diff=lfs merge=lfs -text
+ train/stackexchange/stackexchange_0024-tokenized-chunked-1024-512-128-backfill-nodups/shard.00018.mds filter=lfs diff=lfs merge=lfs -text
+ train/stackexchange/stackexchange_0024-tokenized-chunked-1024-512-128-backfill-nodups/shard.00007.mds filter=lfs diff=lfs merge=lfs -text
+ train/stackexchange/stackexchange_0010-tokenized-chunked-1024-512-128-backfill-nodups/shard.00003.mds filter=lfs diff=lfs merge=lfs -text
+ train/stackexchange/stackexchange_0010-tokenized-chunked-1024-512-128-backfill-nodups/shard.00008.mds filter=lfs diff=lfs merge=lfs -text
+ train/stackexchange/stackexchange_0010-tokenized-chunked-1024-512-128-backfill-nodups/shard.00021.mds filter=lfs diff=lfs merge=lfs -text
+ train/stackexchange/stackexchange_0010-tokenized-chunked-1024-512-128-backfill-nodups/shard.00018.mds filter=lfs diff=lfs merge=lfs -text
+ train/stackexchange/stackexchange_0010-tokenized-chunked-1024-512-128-backfill-nodups/shard.00020.mds filter=lfs diff=lfs merge=lfs -text
+ train/stackexchange/stackexchange_0010-tokenized-chunked-1024-512-128-backfill-nodups/shard.00022.mds filter=lfs diff=lfs merge=lfs -text
+ train/stackexchange/stackexchange_0010-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/stackexchange/stackexchange_0010-tokenized-chunked-1024-512-128-backfill-nodups/shard.00015.mds filter=lfs diff=lfs merge=lfs -text
+ train/stackexchange/stackexchange_0010-tokenized-chunked-1024-512-128-backfill-nodups/shard.00017.mds filter=lfs diff=lfs merge=lfs -text
+ train/stackexchange/stackexchange_0010-tokenized-chunked-1024-512-128-backfill-nodups/shard.00007.mds filter=lfs diff=lfs merge=lfs -text
+ train/stackexchange/stackexchange_0010-tokenized-chunked-1024-512-128-backfill-nodups/shard.00006.mds filter=lfs diff=lfs merge=lfs -text
+ train/stackexchange/stackexchange_0010-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/stackexchange/stackexchange_0010-tokenized-chunked-1024-512-128-backfill-nodups/shard.00016.mds filter=lfs diff=lfs merge=lfs -text
+ train/stackexchange/stackexchange_0010-tokenized-chunked-1024-512-128-backfill-nodups/shard.00012.mds filter=lfs diff=lfs merge=lfs -text
+ train/stackexchange/stackexchange_0010-tokenized-chunked-1024-512-128-backfill-nodups/shard.00013.mds filter=lfs diff=lfs merge=lfs -text
+ train/stackexchange/stackexchange_0010-tokenized-chunked-1024-512-128-backfill-nodups/shard.00004.mds filter=lfs diff=lfs merge=lfs -text
+ train/stackexchange/stackexchange_0010-tokenized-chunked-1024-512-128-backfill-nodups/shard.00023.mds filter=lfs diff=lfs merge=lfs -text
+ train/stackexchange/stackexchange_0010-tokenized-chunked-1024-512-128-backfill-nodups/shard.00011.mds filter=lfs diff=lfs merge=lfs -text
+ train/stackexchange/stackexchange_0010-tokenized-chunked-1024-512-128-backfill-nodups/shard.00019.mds filter=lfs diff=lfs merge=lfs -text
+ train/stackexchange/stackexchange_0010-tokenized-chunked-1024-512-128-backfill-nodups/shard.00002.mds filter=lfs diff=lfs merge=lfs -text
+ train/stackexchange/stackexchange_0010-tokenized-chunked-1024-512-128-backfill-nodups/shard.00009.mds filter=lfs diff=lfs merge=lfs -text
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_10397-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108679, "hashes": {}}, "samples": 42742, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47707288, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 17802217, "hashes": {}}, "samples": 11789, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 12667375, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_1207-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108585, "hashes": {}}, "samples": 43863, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47599424, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 14760640, "hashes": {}}, "samples": 9670, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 10462094, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_1207-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 39277989,
+ "num_truncated_tokens": 39245015
+ }
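Likewise, each `num_tokens.json` carries two per-split counts, `num_tokens` and `num_truncated_tokens`. A hedged sketch for totalling these across whichever split directories are present in a local copy, again standard library only; the glob pattern is an assumption based on the layout shown in this commit.

```python
import json
from pathlib import Path

# Hypothetical local checkout root; only splits actually present are counted.
root = Path("train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3")

total = truncated = 0
for path in sorted(root.glob("split_*/num_tokens.json")):
    counts = json.loads(path.read_text())
    total += counts["num_tokens"]
    truncated += counts["num_truncated_tokens"]

print(f"num_tokens: {total:,}")
print(f"num_truncated_tokens: {truncated:,}")
```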
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_13021-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107477, "hashes": {}}, "samples": 44389, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47735230, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 8688631, "hashes": {}}, "samples": 5776, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 6192598, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_13021-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 36345531,
+ "num_truncated_tokens": 36318859
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_15589-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108786, "hashes": {}}, "samples": 42805, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47860463, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 19628373, "hashes": {}}, "samples": 12549, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 13953250, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_15589-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 41655115,
+ "num_truncated_tokens": 41619447
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_17269-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108800, "hashes": {}}, "samples": 43943, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47582200, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 11746443, "hashes": {}}, "samples": 7653, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 8342901, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_17269-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 37830665,
+ "num_truncated_tokens": 37801480
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_18153-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108385, "hashes": {}}, "samples": 43823, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47722356, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 12728966, "hashes": {}}, "samples": 8478, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 9086959, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_18153-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 38300096,
+ "num_truncated_tokens": 38269735
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_24500-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108678, "hashes": {}}, "samples": 42501, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47608621, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 22936437, "hashes": {}}, "samples": 14594, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 16285173, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_24500-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 43255229,
+ "num_truncated_tokens": 43216466
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_26403-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108609, "hashes": {}}, "samples": 43817, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47033122, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 12802638, "hashes": {}}, "samples": 8515, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 9104944, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_26403-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 38336045,
+ "num_truncated_tokens": 38305202
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_28536-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107685, "hashes": {}}, "samples": 42666, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47732684, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 21204040, "hashes": {}}, "samples": 13472, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 15023002, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_28536-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 42418200,
+ "num_truncated_tokens": 42381337
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_28905-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108276, "hashes": {}}, "samples": 43290, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47676761, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 17892709, "hashes": {}}, "samples": 11389, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 12666236, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_28905-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 40808060,
+ "num_truncated_tokens": 40774067
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_31747-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108093, "hashes": {}}, "samples": 44072, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47630829, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 12501589, "hashes": {}}, "samples": 8408, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 8925724, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_31747-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 38180874,
+ "num_truncated_tokens": 38149656
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_33049-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108283, "hashes": {}}, "samples": 43580, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47833938, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 13758617, "hashes": {}}, "samples": 9022, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 9746043, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_33049-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 38805294,
+ "num_truncated_tokens": 38774365
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_35848-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108328, "hashes": {}}, "samples": 44560, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47952627, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 9019333, "hashes": {}}, "samples": 6029, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 6438521, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_35848-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 36498324,
+ "num_truncated_tokens": 36470247
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_3991-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107533, "hashes": {}}, "samples": 45169, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 48024884, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 6600210, "hashes": {}}, "samples": 4353, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 4699725, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_3991-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 35321493,
+ "num_truncated_tokens": 35295137
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_40775-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108381, "hashes": {}}, "samples": 44523, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47860285, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 9731264, "hashes": {}}, "samples": 6376, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 6938305, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_40775-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 36844691,
+ "num_truncated_tokens": 36816390
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_44177-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108495, "hashes": {}}, "samples": 43787, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47362864, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 10898530, "hashes": {}}, "samples": 7304, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 7742785, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_44177-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 37422122,
+ "num_truncated_tokens": 37394007
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_49106-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107711, "hashes": {}}, "samples": 43230, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47794094, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 18184567, "hashes": {}}, "samples": 11571, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 12889177, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_49106-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 40949882,
+ "num_truncated_tokens": 40915122
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_5143-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108565, "hashes": {}}, "samples": 43061, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47616358, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 19709423, "hashes": {}}, "samples": 12508, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 13950316, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_5143-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 41689114,
+ "num_truncated_tokens": 41652950
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_54064-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108821, "hashes": {}}, "samples": 42546, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47569458, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 22440939, "hashes": {}}, "samples": 14167, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 16009891, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_54064-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 43019286,
+ "num_truncated_tokens": 42981364
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_56833-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 42363445,
+ "num_truncated_tokens": 42326734
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_57086-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107588, "hashes": {}}, "samples": 42644, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47501317, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 21993285, "hashes": {}}, "samples": 13927, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 15477015, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_57086-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 42799334,
+ "num_truncated_tokens": 42761774
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_60240-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108318, "hashes": {}}, "samples": 44993, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47893454, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 7386908, "hashes": {}}, "samples": 4916, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 5269520, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_60240-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 35703272,
+ "num_truncated_tokens": 35675649
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_62383-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108837, "hashes": {}}, "samples": 44729, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47957036, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 7593010, "hashes": {}}, "samples": 5245, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 5486263, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_62383-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 35804460,
+ "num_truncated_tokens": 35777352
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_63271-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67106801, "hashes": {}}, "samples": 43839, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47593994, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 11753699, "hashes": {}}, "samples": 7787, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 8328815, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_63271-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 37832399,
+ "num_truncated_tokens": 37803217
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_63978-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107528, "hashes": {}}, "samples": 43308, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47702825, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 18585386, "hashes": {}}, "samples": 11840, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 13165876, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_63978-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 41139515,
+ "num_truncated_tokens": 41104448
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_64738-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107881, "hashes": {}}, "samples": 43934, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47723703, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 12130186, "hashes": {}}, "samples": 8085, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 8649986, "hashes": {}}}], "version": 2}