orionweller committed on
Commit d63b42b · verified · 1 Parent(s): 3c4dc37

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. train/dclm-dolmino/split_10317-tokenized-chunked-8000-512-128-backfill-nodups/index.json +1 -0
  2. train/dclm-dolmino/split_10317-tokenized-chunked-8000-512-128-backfill-nodups/stats.json +1 -0
  3. train/dclm-dolmino/split_10317-tokenized-chunked-8000-512-128-backfill-nodups/token_decile.json +0 -0
  4. train/dclm-dolmino/split_12373-tokenized-chunked-8000-512-128-backfill-nodups/index.json +1 -0
  5. train/dclm-dolmino/split_12373-tokenized-chunked-8000-512-128-backfill-nodups/stats.json +1 -0
  6. train/dclm-dolmino/split_12373-tokenized-chunked-8000-512-128-backfill-nodups/token_decile.json +0 -0
  7. train/dclm-dolmino/split_12852-tokenized-chunked-8000-512-128-backfill-nodups/index.json +1 -0
  8. train/dclm-dolmino/split_12852-tokenized-chunked-8000-512-128-backfill-nodups/stats.json +1 -0
  9. train/dclm-dolmino/split_12852-tokenized-chunked-8000-512-128-backfill-nodups/token_decile.json +0 -0
  10. train/dclm-dolmino/split_12863-tokenized-chunked-8000-512-128-backfill-nodups/index.json +1 -0
  11. train/dclm-dolmino/split_12863-tokenized-chunked-8000-512-128-backfill-nodups/stats.json +1 -0
  12. train/dclm-dolmino/split_12863-tokenized-chunked-8000-512-128-backfill-nodups/token_decile.json +0 -0
  13. train/dclm-dolmino/split_13048-tokenized-chunked-8000-512-128-backfill-nodups/index.json +1 -0
  14. train/dclm-dolmino/split_13048-tokenized-chunked-8000-512-128-backfill-nodups/stats.json +1 -0
  15. train/dclm-dolmino/split_13048-tokenized-chunked-8000-512-128-backfill-nodups/token_decile.json +0 -0
  16. train/dclm-dolmino/split_14020-tokenized-chunked-8000-512-128-backfill-nodups/index.json +1 -0
  17. train/dclm-dolmino/split_14020-tokenized-chunked-8000-512-128-backfill-nodups/stats.json +1 -0
  18. train/dclm-dolmino/split_14020-tokenized-chunked-8000-512-128-backfill-nodups/token_decile.json +0 -0
  19. train/dclm-dolmino/split_14197-tokenized-chunked-8000-512-128-backfill-nodups/index.json +1 -0
  20. train/dclm-dolmino/split_14197-tokenized-chunked-8000-512-128-backfill-nodups/stats.json +1 -0
  21. train/dclm-dolmino/split_14197-tokenized-chunked-8000-512-128-backfill-nodups/token_decile.json +0 -0
  22. train/dclm-dolmino/split_14297-tokenized-chunked-8000-512-128-backfill-nodups/index.json +1 -0
  23. train/dclm-dolmino/split_14297-tokenized-chunked-8000-512-128-backfill-nodups/stats.json +1 -0
  24. train/dclm-dolmino/split_14297-tokenized-chunked-8000-512-128-backfill-nodups/token_decile.json +0 -0
  25. train/dclm-dolmino/split_14692-tokenized-chunked-8000-512-128-backfill-nodups/index.json +1 -0
  26. train/dclm-dolmino/split_14692-tokenized-chunked-8000-512-128-backfill-nodups/stats.json +1 -0
  27. train/dclm-dolmino/split_14692-tokenized-chunked-8000-512-128-backfill-nodups/token_decile.json +0 -0
  28. train/dclm-dolmino/split_14965-tokenized-chunked-8000-512-128-backfill-nodups/index.json +1 -0
  29. train/dclm-dolmino/split_14965-tokenized-chunked-8000-512-128-backfill-nodups/stats.json +1 -0
  30. train/dclm-dolmino/split_14965-tokenized-chunked-8000-512-128-backfill-nodups/token_decile.json +0 -0
  31. train/dclm-dolmino/split_14983-tokenized-chunked-8000-512-128-backfill-nodups/index.json +1 -0
  32. train/dclm-dolmino/split_14983-tokenized-chunked-8000-512-128-backfill-nodups/stats.json +1 -0
  33. train/dclm-dolmino/split_15962-tokenized-chunked-8000-512-128-backfill-nodups/index.json +1 -0
  34. train/dclm-dolmino/split_15962-tokenized-chunked-8000-512-128-backfill-nodups/stats.json +1 -0
  35. train/dclm-dolmino/split_15962-tokenized-chunked-8000-512-128-backfill-nodups/token_decile.json +0 -0
  36. train/dclm-dolmino/split_15974-tokenized-chunked-8000-512-128-backfill-nodups/index.json +1 -0
  37. train/dclm-dolmino/split_15974-tokenized-chunked-8000-512-128-backfill-nodups/stats.json +1 -0
  38. train/dclm-dolmino/split_15974-tokenized-chunked-8000-512-128-backfill-nodups/token_decile.json +0 -0
  39. train/dclm-dolmino/split_16339-tokenized-chunked-8000-512-128-backfill-nodups/index.json +1 -0
  40. train/dclm-dolmino/split_16339-tokenized-chunked-8000-512-128-backfill-nodups/stats.json +1 -0
  41. train/dclm-dolmino/split_16339-tokenized-chunked-8000-512-128-backfill-nodups/token_decile.json +0 -0
  42. train/dclm-dolmino/split_16467-tokenized-chunked-8000-512-128-backfill-nodups/index.json +1 -0
  43. train/dclm-dolmino/split_17471-tokenized-chunked-8000-512-128-backfill-nodups/index.json +1 -0
  44. train/dclm-dolmino/split_17471-tokenized-chunked-8000-512-128-backfill-nodups/stats.json +1 -0
  45. train/dclm-dolmino/split_17471-tokenized-chunked-8000-512-128-backfill-nodups/token_decile.json +0 -0
  46. train/dclm-dolmino/split_2813-tokenized-chunked-8000-512-128-backfill-nodups/index.json +1 -0
  47. train/dclm-dolmino/split_2813-tokenized-chunked-8000-512-128-backfill-nodups/stats.json +1 -0
  48. train/dclm-dolmino/split_2813-tokenized-chunked-8000-512-128-backfill-nodups/token_decile.json +0 -0
  49. train/dclm-dolmino/split_3395-tokenized-chunked-8000-512-128-backfill-nodups/index.json +1 -0
  50. train/dclm-dolmino/split_3395-tokenized-chunked-8000-512-128-backfill-nodups/stats.json +1 -0
train/dclm-dolmino/split_10317-tokenized-chunked-8000-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107765, "hashes": {}}, "samples": 26292, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 48354098, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 11211365, "hashes": {}}, "samples": 4376, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 8082085, "hashes": {}}}], "version": 2}
train/dclm-dolmino/split_10317-tokenized-chunked-8000-512-128-backfill-nodups/stats.json ADDED
@@ -0,0 +1 @@
+ {"total_duplicated_tokens": 0, "total_tokens_written": 37862745, "total_tokens_skipped": 589, "percentiles": {"0th": 45, "10th": 229, "20th": 346, "30th": 462, "40th": 589, "50th": 730, "60th": 907, "70th": 1160, "80th": 1584, "90th": 2698, "95th": 4550, "99th": 7999, "100th": 8000}}
train/dclm-dolmino/split_10317-tokenized-chunked-8000-512-128-backfill-nodups/token_decile.json ADDED
The diff for this file is too large to render. See raw diff
 
train/dclm-dolmino/split_12373-tokenized-chunked-8000-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107826, "hashes": {}}, "samples": 26879, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47627925, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 9193274, "hashes": {}}, "samples": 3676, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 6622121, "hashes": {}}}], "version": 2}
train/dclm-dolmino/split_12373-tokenized-chunked-8000-512-128-backfill-nodups/stats.json ADDED
@@ -0,0 +1 @@
+ {"total_duplicated_tokens": 0, "total_tokens_written": 36784170, "total_tokens_skipped": 823, "percentiles": {"0th": 41, "10th": 225, "20th": 331, "30th": 442, "40th": 575, "50th": 718, "60th": 903, "70th": 1159, "80th": 1570, "90th": 2615, "95th": 4244, "99th": 7999, "100th": 8000}}
train/dclm-dolmino/split_12373-tokenized-chunked-8000-512-128-backfill-nodups/token_decile.json ADDED
The diff for this file is too large to render. See raw diff
 
train/dclm-dolmino/split_12852-tokenized-chunked-8000-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107709, "hashes": {}}, "samples": 26440, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 48406998, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 10986666, "hashes": {}}, "samples": 4176, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 7918352, "hashes": {}}}], "version": 2}
train/dclm-dolmino/split_12852-tokenized-chunked-8000-512-128-backfill-nodups/stats.json ADDED
@@ -0,0 +1 @@
+ {"total_duplicated_tokens": 0, "total_tokens_written": 37750593, "total_tokens_skipped": 987, "percentiles": {"0th": 42, "10th": 237, "20th": 351, "30th": 470, "40th": 596, "50th": 733, "60th": 907, "70th": 1160, "80th": 1593, "90th": 2696, "95th": 4397, "99th": 7999, "100th": 8000}}
train/dclm-dolmino/split_12852-tokenized-chunked-8000-512-128-backfill-nodups/token_decile.json ADDED
The diff for this file is too large to render. See raw diff
 
train/dclm-dolmino/split_12863-tokenized-chunked-8000-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67106264, "hashes": {}}, "samples": 26774, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 48379206, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 8702534, "hashes": {}}, "samples": 3712, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 6276168, "hashes": {}}}], "version": 2}
train/dclm-dolmino/split_12863-tokenized-chunked-8000-512-128-backfill-nodups/stats.json ADDED
@@ -0,0 +1 @@
+ {"total_duplicated_tokens": 0, "total_tokens_written": 36565734, "total_tokens_skipped": 1063, "percentiles": {"0th": 36, "10th": 238, "20th": 370, "30th": 497, "40th": 629, "50th": 770, "60th": 947, "70th": 1179, "80th": 1555, "90th": 2521, "95th": 3892, "99th": 7999, "100th": 8000}}
train/dclm-dolmino/split_12863-tokenized-chunked-8000-512-128-backfill-nodups/token_decile.json ADDED
The diff for this file is too large to render. See raw diff
 
train/dclm-dolmino/split_13048-tokenized-chunked-8000-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67104533, "hashes": {}}, "samples": 26608, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 48264404, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 10751889, "hashes": {}}, "samples": 3983, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 7730711, "hashes": {}}}], "version": 2}
train/dclm-dolmino/split_13048-tokenized-chunked-8000-512-128-backfill-nodups/stats.json ADDED
@@ -0,0 +1 @@
+ {"total_duplicated_tokens": 0, "total_tokens_written": 37593636, "total_tokens_skipped": 711, "percentiles": {"0th": 43, "10th": 239, "20th": 362, "30th": 486, "40th": 616, "50th": 757, "60th": 944, "70th": 1198, "80th": 1603, "90th": 2614, "95th": 4175, "99th": 7999, "100th": 8000}}
train/dclm-dolmino/split_13048-tokenized-chunked-8000-512-128-backfill-nodups/token_decile.json ADDED
The diff for this file is too large to render. See raw diff
 
train/dclm-dolmino/split_14020-tokenized-chunked-8000-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108421, "hashes": {}}, "samples": 25716, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 48240586, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 13530398, "hashes": {}}, "samples": 4966, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 9694682, "hashes": {}}}], "version": 2}
train/dclm-dolmino/split_14020-tokenized-chunked-8000-512-128-backfill-nodups/stats.json ADDED
@@ -0,0 +1 @@
+ {"total_duplicated_tokens": 0, "total_tokens_written": 38973965, "total_tokens_skipped": 673, "percentiles": {"0th": 36, "10th": 236, "20th": 367, "30th": 489, "40th": 621, "50th": 768, "60th": 958, "70th": 1216, "80th": 1644, "90th": 2727, "95th": 4566, "99th": 7999, "100th": 8000}}
train/dclm-dolmino/split_14020-tokenized-chunked-8000-512-128-backfill-nodups/token_decile.json ADDED
The diff for this file is too large to render. See raw diff
 
train/dclm-dolmino/split_14197-tokenized-chunked-8000-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107272, "hashes": {}}, "samples": 25518, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 48076473, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 12992064, "hashes": {}}, "samples": 5074, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 9324291, "hashes": {}}}], "version": 2}
train/dclm-dolmino/split_14197-tokenized-chunked-8000-512-128-backfill-nodups/stats.json ADDED
@@ -0,0 +1 @@
+ {"total_duplicated_tokens": 0, "total_tokens_written": 38690371, "total_tokens_skipped": 597, "percentiles": {"0th": 39, "10th": 253, "20th": 390, "30th": 524, "40th": 663, "50th": 817, "60th": 1005, "70th": 1256, "80th": 1658, "90th": 2600, "95th": 4134, "99th": 7999, "100th": 8000}}
train/dclm-dolmino/split_14197-tokenized-chunked-8000-512-128-backfill-nodups/token_decile.json ADDED
The diff for this file is too large to render. See raw diff
 
train/dclm-dolmino/split_14297-tokenized-chunked-8000-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108863, "hashes": {}}, "samples": 26906, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 48431053, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 9403326, "hashes": {}}, "samples": 3673, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 6745997, "hashes": {}}}], "version": 2}
train/dclm-dolmino/split_14297-tokenized-chunked-8000-512-128-backfill-nodups/stats.json ADDED
@@ -0,0 +1 @@
+ {"total_duplicated_tokens": 0, "total_tokens_written": 36948321, "total_tokens_skipped": 751, "percentiles": {"0th": 38, "10th": 236, "20th": 357, "30th": 478, "40th": 606, "50th": 743, "60th": 920, "70th": 1158, "80th": 1560, "90th": 2565, "95th": 4110, "99th": 7999, "100th": 8000}}
train/dclm-dolmino/split_14297-tokenized-chunked-8000-512-128-backfill-nodups/token_decile.json ADDED
The diff for this file is too large to render. See raw diff
 
train/dclm-dolmino/split_14692-tokenized-chunked-8000-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67105400, "hashes": {}}, "samples": 25966, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 48093783, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 12023136, "hashes": {}}, "samples": 4730, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 8634676, "hashes": {}}}], "version": 2}
train/dclm-dolmino/split_14692-tokenized-chunked-8000-512-128-backfill-nodups/stats.json ADDED
@@ -0,0 +1 @@
+ {"total_duplicated_tokens": 0, "total_tokens_written": 38225523, "total_tokens_skipped": 1610, "percentiles": {"0th": 38, "10th": 234, "20th": 346, "30th": 470, "40th": 606, "50th": 752, "60th": 932, "70th": 1187, "80th": 1606, "90th": 2654, "95th": 4454, "99th": 7999, "100th": 8000}}
train/dclm-dolmino/split_14692-tokenized-chunked-8000-512-128-backfill-nodups/token_decile.json ADDED
The diff for this file is too large to render. See raw diff
 
train/dclm-dolmino/split_14965-tokenized-chunked-8000-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67104469, "hashes": {}}, "samples": 25747, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 48113995, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 12755123, "hashes": {}}, "samples": 4952, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 9165173, "hashes": {}}}], "version": 2}
train/dclm-dolmino/split_14965-tokenized-chunked-8000-512-128-backfill-nodups/stats.json ADDED
@@ -0,0 +1 @@
+ {"total_duplicated_tokens": 0, "total_tokens_written": 38605858, "total_tokens_skipped": 729, "percentiles": {"0th": 39, "10th": 222, "20th": 329, "30th": 447, "40th": 576, "50th": 729, "60th": 919, "70th": 1183, "80th": 1631, "90th": 2813, "95th": 4775, "99th": 7999, "100th": 8000}}
train/dclm-dolmino/split_14965-tokenized-chunked-8000-512-128-backfill-nodups/token_decile.json ADDED
The diff for this file is too large to render. See raw diff
 
train/dclm-dolmino/split_14983-tokenized-chunked-8000-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108860, "hashes": {}}, "samples": 26214, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 48368660, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 11108573, "hashes": {}}, "samples": 4423, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 8011992, "hashes": {}}}], "version": 2}
train/dclm-dolmino/split_14983-tokenized-chunked-8000-512-128-backfill-nodups/stats.json ADDED
@@ -0,0 +1 @@
+ {"total_duplicated_tokens": 0, "total_tokens_written": 37794369, "total_tokens_skipped": 1262, "percentiles": {"0th": 39, "10th": 236, "20th": 359, "30th": 480, "40th": 608, "50th": 749, "60th": 931, "70th": 1184, "80th": 1593, "90th": 2607, "95th": 4318, "99th": 7999, "100th": 8000}}
train/dclm-dolmino/split_15962-tokenized-chunked-8000-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67095299, "hashes": {}}, "samples": 25505, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 48136670, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 13203871, "hashes": {}}, "samples": 5154, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 9477672, "hashes": {}}}], "version": 2}
train/dclm-dolmino/split_15962-tokenized-chunked-8000-512-128-backfill-nodups/stats.json ADDED
@@ -0,0 +1 @@
+ {"total_duplicated_tokens": 0, "total_tokens_written": 38807082, "total_tokens_skipped": 1426, "percentiles": {"0th": 36, "10th": 234, "20th": 353, "30th": 478, "40th": 611, "50th": 757, "60th": 948, "70th": 1223, "80th": 1669, "90th": 2755, "95th": 4529, "99th": 7999, "100th": 8000}}
train/dclm-dolmino/split_15962-tokenized-chunked-8000-512-128-backfill-nodups/token_decile.json ADDED
The diff for this file is too large to render. See raw diff
 
train/dclm-dolmino/split_15974-tokenized-chunked-8000-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67105415, "hashes": {}}, "samples": 24507, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 48246018, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 16576325, "hashes": {}}, "samples": 6284, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 11927760, "hashes": {}}}], "version": 2}
train/dclm-dolmino/split_15974-tokenized-chunked-8000-512-128-backfill-nodups/stats.json ADDED
@@ -0,0 +1 @@
+ {"total_duplicated_tokens": 0, "total_tokens_written": 40510536, "total_tokens_skipped": 954, "percentiles": {"0th": 45, "10th": 237, "20th": 358, "30th": 484, "40th": 620, "50th": 773, "60th": 965, "70th": 1242, "80th": 1732, "90th": 2933, "95th": 4923, "99th": 7999, "100th": 8000}}
train/dclm-dolmino/split_15974-tokenized-chunked-8000-512-128-backfill-nodups/token_decile.json ADDED
The diff for this file is too large to render. See raw diff
 
train/dclm-dolmino/split_16339-tokenized-chunked-8000-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108238, "hashes": {}}, "samples": 26512, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 48300144, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 10299137, "hashes": {}}, "samples": 4012, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 7399808, "hashes": {}}}], "version": 2}
train/dclm-dolmino/split_16339-tokenized-chunked-8000-512-128-backfill-nodups/stats.json ADDED
@@ -0,0 +1 @@
+ {"total_duplicated_tokens": 0, "total_tokens_written": 37351615, "total_tokens_skipped": 1068, "percentiles": {"0th": 37, "10th": 243, "20th": 371, "30th": 500, "40th": 631, "50th": 773, "60th": 954, "70th": 1206, "80th": 1604, "90th": 2557, "95th": 4078, "99th": 7999, "100th": 8000}}
train/dclm-dolmino/split_16339-tokenized-chunked-8000-512-128-backfill-nodups/token_decile.json ADDED
The diff for this file is too large to render. See raw diff
 
train/dclm-dolmino/split_16467-tokenized-chunked-8000-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67105733, "hashes": {}}, "samples": 24554, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 48158231, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 15728032, "hashes": {}}, "samples": 6164, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 11353821, "hashes": {}}}], "version": 2}
train/dclm-dolmino/split_17471-tokenized-chunked-8000-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108664, "hashes": {}}, "samples": 24159, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 48117696, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 16569057, "hashes": {}}, "samples": 6602, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 11908256, "hashes": {}}}], "version": 2}
train/dclm-dolmino/split_17471-tokenized-chunked-8000-512-128-backfill-nodups/stats.json ADDED
@@ -0,0 +1 @@
+ {"total_duplicated_tokens": 0, "total_tokens_written": 40499579, "total_tokens_skipped": 1092, "percentiles": {"0th": 44, "10th": 236, "20th": 362, "30th": 492, "40th": 628, "50th": 779, "60th": 979, "70th": 1254, "80th": 1726, "90th": 2911, "95th": 4862, "99th": 7999, "100th": 8000}}
train/dclm-dolmino/split_17471-tokenized-chunked-8000-512-128-backfill-nodups/token_decile.json ADDED
The diff for this file is too large to render. See raw diff
 
train/dclm-dolmino/split_2813-tokenized-chunked-8000-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108072, "hashes": {}}, "samples": 25465, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 48237063, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 13539117, "hashes": {}}, "samples": 5195, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 9741384, "hashes": {}}}], "version": 2}
train/dclm-dolmino/split_2813-tokenized-chunked-8000-512-128-backfill-nodups/stats.json ADDED
@@ -0,0 +1 @@
+ {"total_duplicated_tokens": 0, "total_tokens_written": 38973656, "total_tokens_skipped": 910, "percentiles": {"0th": 39, "10th": 239, "20th": 367, "30th": 490, "40th": 624, "50th": 773, "60th": 962, "70th": 1223, "80th": 1650, "90th": 2723, "95th": 4538, "99th": 7999, "100th": 8000}}
train/dclm-dolmino/split_2813-tokenized-chunked-8000-512-128-backfill-nodups/token_decile.json ADDED
The diff for this file is too large to render. See raw diff
 
train/dclm-dolmino/split_3395-tokenized-chunked-8000-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67105734, "hashes": {}}, "samples": 25804, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 48102894, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 12387502, "hashes": {}}, "samples": 4777, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 8865260, "hashes": {}}}], "version": 2}
train/dclm-dolmino/split_3395-tokenized-chunked-8000-512-128-backfill-nodups/stats.json ADDED
@@ -0,0 +1 @@
+ {"total_duplicated_tokens": 0, "total_tokens_written": 38393120, "total_tokens_skipped": 871, "percentiles": {"0th": 48, "10th": 250, "20th": 389, "30th": 521, "40th": 650, "50th": 799, "60th": 979, "70th": 1225, "80th": 1635, "90th": 2612, "95th": 4186, "99th": 7999, "100th": 8000}}