diff --git a/.gitattributes b/.gitattributes
index 13a607c785e19d25d39b92492804f6b2b95fd187..c155163fca4c03d029ee4367939214d44a6202dd 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -22308,3 +22308,37 @@ train/stackexchange/stackexchange_0017-tokenized-chunked-1024-512-128-backfill-n
 train/stackexchange/stackexchange_0017-tokenized-chunked-1024-512-128-backfill-nodups/shard.00013.mds filter=lfs diff=lfs merge=lfs -text
 train/stackexchange/stackexchange_0017-tokenized-chunked-1024-512-128-backfill-nodups/shard.00004.mds filter=lfs diff=lfs merge=lfs -text
 train/stackexchange/stackexchange_0017-tokenized-chunked-1024-512-128-backfill-nodups/shard.00011.mds filter=lfs diff=lfs merge=lfs -text
+train/stackexchange/stackexchange_0012-tokenized-chunked-1024-512-128-backfill-nodups/shard.00022.mds filter=lfs diff=lfs merge=lfs -text
+train/stackexchange/stackexchange_0012-tokenized-chunked-1024-512-128-backfill-nodups/shard.00007.mds filter=lfs diff=lfs merge=lfs -text
+train/stackexchange/stackexchange_0012-tokenized-chunked-1024-512-128-backfill-nodups/shard.00003.mds filter=lfs diff=lfs merge=lfs -text
+train/stackexchange/stackexchange_0012-tokenized-chunked-1024-512-128-backfill-nodups/shard.00016.mds filter=lfs diff=lfs merge=lfs -text
+train/stackexchange/stackexchange_0012-tokenized-chunked-1024-512-128-backfill-nodups/shard.00004.mds filter=lfs diff=lfs merge=lfs -text
+train/stackexchange/stackexchange_0012-tokenized-chunked-1024-512-128-backfill-nodups/shard.00006.mds filter=lfs diff=lfs merge=lfs -text
+train/stackexchange/stackexchange_0012-tokenized-chunked-1024-512-128-backfill-nodups/shard.00019.mds filter=lfs diff=lfs merge=lfs -text
+train/stackexchange/stackexchange_0012-tokenized-chunked-1024-512-128-backfill-nodups/shard.00008.mds filter=lfs diff=lfs merge=lfs -text
+train/stackexchange/stackexchange_0012-tokenized-chunked-1024-512-128-backfill-nodups/shard.00002.mds filter=lfs diff=lfs merge=lfs -text
+train/stackexchange/stackexchange_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00019.mds filter=lfs diff=lfs merge=lfs -text
+train/stackexchange/stackexchange_0012-tokenized-chunked-1024-512-128-backfill-nodups/shard.00013.mds filter=lfs diff=lfs merge=lfs -text
+train/stackexchange/stackexchange_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00022.mds filter=lfs diff=lfs merge=lfs -text
+train/stackexchange/stackexchange_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00016.mds filter=lfs diff=lfs merge=lfs -text
+train/stackexchange/stackexchange_0012-tokenized-chunked-1024-512-128-backfill-nodups/shard.00017.mds filter=lfs diff=lfs merge=lfs -text
+train/stackexchange/stackexchange_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+train/stackexchange/stackexchange_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00007.mds filter=lfs diff=lfs merge=lfs -text
+train/stackexchange/stackexchange_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00010.mds filter=lfs diff=lfs merge=lfs -text
+train/stackexchange/stackexchange_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00004.mds filter=lfs diff=lfs merge=lfs -text
+train/stackexchange/stackexchange_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00024.mds filter=lfs diff=lfs merge=lfs -text
+train/stackexchange/stackexchange_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00023.mds filter=lfs diff=lfs merge=lfs -text
+train/stackexchange/stackexchange_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00021.mds filter=lfs diff=lfs merge=lfs -text
+train/stackexchange/stackexchange_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+train/stackexchange/stackexchange_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00008.mds filter=lfs diff=lfs merge=lfs -text
+train/stackexchange/stackexchange_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00002.mds filter=lfs diff=lfs merge=lfs -text
+train/stackexchange/stackexchange_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00009.mds filter=lfs diff=lfs merge=lfs -text
+train/stackexchange/stackexchange_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00017.mds filter=lfs diff=lfs merge=lfs -text
+train/stackexchange/stackexchange_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00018.mds filter=lfs diff=lfs merge=lfs -text
+train/stackexchange/stackexchange_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00005.mds filter=lfs diff=lfs merge=lfs -text
+train/stackexchange/stackexchange_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00015.mds filter=lfs diff=lfs merge=lfs -text
+train/stackexchange/stackexchange_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00013.mds filter=lfs diff=lfs merge=lfs -text
+train/stackexchange/stackexchange_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00003.mds filter=lfs diff=lfs merge=lfs -text
+train/stackexchange/stackexchange_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00014.mds filter=lfs diff=lfs merge=lfs -text
+train/stackexchange/stackexchange_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00012.mds filter=lfs diff=lfs merge=lfs -text
+train/stackexchange/stackexchange_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00006.mds filter=lfs diff=lfs merge=lfs -text
diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_10046-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_10046-tokenized-chunked-1024-512-128-backfill-nodups/index.json
new file mode 100644
index 0000000000000000000000000000000000000000..7e52bb3835031cdc6a6b70fd9c28d008a1ea5e79
--- /dev/null
+++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_10046-tokenized-chunked-1024-512-128-backfill-nodups/index.json
@@ -0,0 +1 @@
+{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67106781, "hashes": {}}, "samples": 43715, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47618129, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 15324011, "hashes": {}}, "samples": 9986, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 10947508, "hashes": {}}}], "version": 2}
\ No newline at end of file
diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_10046-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json
b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_10046-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..9cf77e6a9f50ed293009cbaf1e33911a6a351798 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_10046-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 39553503, + "num_truncated_tokens": 39520509 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_14420-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_14420-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..107c0dd2f7c1b8bdc904e7a1d07d1b1bbba6a13b --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_14420-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107224, "hashes": {}}, "samples": 43933, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47541137, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 11149184, "hashes": {}}, "samples": 7313, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 7902454, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_14420-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_14420-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..7b1bcd3414409ab2314633537f7dbe8936be73fe --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_14420-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 37542092, + "num_truncated_tokens": 37513292 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_19692-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_19692-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..3a2d243ae65838ac6bf7ade544f0da81520d6f68 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_19692-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107881, "hashes": {}}, "samples": 43379, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47659482, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], 
"column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 15948461, "hashes": {}}, "samples": 10513, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 11364268, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_19692-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_19692-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..93067ea8c688cb464435c13e894122d9c9b29d78 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_19692-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 39860315, + "num_truncated_tokens": 39827140 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_20629-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_20629-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..dced66bdf8b320e6727223365e36fa81613672ac --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_20629-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107698, "hashes": {}}, "samples": 44108, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47673083, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 11296231, "hashes": {}}, "samples": 7400, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 8009293, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_20629-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_20629-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..dced0b91dae8c99ea1f2e8d8f324e0d0a7f0588b --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_20629-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 37607837, + "num_truncated_tokens": 37578581 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_23783-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_23783-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..b46edea7fa9c3b4a190c73eefcd8044c033ab98a --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_23783-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 
@@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108586, "hashes": {}}, "samples": 43778, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47644202, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 13199337, "hashes": {}}, "samples": 8845, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 9426772, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_23783-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_23783-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..c1cd95aa5a9d7c02ac8e7269edb26695046a0a24 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_23783-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 38525381, + "num_truncated_tokens": 38494025 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_2435-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_2435-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..89df65be67e7ece96beb2b8e7dd785ef9133dd7e --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_2435-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108778, "hashes": {}}, "samples": 43110, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47956199, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 17461523, "hashes": {}}, "samples": 11280, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 12495661, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_2435-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_2435-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..2d25ab537e5ba4887e4f48aea7d8bb36cd0efa06 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_2435-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 40601543, + "num_truncated_tokens": 40567355 +} \ No newline at end of file diff --git 
a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_2495-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_2495-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..5ccfb28e9bed33e1f121ce2e2528834ce24010ea --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_2495-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108409, "hashes": {}}, "samples": 42623, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47696664, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 21617056, "hashes": {}}, "samples": 13703, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 15390617, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_2495-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_2495-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..eae6f28d3c03ba57d61e708f420f95b03afe6b8e --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_2495-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 42619104, + "num_truncated_tokens": 42581455 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_32097-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_32097-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..5f5440b5fd6a90f0c36e18dc4f9f58dc4112c8a8 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_32097-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67106811, "hashes": {}}, "samples": 42952, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47879423, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 20403789, "hashes": {}}, "samples": 12786, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 14583893, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_32097-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json 
b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_32097-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..6d666392f3aeffa8eb86d612f77493787734cc93 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_32097-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 42029926, + "num_truncated_tokens": 41993509 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_34089-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_34089-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..cd2435381fb3d0df3e0877b46921e7fa8a58fc36 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_34089-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107205, "hashes": {}}, "samples": 44550, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47824105, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 8205012, "hashes": {}}, "samples": 5500, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 5844708, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_34089-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_34089-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..373bd262550b7f12a15a48a70b443337cce983ba --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_34089-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 36107213, + "num_truncated_tokens": 36080353 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_38481-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_38481-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..93b03d3b311f8662fb022348d397c053d539b542 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_38481-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108449, "hashes": {}}, "samples": 44357, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47832275, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], 
"column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 9615564, "hashes": {}}, "samples": 6338, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 6856490, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_38481-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_38481-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..2d1549d2bf38568074bc1e635194f24f85a63fb0 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_38481-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 36793079, + "num_truncated_tokens": 36765207 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_42092-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_42092-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..5a5fd228c07b194c6df12d75767daf630fb33f60 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_42092-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107974, "hashes": {}}, "samples": 42723, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47499762, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 20071106, "hashes": {}}, "samples": 12993, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 14279675, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_42293-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_42293-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..4c4ba4b25e3a3cc995ec7c5b4dd8d7b103d578bf --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_42293-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107071, "hashes": {}}, "samples": 43184, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47667649, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 19001158, "hashes": {}}, 
"samples": 12118, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 13435283, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_42293-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_42293-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..216a3631d37708bdf5c14ce90029b6a1cb23d218 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_42293-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 41342408, + "num_truncated_tokens": 41307396 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_42369-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_42369-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..c481cbc12f913d97bf745e2e12448f9f0ed45712 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_42369-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107101, "hashes": {}}, "samples": 43291, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47870290, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 16089909, "hashes": {}}, "samples": 10471, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 11508388, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_42369-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_42369-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..607ebac2133e9d19289147d73b214d3c31d4ae19 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_42369-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 39934388, + "num_truncated_tokens": 39901744 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_4313-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_4313-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..b3247da8c684c057b82fd90b41bfd6465165c0c4 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_4313-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": 
"mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108458, "hashes": {}}, "samples": 44135, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47787333, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 11574634, "hashes": {}}, "samples": 7574, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 8239083, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_4313-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_4313-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..dbcd08f558e11e51bc7d12694f25f27f378fe282 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_4313-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 37741290, + "num_truncated_tokens": 37711750 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_44652-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_44652-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..3f4d36b07391a5f17f7b6722a803ca8b6a9613ba --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_44652-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108220, "hashes": {}}, "samples": 43251, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47597445, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 17891252, "hashes": {}}, "samples": 11394, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 12655435, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_44652-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_44652-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..a1d5ff0186d6c97d43556ad932d95cf8fc2166c6 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_44652-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 40808308, + "num_truncated_tokens": 40773922 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_44666-tokenized-chunked-1024-512-128-backfill-nodups/index.json 
b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_44666-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..2e2624c7a6f6725c9a792cb21c1af4785b36a12c --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_44666-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108465, "hashes": {}}, "samples": 43933, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47489705, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 11807723, "hashes": {}}, "samples": 7647, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 8338996, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_44666-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_44666-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..7ddc2fe8f56a9c62d4b31c32ff7c632a991b6431 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_44666-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 37861666, + "num_truncated_tokens": 37832795 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_4600-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_4600-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..8dd5c06cde1f41c0800ac42daf6dab8db4dc0872 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_4600-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67106997, "hashes": {}}, "samples": 42866, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47492568, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 20252754, "hashes": {}}, "samples": 12778, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 14258726, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_4600-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_4600-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 
0000000000000000000000000000000000000000..87d4fd754e1bbf388443a2d8f574d26fcb1bd572 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_4600-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 41957398, + "num_truncated_tokens": 41921665 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_47283-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_47283-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..d996a99a41ac3fb3c70993963e88a8e1a8bdd19c --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_47283-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108481, "hashes": {}}, "samples": 43574, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 48072539, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 14253053, "hashes": {}}, "samples": 9364, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 10178147, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_47283-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_47283-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..c80022b3a529ca00cb83e67c622a14d6453a51b7 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_47283-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 39042212, + "num_truncated_tokens": 39010511 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_48382-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_48382-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..fd4b842b6587b4441d4ffa861df9e5b31a91b1e6 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_48382-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107933, "hashes": {}}, "samples": 43931, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47730197, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 12194408, "hashes": {}}, 
"samples": 8155, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 8719048, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_48382-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_48382-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..798fac60150bda6ba57a58c4a48fddcace8b581d --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_48382-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 38039248, + "num_truncated_tokens": 38009091 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_52847-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_52847-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..1fd8f6eda3d10695589499f9608745f935f730c7 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_52847-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107527, "hashes": {}}, "samples": 42586, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47749364, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 23253462, "hashes": {}}, "samples": 14669, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 16555831, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_52847-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_52847-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..375bdbdefbc30ecd5a8989a675211c7639707870 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_52847-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 43408215, + "num_truncated_tokens": 43369433 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_53845-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_53845-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..2abdebca41ba68665ae6895b89934e23d65aa93f --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_53845-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", 
"format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108563, "hashes": {}}, "samples": 44255, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47951320, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 11174214, "hashes": {}}, "samples": 7376, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 7977629, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_53845-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_53845-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..a4ae7f522b891cb01f720d7a1fb6e04592ce746a --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_53845-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 37543549, + "num_truncated_tokens": 37513649 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_56099-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_56099-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..83d1e59e00896f7e6f3f529cfd82e73ba6e476a4 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_56099-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108018, "hashes": {}}, "samples": 44094, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47810328, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 11141193, "hashes": {}}, "samples": 7308, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 7923914, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_56099-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_56099-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..4174d1f15f5637cc9768f7f6fd93071ef746cdc1 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_56099-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 37533714, + "num_truncated_tokens": 37505065 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_56680-tokenized-chunked-1024-512-128-backfill-nodups/index.json 
b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_56680-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..2b93ba75dd4397eed1bd2b8268c9b95836fe2be2 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_56680-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108605, "hashes": {}}, "samples": 43640, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47860545, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 14682801, "hashes": {}}, "samples": 9564, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 10510423, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_56680-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_56680-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..98419368913be66088362c3da8a9a2e659011fc0 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_56680-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 39248913, + "num_truncated_tokens": 39216573 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_57103-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_57103-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..9a45c8c9593f3befd86dd47d0256fa6eb7b33926 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_57103-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108485, "hashes": {}}, "samples": 44095, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47631859, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 10411975, "hashes": {}}, "samples": 6908, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 7408273, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_57103-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_57103-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 
0000000000000000000000000000000000000000..8ad253e36c49d7e8a557f78bd899b836ff7d0b67 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_57103-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 37181713, + "num_truncated_tokens": 37153330 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_59646-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_59646-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..448f9ae2057628a97123fa9322581b7cc865c297 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_59646-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108739, "hashes": {}}, "samples": 44437, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47760476, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 10706414, "hashes": {}}, "samples": 6944, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 7646193, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_59646-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_59646-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..87f858be20bc6a40c6d7eda523b28cc5fbe0739c --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_59646-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 37317488, + "num_truncated_tokens": 37288936 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_61119-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_61119-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..28429fd10b55f65450fdf32b215d3f7a5e8e03d0 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_61119-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108113, "hashes": {}}, "samples": 42902, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47745140, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 18815691, "hashes": {}}, 
"samples": 12141, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 13360302, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_61119-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_61119-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..c883cecfbd31500d9b5b944c9c2a5a2d87a236a0 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_61119-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 41258097, + "num_truncated_tokens": 41222977 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_63194-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_63194-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..3aeac4c33e316278412e9eff199c6617b6249d20 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_63194-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107853, "hashes": {}}, "samples": 42392, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47628083, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 23224711, "hashes": {}}, "samples": 14683, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 16537413, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_63194-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_63194-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..21e2bdcbaf4acf872326842d1fa52e033b21af5d --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_63194-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 43399484, + "num_truncated_tokens": 43361239 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_63609-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_63609-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..fe60cc189599fe66dc9e714f8e5f2f705a636796 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_63609-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", 
"format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107358, "hashes": {}}, "samples": 43686, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47825531, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 15671702, "hashes": {}}, "samples": 9973, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 11114871, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_63609-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_63609-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..52442ec9aa9ac98562f9eb46f8585a515c3bdda9 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_63609-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 39728813, + "num_truncated_tokens": 39695556 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_66129-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_66129-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..cb4027d4bb51b9960e519c57caef5cea17352b02 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_66129-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107075, "hashes": {}}, "samples": 44297, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47846786, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 8918419, "hashes": {}}, "samples": 5943, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 6409309, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_66129-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_66129-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..3423a78db17212ebff3aebbfd7e0a5f8f01db25f --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_66129-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 36457855, + "num_truncated_tokens": 36431105 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_67604-tokenized-chunked-1024-512-128-backfill-nodups/index.json 
b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_67604-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..8dde3d1d7d9f594d5bcd57a44dec6308ef7566c2 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_67604-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108827, "hashes": {}}, "samples": 43267, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47484659, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 17649588, "hashes": {}}, "samples": 11411, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 12547320, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_67604-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_67604-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..b71d53f28893098435aa1288d0868678d8f27db3 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_67604-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 40686962, + "num_truncated_tokens": 40652567 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_69187-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_69187-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..c85d43f4565ba30976a033766739fe7383f809f8 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_69187-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108778, "hashes": {}}, "samples": 42896, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47681126, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 19901299, "hashes": {}}, "samples": 12967, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 14198064, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_69187-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_69187-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 
index 0000000000000000000000000000000000000000..06e6fc7bbc54f0f6457253b3edc243012fcb4b49 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_69187-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 41776063, + "num_truncated_tokens": 41739783 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_69205-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_69205-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..93e4b02c8551e4b91ad4380187010c99531d984f --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_69205-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107137, "hashes": {}}, "samples": 43228, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 48047525, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 16731261, "hashes": {}}, "samples": 10776, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 11987751, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_69205-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_69205-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..8d1186282a8858ff1a620e69a7d1419f01793c48 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_69205-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 40247548, + "num_truncated_tokens": 40213785 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_6988-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_6988-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..b8db9a0887a8dd703affd2ab2dd2898792e163e6 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_6988-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107640, "hashes": {}}, "samples": 43902, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47721978, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 10849538, "hashes": 
{}}, "samples": 7164, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 7677387, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_6988-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_6988-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..7898521755491f42adb8cc11f46fe02552f69bd1 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_6988-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 37398057, + "num_truncated_tokens": 37369375 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_84668-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_84668-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..4a728631ebf3ef64c6169763b32de7b50c8a003c --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_84668-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107176, "hashes": {}}, "samples": 44012, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47705653, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 11351078, "hashes": {}}, "samples": 7420, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 8131888, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_84668-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_84668-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..54fe99e94afbb21f4afa46c0864a54f4da434fa0 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_84668-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 37637306, + "num_truncated_tokens": 37608125 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_87121-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_87121-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..249db6cdb82d6ad938806f0d3ea7b6014bfd81ca --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_87121-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", 
"format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67106875, "hashes": {}}, "samples": 42994, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47861125, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 17890109, "hashes": {}}, "samples": 11611, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 12712297, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_87121-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_87121-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..bc061f6e6061b3b86fa6da0369d822da0f0ff5ad --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_87121-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 40808250, + "num_truncated_tokens": 40774108 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_87944-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_87944-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..796f36f067923bff770c0dc0bf87fa5cdc0845ef --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_87944-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108220, "hashes": {}}, "samples": 43122, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47908150, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 17858849, "hashes": {}}, "samples": 11444, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 12795958, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_87944-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_87944-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..2331880adf1dd26cb4d01edc672d18beed620e25 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_87944-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 40794427, + "num_truncated_tokens": 40760196 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_94600-tokenized-chunked-1024-512-128-backfill-nodups/index.json 
b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_94600-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..469f91ae5112c77cf615edc1c0719de63e3c3461 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_94600-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108659, "hashes": {}}, "samples": 43926, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47669220, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 12623124, "hashes": {}}, "samples": 8155, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 8969276, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_94600-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_94600-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..e3cdb954d303ba7e60e43c684ab3df744ba8f0f3 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_94600-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 38253994, + "num_truncated_tokens": 38224139 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_98035-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_98035-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..a6b2bd7f10ed0c7a5c1499d0ba12601c715ca418 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_98035-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108318, "hashes": {}}, "samples": 43886, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47627647, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 12873173, "hashes": {}}, "samples": 8176, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 9137884, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_98035-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_98035-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 
0000000000000000000000000000000000000000..c0a1771c0ac01fa9c8a3f3fb0b52ca9956f549ff --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_98035-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 38379330, + "num_truncated_tokens": 38348914 +} \ No newline at end of file diff --git a/train/stackexchange/stackexchange_0012-tokenized-chunked-1024-512-128-backfill-nodups/shard.00002.mds b/train/stackexchange/stackexchange_0012-tokenized-chunked-1024-512-128-backfill-nodups/shard.00002.mds new file mode 100644 index 0000000000000000000000000000000000000000..ecd34a565441249653d2d0f97612b91d6ba01ab7 --- /dev/null +++ b/train/stackexchange/stackexchange_0012-tokenized-chunked-1024-512-128-backfill-nodups/shard.00002.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bdafb33a0af945e5f251b8764e730a3cef3ef98817af35447eaff592b127fe44 +size 67107304 diff --git a/train/stackexchange/stackexchange_0012-tokenized-chunked-1024-512-128-backfill-nodups/shard.00003.mds b/train/stackexchange/stackexchange_0012-tokenized-chunked-1024-512-128-backfill-nodups/shard.00003.mds new file mode 100644 index 0000000000000000000000000000000000000000..39da2c13b830204e8ecd7b7c459418f1197fd365 --- /dev/null +++ b/train/stackexchange/stackexchange_0012-tokenized-chunked-1024-512-128-backfill-nodups/shard.00003.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f6f2e0a8eaf8fbd693c29d646c06aaed0b96d3b1968467af69cc96352eae618a +size 67108383 diff --git a/train/stackexchange/stackexchange_0012-tokenized-chunked-1024-512-128-backfill-nodups/shard.00004.mds b/train/stackexchange/stackexchange_0012-tokenized-chunked-1024-512-128-backfill-nodups/shard.00004.mds new file mode 100644 index 0000000000000000000000000000000000000000..6878780c799738b56833e432ec34c2c387aaa7ba --- /dev/null +++ b/train/stackexchange/stackexchange_0012-tokenized-chunked-1024-512-128-backfill-nodups/shard.00004.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e173ad5f8a3a4114dd97d1710db8e8a53276f6631325fe78d9413e84624ed45b +size 67107217 diff --git a/train/stackexchange/stackexchange_0012-tokenized-chunked-1024-512-128-backfill-nodups/shard.00006.mds b/train/stackexchange/stackexchange_0012-tokenized-chunked-1024-512-128-backfill-nodups/shard.00006.mds new file mode 100644 index 0000000000000000000000000000000000000000..1981a69439178a08acfe61cb2ed4c3fe2662cf3c --- /dev/null +++ b/train/stackexchange/stackexchange_0012-tokenized-chunked-1024-512-128-backfill-nodups/shard.00006.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:82f464a18420e1670e3cba345d75391fde1066d6c69ae9c398bd4e9cddbf1ad1 +size 67108665 diff --git a/train/stackexchange/stackexchange_0012-tokenized-chunked-1024-512-128-backfill-nodups/shard.00007.mds b/train/stackexchange/stackexchange_0012-tokenized-chunked-1024-512-128-backfill-nodups/shard.00007.mds new file mode 100644 index 0000000000000000000000000000000000000000..d19bc5812f2327e82cb21997fbffaa257ae4bc8b --- /dev/null +++ b/train/stackexchange/stackexchange_0012-tokenized-chunked-1024-512-128-backfill-nodups/shard.00007.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fba1de58f276280daf1d3b32df2399b4a57d24431fa819484ce2575e9924b95f +size 67107903 diff --git a/train/stackexchange/stackexchange_0012-tokenized-chunked-1024-512-128-backfill-nodups/shard.00008.mds 
b/train/stackexchange/stackexchange_0012-tokenized-chunked-1024-512-128-backfill-nodups/shard.00008.mds new file mode 100644 index 0000000000000000000000000000000000000000..bf97344a4b8338e596bc4434358b70439e214e88 --- /dev/null +++ b/train/stackexchange/stackexchange_0012-tokenized-chunked-1024-512-128-backfill-nodups/shard.00008.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0253cd7767d25f1fc3fd970e10bc8d3c16e6d7b8cb679b538023806440297079 +size 67107778 diff --git a/train/stackexchange/stackexchange_0012-tokenized-chunked-1024-512-128-backfill-nodups/shard.00013.mds b/train/stackexchange/stackexchange_0012-tokenized-chunked-1024-512-128-backfill-nodups/shard.00013.mds new file mode 100644 index 0000000000000000000000000000000000000000..6cd8903401511ad223dc8d605849dd2a3873eb8f --- /dev/null +++ b/train/stackexchange/stackexchange_0012-tokenized-chunked-1024-512-128-backfill-nodups/shard.00013.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c8e659c8c722451f063ce8cbff29ffadf7426ac7f6809d057f1408cd2fc7304d +size 67107386 diff --git a/train/stackexchange/stackexchange_0012-tokenized-chunked-1024-512-128-backfill-nodups/shard.00016.mds b/train/stackexchange/stackexchange_0012-tokenized-chunked-1024-512-128-backfill-nodups/shard.00016.mds new file mode 100644 index 0000000000000000000000000000000000000000..464804711c23cc1e282b0a104bc9be128c443b8c --- /dev/null +++ b/train/stackexchange/stackexchange_0012-tokenized-chunked-1024-512-128-backfill-nodups/shard.00016.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cf5d75dc97b231b6277141289bf2e0458d206e76f5547c002fe1a99a5e35e8e4 +size 67108155 diff --git a/train/stackexchange/stackexchange_0012-tokenized-chunked-1024-512-128-backfill-nodups/shard.00017.mds b/train/stackexchange/stackexchange_0012-tokenized-chunked-1024-512-128-backfill-nodups/shard.00017.mds new file mode 100644 index 0000000000000000000000000000000000000000..7faa65acac7c701738897a3a49c89ed0cf2038da --- /dev/null +++ b/train/stackexchange/stackexchange_0012-tokenized-chunked-1024-512-128-backfill-nodups/shard.00017.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cf45688710d79c09a607b3ca3ae18f072a818992983f3dac0cf4461ae51d7686 +size 67107504 diff --git a/train/stackexchange/stackexchange_0012-tokenized-chunked-1024-512-128-backfill-nodups/shard.00019.mds b/train/stackexchange/stackexchange_0012-tokenized-chunked-1024-512-128-backfill-nodups/shard.00019.mds new file mode 100644 index 0000000000000000000000000000000000000000..51e25e24aceb2b53c646bad2316d581447db4352 --- /dev/null +++ b/train/stackexchange/stackexchange_0012-tokenized-chunked-1024-512-128-backfill-nodups/shard.00019.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4128a17127ce7997ac7f369e85585a0faa558f9843742b9fefba2ed8293616a6 +size 67108270 diff --git a/train/stackexchange/stackexchange_0012-tokenized-chunked-1024-512-128-backfill-nodups/shard.00022.mds b/train/stackexchange/stackexchange_0012-tokenized-chunked-1024-512-128-backfill-nodups/shard.00022.mds new file mode 100644 index 0000000000000000000000000000000000000000..cfdf1b2cab23095d1534b730efca421b006299bb --- /dev/null +++ b/train/stackexchange/stackexchange_0012-tokenized-chunked-1024-512-128-backfill-nodups/shard.00022.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f1ae7b5c16959d5cec7958974fe57a2bcf787c1b299485db0618f9f1a883375b +size 67108045 diff --git 
a/train/stackexchange/stackexchange_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds b/train/stackexchange/stackexchange_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds new file mode 100644 index 0000000000000000000000000000000000000000..d46b851664cef4da1f92d2bd9e4bb002e8df616e --- /dev/null +++ b/train/stackexchange/stackexchange_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fda6502e9a2974cbfa2914a13de1d0c0726b69bb214940ae92169ee68e5189d3 +size 67108846 diff --git a/train/stackexchange/stackexchange_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds b/train/stackexchange/stackexchange_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds new file mode 100644 index 0000000000000000000000000000000000000000..8800e9887f1acd3487a92e735c59b6a5f652df37 --- /dev/null +++ b/train/stackexchange/stackexchange_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6e89bc8ab1bf3232c43018aa48126257304589c3650ae379246c815091f3f43e +size 67107208 diff --git a/train/stackexchange/stackexchange_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00002.mds b/train/stackexchange/stackexchange_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00002.mds new file mode 100644 index 0000000000000000000000000000000000000000..5d4f1d8ed9d9ef6cb09b89f250d11f3738828b39 --- /dev/null +++ b/train/stackexchange/stackexchange_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00002.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:39a6d0d463548859a8823f2a336c5b0ecaa72bba8d5e13e516953efb6a340393 +size 67108087 diff --git a/train/stackexchange/stackexchange_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00003.mds b/train/stackexchange/stackexchange_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00003.mds new file mode 100644 index 0000000000000000000000000000000000000000..6006e20436cdc4b624262eb9acd752b90dd525cd --- /dev/null +++ b/train/stackexchange/stackexchange_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00003.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:66b18ab82ac81bd1b79ea7e0c80d2717faaad8bdd75b73ffd04739e0afe38b68 +size 67107196 diff --git a/train/stackexchange/stackexchange_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00004.mds b/train/stackexchange/stackexchange_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00004.mds new file mode 100644 index 0000000000000000000000000000000000000000..c8b606a64a48e4abfe6c99ee87a8e402076cdf3e --- /dev/null +++ b/train/stackexchange/stackexchange_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00004.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:12bb96674123b697762db8fc6ab48f7dd945ce9ba510dc06f653511e90c48171 +size 67107426 diff --git a/train/stackexchange/stackexchange_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00005.mds b/train/stackexchange/stackexchange_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00005.mds new file mode 100644 index 0000000000000000000000000000000000000000..e997d12b18a8ed3d2f6ecfabde89af1e768c64f2 --- /dev/null +++ b/train/stackexchange/stackexchange_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00005.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:51726b28758f7fce220f5815405ccb8e781daef863c490d5776096f1b81b400f +size 67108350 diff --git a/train/stackexchange/stackexchange_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00006.mds b/train/stackexchange/stackexchange_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00006.mds new file mode 100644 index 0000000000000000000000000000000000000000..6da1b20279cef644be1495ab24088d6066837c39 --- /dev/null +++ b/train/stackexchange/stackexchange_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00006.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0962e66075fc62e783a3a3321b81ee61e74770ef100e6a80ae2233518044b2f4 +size 67108308 diff --git a/train/stackexchange/stackexchange_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00007.mds b/train/stackexchange/stackexchange_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00007.mds new file mode 100644 index 0000000000000000000000000000000000000000..5e0809c73ba6a020a51a62bb5e9a5ba12304198c --- /dev/null +++ b/train/stackexchange/stackexchange_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00007.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:39e5a88b5e40e1b0e73857b738c6b27ba5f2dba65605e66e75f7621f4672e717 +size 67108136 diff --git a/train/stackexchange/stackexchange_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00008.mds b/train/stackexchange/stackexchange_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00008.mds new file mode 100644 index 0000000000000000000000000000000000000000..c31d53e720ee7f19cba9528303356c7a0c546aa5 --- /dev/null +++ b/train/stackexchange/stackexchange_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00008.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:51cf54403584f7ea885bf920de3784e8557e8ecaea2eb8415701bf18ae3324c8 +size 67108796 diff --git a/train/stackexchange/stackexchange_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00009.mds b/train/stackexchange/stackexchange_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00009.mds new file mode 100644 index 0000000000000000000000000000000000000000..71d891e0282103fdc1c284d062287e097b984fed --- /dev/null +++ b/train/stackexchange/stackexchange_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00009.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d9c3ff5ca186a70d620286e276d9e2a213719e70514ae1783abeeb80701ae908 +size 67108782 diff --git a/train/stackexchange/stackexchange_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00010.mds b/train/stackexchange/stackexchange_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00010.mds new file mode 100644 index 0000000000000000000000000000000000000000..75d5b08487e0d29f97424c73981eabf4eb86b918 --- /dev/null +++ b/train/stackexchange/stackexchange_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00010.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ddaaa5b886c2b419a962e61e10acad55e2e9b4595ea1e56869758cb74c21feb9 +size 67108751 diff --git a/train/stackexchange/stackexchange_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00012.mds b/train/stackexchange/stackexchange_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00012.mds new file mode 100644 index 0000000000000000000000000000000000000000..340b67b616ca34a1b0fba2de4cabf480f427868a --- /dev/null +++ b/train/stackexchange/stackexchange_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00012.mds 
@@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6f7cfeccbea634dfc79bab3b4e99760a9162e8bbf44f2869862d99a3e8d959a1 +size 67108614 diff --git a/train/stackexchange/stackexchange_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00013.mds b/train/stackexchange/stackexchange_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00013.mds new file mode 100644 index 0000000000000000000000000000000000000000..e5547dfa7190e313250e7bdd117042646cca0f02 --- /dev/null +++ b/train/stackexchange/stackexchange_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00013.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8f9f8f240c557ffb26117bf3c66742ebab2852a7786ada17e6428bb1ef3bd66a +size 67107678 diff --git a/train/stackexchange/stackexchange_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00014.mds b/train/stackexchange/stackexchange_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00014.mds new file mode 100644 index 0000000000000000000000000000000000000000..44cd67c3ad596575d304d31f2ebfd10994a1a634 --- /dev/null +++ b/train/stackexchange/stackexchange_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00014.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:60fa01b4b03cc93353a18c2f5ba716b7b92d921c1ee84177e67810391552e7bf +size 67108506 diff --git a/train/stackexchange/stackexchange_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00015.mds b/train/stackexchange/stackexchange_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00015.mds new file mode 100644 index 0000000000000000000000000000000000000000..d92f54def3993a71a917cd98b734db837bf1dabe --- /dev/null +++ b/train/stackexchange/stackexchange_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00015.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a9a32ee5ca30d0babe9c0657a50b4df69dc39763ecd5886ce30f9341ffa024d4 +size 67107465 diff --git a/train/stackexchange/stackexchange_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00016.mds b/train/stackexchange/stackexchange_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00016.mds new file mode 100644 index 0000000000000000000000000000000000000000..a0775a25b1a5422f0a623c7c3d77c2e2d9c2d5bf --- /dev/null +++ b/train/stackexchange/stackexchange_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00016.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6a0c767b1be4b659ff6f5fee9d653f434fe19785e6f71e0904c5dc17abf511d5 +size 67108468 diff --git a/train/stackexchange/stackexchange_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00017.mds b/train/stackexchange/stackexchange_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00017.mds new file mode 100644 index 0000000000000000000000000000000000000000..b99b7ec3c33864c7ddfa2e3ba07a0feedd9597d9 --- /dev/null +++ b/train/stackexchange/stackexchange_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00017.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:538d15c3caab24392539eef285be720dec7f98ed990b28d9f8bb360110bc5746 +size 67108432 diff --git a/train/stackexchange/stackexchange_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00018.mds b/train/stackexchange/stackexchange_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00018.mds new file mode 100644 index 0000000000000000000000000000000000000000..fc7c3e505e1f417011b596a5e82e2179b6410b45 --- /dev/null +++ 
b/train/stackexchange/stackexchange_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00018.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b924db7f339f4ee56a5fd7396bf1eba39f2483f1b3ba6eadbcbfb09bbce6489e +size 67108705 diff --git a/train/stackexchange/stackexchange_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00019.mds b/train/stackexchange/stackexchange_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00019.mds new file mode 100644 index 0000000000000000000000000000000000000000..e6587cbfdf02d1fa30f931b499226fb1a7dfb787 --- /dev/null +++ b/train/stackexchange/stackexchange_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00019.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0416f97bcf949c4e6c1130a004800a0cfb33a5e353568b4f5b9316a6401b0da9 +size 67106838 diff --git a/train/stackexchange/stackexchange_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00021.mds b/train/stackexchange/stackexchange_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00021.mds new file mode 100644 index 0000000000000000000000000000000000000000..a1d7efb0c1e906a05daf82a480dbd77c9cf517c0 --- /dev/null +++ b/train/stackexchange/stackexchange_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00021.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b3e33e09a6729416357ae3c0fda6b92927a10915cc8e47fdb5e88309c44f4923 +size 67108477 diff --git a/train/stackexchange/stackexchange_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00022.mds b/train/stackexchange/stackexchange_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00022.mds new file mode 100644 index 0000000000000000000000000000000000000000..488f77b8d39ab10d73d199a3e286f877873d0c9d --- /dev/null +++ b/train/stackexchange/stackexchange_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00022.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8f6d166603df52ea5cb1f736f3be8d07413140a0a9032da906f78e9b3448bf79 +size 67108024 diff --git a/train/stackexchange/stackexchange_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00023.mds b/train/stackexchange/stackexchange_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00023.mds new file mode 100644 index 0000000000000000000000000000000000000000..a0aa284e5126e6322b1b1b9dc02be7520ec216dd --- /dev/null +++ b/train/stackexchange/stackexchange_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00023.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fc7f3a7cd92fb5aa357dccb9ef0476797d83f3262b6060069cf905ec6ac37d02 +size 67108265 diff --git a/train/stackexchange/stackexchange_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00024.mds b/train/stackexchange/stackexchange_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00024.mds new file mode 100644 index 0000000000000000000000000000000000000000..3619d3aa7e5fe2de0997a4ef949922b49f069848 --- /dev/null +++ b/train/stackexchange/stackexchange_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00024.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e586d3a28b01a7da302b52950f60096451eb4bcb21546d93fe924e8de8b9bfe0 +size 22600318
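Note (not part of the diff above): the splits added here each carry a num_tokens.json with "num_tokens" and "num_truncated_tokens" fields, as shown in the hunks. The following is a minimal, hypothetical sketch for aggregating those counters across splits; the directory layout and field names are taken from this diff, but the helper itself is an illustration and assumes the files have been checked out locally.

import json
from pathlib import Path

def total_tokens(root: str = "train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3") -> dict:
    # Sum the per-split token counters recorded in num_tokens.json files.
    totals = {"num_tokens": 0, "num_truncated_tokens": 0}
    for path in Path(root).glob("*/num_tokens.json"):
        with path.open() as f:
            counts = json.load(f)
        totals["num_tokens"] += counts.get("num_tokens", 0)
        totals["num_truncated_tokens"] += counts.get("num_truncated_tokens", 0)
    return totals

if __name__ == "__main__":
    print(total_tokens())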