orionweller committed
Commit 278a4e5
Parent(s): c4f1641
Add files using upload-large-folder tool
This view is limited to 50 files because the commit contains too many changes; see the raw diff for the full change set.
- .gitattributes +32 -0
- train/algebraic-stack/algebraic_stack_train_0000-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds +3 -0
- train/algebraic-stack/algebraic_stack_train_0000-tokenized-chunked-1024-512-128-backfill-nodups/shard.00003.mds +3 -0
- train/algebraic-stack/algebraic_stack_train_0000-tokenized-chunked-1024-512-128-backfill-nodups/shard.00008.mds +3 -0
- train/algebraic-stack/algebraic_stack_train_0000-tokenized-chunked-1024-512-128-backfill-nodups/shard.00012.mds +3 -0
- train/algebraic-stack/algebraic_stack_train_0000-tokenized-chunked-1024-512-128-backfill-nodups/shard.00014.mds +3 -0
- train/algebraic-stack/algebraic_stack_train_0000-tokenized-chunked-1024-512-128-backfill-nodups/shard.00016.mds +3 -0
- train/algebraic-stack/algebraic_stack_train_0000-tokenized-chunked-1024-512-128-backfill-nodups/shard.00017.mds +3 -0
- train/algebraic-stack/algebraic_stack_train_0000-tokenized-chunked-1024-512-128-backfill-nodups/shard.00018.mds +3 -0
- train/algebraic-stack/algebraic_stack_train_0000-tokenized-chunked-1024-512-128-backfill-nodups/shard.00022.mds +3 -0
- train/algebraic-stack/algebraic_stack_train_0000-tokenized-chunked-1024-512-128-backfill-nodups/shard.00023.mds +3 -0
- train/algebraic-stack/algebraic_stack_train_0000-tokenized-chunked-1024-512-128-backfill-nodups/shard.00024.mds +3 -0
- train/algebraic-stack/algebraic_stack_train_0000-tokenized-chunked-1024-512-128-backfill-nodups/shard.00025.mds +3 -0
- train/algebraic-stack/algebraic_stack_train_0000-tokenized-chunked-1024-512-128-backfill-nodups/shard.00027.mds +3 -0
- train/algebraic-stack/algebraic_stack_train_0000-tokenized-chunked-1024-512-128-backfill-nodups/shard.00028.mds +3 -0
- train/algebraic-stack/algebraic_stack_train_0000-tokenized-chunked-1024-512-128-backfill-nodups/shard.00030.mds +3 -0
- train/algebraic-stack/algebraic_stack_train_0000-tokenized-chunked-1024-512-128-backfill-nodups/shard.00035.mds +3 -0
- train/algebraic-stack/algebraic_stack_train_0000-tokenized-chunked-1024-512-128-backfill-nodups/shard.00036.mds +3 -0
- train/algebraic-stack/algebraic_stack_train_0000-tokenized-chunked-1024-512-128-backfill-nodups/shard.00037.mds +3 -0
- train/algebraic-stack/algebraic_stack_train_0001-tokenized-chunked-1024-512-128-backfill-nodups/shard.00038.mds +3 -0
- train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_10639-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
- train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_10639-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
- train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_11025-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
- train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_11025-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
- train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_11446-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
- train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_11446-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
- train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_16700-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
- train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_16700-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
- train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_17882-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
- train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_17882-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
- train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_19239-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
- train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_20610-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
- train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_20610-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
- train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_20638-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
- train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_20638-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
- train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_22588-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
- train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_22588-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
- train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_30055-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
- train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_30055-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
- train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_30190-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
- train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_30190-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
- train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_35874-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
- train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_35874-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
- train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_36924-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
- train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_36924-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
- train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_38421-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
- train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_38421-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
- train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_42412-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
- train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_42412-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
- train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_47421-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
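The commit message says these shards were pushed with the upload-large-folder tool. A minimal sketch of that workflow, assuming huggingface_hub's HfApi.upload_large_folder API and placeholder repo/folder names (not taken from this commit):

```python
# Hedged sketch: resumable upload of a local folder of MDS shards.
# The repo id and folder path below are placeholders, not this repo's values.
from huggingface_hub import HfApi

api = HfApi()
api.upload_large_folder(
    repo_id="your-user/your-tokenized-dataset",  # placeholder repo id
    repo_type="dataset",
    folder_path="./train",  # local folder holding the shard directories
)
```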
.gitattributes
CHANGED
@@ -27263,3 +27263,35 @@ train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_70702-tokenized-chun
 train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_81903-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
 train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_87800-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
 train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_32597-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_87800-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_39077-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_10275-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_78471-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_10275-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_4270-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_17535-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_39077-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_95960-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_78471-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_95960-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_17535-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_17874-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+train/algebraic-stack/algebraic_stack_train_0000-tokenized-chunked-1024-512-128-backfill-nodups/shard.00003.mds filter=lfs diff=lfs merge=lfs -text
+train/algebraic-stack/algebraic_stack_train_0000-tokenized-chunked-1024-512-128-backfill-nodups/shard.00024.mds filter=lfs diff=lfs merge=lfs -text
+train/algebraic-stack/algebraic_stack_train_0000-tokenized-chunked-1024-512-128-backfill-nodups/shard.00014.mds filter=lfs diff=lfs merge=lfs -text
+train/algebraic-stack/algebraic_stack_train_0000-tokenized-chunked-1024-512-128-backfill-nodups/shard.00012.mds filter=lfs diff=lfs merge=lfs -text
+train/algebraic-stack/algebraic_stack_train_0000-tokenized-chunked-1024-512-128-backfill-nodups/shard.00028.mds filter=lfs diff=lfs merge=lfs -text
+train/algebraic-stack/algebraic_stack_train_0000-tokenized-chunked-1024-512-128-backfill-nodups/shard.00022.mds filter=lfs diff=lfs merge=lfs -text
+train/algebraic-stack/algebraic_stack_train_0000-tokenized-chunked-1024-512-128-backfill-nodups/shard.00027.mds filter=lfs diff=lfs merge=lfs -text
+train/algebraic-stack/algebraic_stack_train_0000-tokenized-chunked-1024-512-128-backfill-nodups/shard.00025.mds filter=lfs diff=lfs merge=lfs -text
+train/algebraic-stack/algebraic_stack_train_0000-tokenized-chunked-1024-512-128-backfill-nodups/shard.00035.mds filter=lfs diff=lfs merge=lfs -text
+train/algebraic-stack/algebraic_stack_train_0000-tokenized-chunked-1024-512-128-backfill-nodups/shard.00018.mds filter=lfs diff=lfs merge=lfs -text
+train/algebraic-stack/algebraic_stack_train_0000-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+train/algebraic-stack/algebraic_stack_train_0000-tokenized-chunked-1024-512-128-backfill-nodups/shard.00030.mds filter=lfs diff=lfs merge=lfs -text
+train/algebraic-stack/algebraic_stack_train_0000-tokenized-chunked-1024-512-128-backfill-nodups/shard.00017.mds filter=lfs diff=lfs merge=lfs -text
+train/algebraic-stack/algebraic_stack_train_0000-tokenized-chunked-1024-512-128-backfill-nodups/shard.00036.mds filter=lfs diff=lfs merge=lfs -text
+train/algebraic-stack/algebraic_stack_train_0000-tokenized-chunked-1024-512-128-backfill-nodups/shard.00008.mds filter=lfs diff=lfs merge=lfs -text
+train/algebraic-stack/algebraic_stack_train_0001-tokenized-chunked-1024-512-128-backfill-nodups/shard.00038.mds filter=lfs diff=lfs merge=lfs -text
+train/algebraic-stack/algebraic_stack_train_0000-tokenized-chunked-1024-512-128-backfill-nodups/shard.00023.mds filter=lfs diff=lfs merge=lfs -text
+train/algebraic-stack/algebraic_stack_train_0000-tokenized-chunked-1024-512-128-backfill-nodups/shard.00037.mds filter=lfs diff=lfs merge=lfs -text
+train/algebraic-stack/algebraic_stack_train_0000-tokenized-chunked-1024-512-128-backfill-nodups/shard.00016.mds filter=lfs diff=lfs merge=lfs -text
train/algebraic-stack/algebraic_stack_train_0000-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:3080297ec6810be5e2b15a0f42621c3ef9504ab0e3ecf727a640b816da86dfc7
size 67107235
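Each of these .mds entries is stored as a Git LFS pointer rather than the shard bytes themselves; the three fields above (version, oid, size) are the entire pointer. A minimal sketch, using a hypothetical parse_lfs_pointer helper (not part of this repo), of reading those fields back:

```python
# Hypothetical helper: parse a Git LFS pointer file like the one above
# into its version / oid / size fields.
from pathlib import Path

def parse_lfs_pointer(path: str) -> dict:
    fields = {}
    for line in Path(path).read_text().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    fields["size"] = int(fields["size"])  # byte size of the real shard in LFS storage
    return fields

# For the pointer above this would yield roughly:
# {"version": "https://git-lfs.github.com/spec/v1",
#  "oid": "sha256:3080297ec6...", "size": 67107235}
```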
train/algebraic-stack/algebraic_stack_train_0000-tokenized-chunked-1024-512-128-backfill-nodups/shard.00003.mds
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e4d8118b15e05496e412fb94fa4f81c445c1ba8f8636957830c0cac16568cd96
size 67107886
train/algebraic-stack/algebraic_stack_train_0000-tokenized-chunked-1024-512-128-backfill-nodups/shard.00008.mds
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b87b05f1b9319f590d88fe693b19a6cd0349e8a76b595a1a91220879fc3ec049
size 67107815
train/algebraic-stack/algebraic_stack_train_0000-tokenized-chunked-1024-512-128-backfill-nodups/shard.00012.mds
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b492a0ddd5de360b4cd8fecd7ef66a3c42fd991e2758fcfa8b29ed15b820f02d
size 67108864
train/algebraic-stack/algebraic_stack_train_0000-tokenized-chunked-1024-512-128-backfill-nodups/shard.00014.mds
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:1bfafd87bcc33b2f10476fbf36bec1fd85970d88cbe428f5526d28c81dd34751
size 67107454
train/algebraic-stack/algebraic_stack_train_0000-tokenized-chunked-1024-512-128-backfill-nodups/shard.00016.mds
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:5f9bcaac33c5c4b7189ae693057807a8b459ddb8048b652090bc1d8c89bccdf4
size 67107669
train/algebraic-stack/algebraic_stack_train_0000-tokenized-chunked-1024-512-128-backfill-nodups/shard.00017.mds
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b77d1ac4db73483b6395d7acddde4f2abae819e544de95427157117397d8aa3d
size 67107174
train/algebraic-stack/algebraic_stack_train_0000-tokenized-chunked-1024-512-128-backfill-nodups/shard.00018.mds
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:33f6ab95691b5f5611cb41d5d013789e95220abbafc6664e68ed43bc99055038
size 67106790
train/algebraic-stack/algebraic_stack_train_0000-tokenized-chunked-1024-512-128-backfill-nodups/shard.00022.mds
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f1d4b2f2d68027996b24da9928a22e4696674e3d78a280c03b8edb3bbd82eb1a
size 67107200
train/algebraic-stack/algebraic_stack_train_0000-tokenized-chunked-1024-512-128-backfill-nodups/shard.00023.mds
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:741613371c870625676dbff91dc589a5dfc1471a66d72e648a051e53f32119db
size 67108029
train/algebraic-stack/algebraic_stack_train_0000-tokenized-chunked-1024-512-128-backfill-nodups/shard.00024.mds
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b8a5d0472cfe6ef2aee5aeedc85b5440b2d6fee714eb6ff7e0a6fc7f8aaf6a84
size 67106802
train/algebraic-stack/algebraic_stack_train_0000-tokenized-chunked-1024-512-128-backfill-nodups/shard.00025.mds
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:32ef31c3fbf9e94a61bc5d3c85db97fadf18541a15bc3f3894752fa14ef7a5c4
size 67107054
train/algebraic-stack/algebraic_stack_train_0000-tokenized-chunked-1024-512-128-backfill-nodups/shard.00027.mds
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:7d49a5b15f2b7bb85284f1d72f6f38d8a5893c26cd4b3963c065375653cd1782
size 67107127
train/algebraic-stack/algebraic_stack_train_0000-tokenized-chunked-1024-512-128-backfill-nodups/shard.00028.mds
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:67743f220b1f4f74657a95b6ffe8b7c2edb2666bbbe1fee30e16e3adc97cc830
size 67108157
train/algebraic-stack/algebraic_stack_train_0000-tokenized-chunked-1024-512-128-backfill-nodups/shard.00030.mds
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:cd9ae48665edf7bd4946897480b9ef212a046cef9c8292e8061eb129641d042b
size 67106963
train/algebraic-stack/algebraic_stack_train_0000-tokenized-chunked-1024-512-128-backfill-nodups/shard.00035.mds
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:49b7c8cfa4fd83ea9b50279fe7c6e3555211b6cbc1f83bc95a0019596e53ac82
size 67108767
train/algebraic-stack/algebraic_stack_train_0000-tokenized-chunked-1024-512-128-backfill-nodups/shard.00036.mds
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:cdd72c49faeaca4963c1dd8d8e25661fb7e8961064141e0cad05a4315d43d970
size 67107941
train/algebraic-stack/algebraic_stack_train_0000-tokenized-chunked-1024-512-128-backfill-nodups/shard.00037.mds
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:04080d755abb1836504b6912b3f3bedd12037a73b6c3f89b594fc88eae9c3f33
size 67107658
train/algebraic-stack/algebraic_stack_train_0001-tokenized-chunked-1024-512-128-backfill-nodups/shard.00038.mds
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:d150f9b67a7997c9372a784fbab6d03f0d3da8f61d82d150173576643e0c3a2a
size 67108767
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_10639-tokenized-chunked-1024-512-128-backfill-nodups/index.json
ADDED
@@ -0,0 +1 @@
{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107180, "hashes": {}}, "samples": 43910, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47649932, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 13471920, "hashes": {}}, "samples": 8440, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 9579408, "hashes": {}}}], "version": 2}
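The index.json above lists every MDS shard of this split together with its raw and zstd-compressed size and its sample count. A minimal sketch (not from the repo) of inspecting it with the standard json module:

```python
# Hedged sketch: read the split's MDS index and total up samples and raw bytes.
import json

index_path = ("train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/"
              "split_10639-tokenized-chunked-1024-512-128-backfill-nodups/index.json")
with open(index_path) as f:
    index = json.load(f)

total_samples = sum(shard["samples"] for shard in index["shards"])
raw_bytes = sum(shard["raw_data"]["bytes"] for shard in index["shards"])
print(total_samples, raw_bytes)  # 52350 samples, 80579100 raw bytes for this split
```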
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_10639-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json
ADDED
@@ -0,0 +1,4 @@
{
"num_tokens": 38669198,
"num_truncated_tokens": 38638609
}
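Each num_tokens.json appears to carry two counters for its split; under that assumption, their difference is the small number of tokens lost to truncation. A hedged sketch:

```python
# Hedged sketch: compare the two counters reported for a split.
import json

counts_path = ("train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/"
               "split_10639-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json")
with open(counts_path) as f:
    counts = json.load(f)

gap = counts["num_tokens"] - counts["num_truncated_tokens"]
print(gap)  # 30589 for split_10639
```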
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_11025-tokenized-chunked-1024-512-128-backfill-nodups/index.json
ADDED
@@ -0,0 +1 @@
{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107026, "hashes": {}}, "samples": 42993, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47445456, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 18452202, "hashes": {}}, "samples": 11795, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 13017565, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_11025-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json
ADDED
@@ -0,0 +1,4 @@
{
"num_tokens": 41083664,
"num_truncated_tokens": 41049074
}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_11446-tokenized-chunked-1024-512-128-backfill-nodups/index.json
ADDED
@@ -0,0 +1 @@
{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107624, "hashes": {}}, "samples": 43946, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47678065, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 11496046, "hashes": {}}, "samples": 7520, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 8242349, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_11446-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json
ADDED
@@ -0,0 +1,4 @@
{
"num_tokens": 37708945,
"num_truncated_tokens": 37679761
}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_16700-tokenized-chunked-1024-512-128-backfill-nodups/index.json
ADDED
@@ -0,0 +1 @@
{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108137, "hashes": {}}, "samples": 42551, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47690788, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 21155823, "hashes": {}}, "samples": 13615, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 15035410, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_16700-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json
ADDED
@@ -0,0 +1,4 @@
{
"num_tokens": 42393481,
"num_truncated_tokens": 42356307
}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_17882-tokenized-chunked-1024-512-128-backfill-nodups/index.json
ADDED
@@ -0,0 +1 @@
{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108392, "hashes": {}}, "samples": 44200, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47793641, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 10661894, "hashes": {}}, "samples": 7104, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 7593054, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_17882-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json
ADDED
@@ -0,0 +1,4 @@
{
"num_tokens": 37297424,
"num_truncated_tokens": 37269178
}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_19239-tokenized-chunked-1024-512-128-backfill-nodups/index.json
ADDED
@@ -0,0 +1 @@
{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108589, "hashes": {}}, "samples": 43419, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47527441, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 15345260, "hashes": {}}, "samples": 9867, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 10904667, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_20610-tokenized-chunked-1024-512-128-backfill-nodups/index.json
ADDED
@@ -0,0 +1 @@
{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108821, "hashes": {}}, "samples": 42530, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47712650, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 21046745, "hashes": {}}, "samples": 13512, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 14982989, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_20610-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json
ADDED
@@ -0,0 +1,4 @@
{
"num_tokens": 42342963,
"num_truncated_tokens": 42306242
}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_20638-tokenized-chunked-1024-512-128-backfill-nodups/index.json
ADDED
@@ -0,0 +1 @@
{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107266, "hashes": {}}, "samples": 42389, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47711959, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 21918968, "hashes": {}}, "samples": 14120, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 15584217, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_20638-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json
ADDED
@@ -0,0 +1,4 @@
{
"num_tokens": 42763963,
"num_truncated_tokens": 42726215
}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_22588-tokenized-chunked-1024-512-128-backfill-nodups/index.json
ADDED
@@ -0,0 +1 @@
{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108570, "hashes": {}}, "samples": 43660, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47621245, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 14542057, "hashes": {}}, "samples": 9518, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 10398714, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_22588-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json
ADDED
@@ -0,0 +1,4 @@
{
"num_tokens": 39179559,
"num_truncated_tokens": 39147160
}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_30055-tokenized-chunked-1024-512-128-backfill-nodups/index.json
ADDED
@@ -0,0 +1 @@
{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107934, "hashes": {}}, "samples": 44150, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47696854, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 10573212, "hashes": {}}, "samples": 6969, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 7550767, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_30055-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json
ADDED
@@ -0,0 +1,4 @@
{
"num_tokens": 37258525,
"num_truncated_tokens": 37230155
}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_30190-tokenized-chunked-1024-512-128-backfill-nodups/index.json
ADDED
@@ -0,0 +1 @@
{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107540, "hashes": {}}, "samples": 44136, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47704936, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 11042346, "hashes": {}}, "samples": 7113, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 7819649, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_30190-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json
ADDED
@@ -0,0 +1,4 @@
{
"num_tokens": 37488793,
"num_truncated_tokens": 37460188
}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_35874-tokenized-chunked-1024-512-128-backfill-nodups/index.json
ADDED
@@ -0,0 +1 @@
{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67106854, "hashes": {}}, "samples": 43964, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47794748, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 12119103, "hashes": {}}, "samples": 7976, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 8642645, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_35874-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json
ADDED
@@ -0,0 +1,4 @@
{
"num_tokens": 38005475,
"num_truncated_tokens": 37975638
}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_36924-tokenized-chunked-1024-512-128-backfill-nodups/index.json
ADDED
@@ -0,0 +1 @@
{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107140, "hashes": {}}, "samples": 43606, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 48009011, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 15082910, "hashes": {}}, "samples": 9675, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 10803128, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_36924-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json
ADDED
@@ -0,0 +1,4 @@
{
"num_tokens": 39445807,
"num_truncated_tokens": 39413150
}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_38421-tokenized-chunked-1024-512-128-backfill-nodups/index.json
ADDED
@@ -0,0 +1 @@
{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108430, "hashes": {}}, "samples": 43463, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47695378, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 14410719, "hashes": {}}, "samples": 9467, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 10256419, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_38421-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json
ADDED
@@ -0,0 +1,4 @@
{
"num_tokens": 39121307,
"num_truncated_tokens": 39089642
}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_42412-tokenized-chunked-1024-512-128-backfill-nodups/index.json
ADDED
@@ -0,0 +1 @@
{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107237, "hashes": {}}, "samples": 43181, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47635037, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 18442520, "hashes": {}}, "samples": 11834, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 13099050, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_42412-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json
ADDED
@@ -0,0 +1,4 @@
{
"num_tokens": 41072135,
"num_truncated_tokens": 41037306
}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_47421-tokenized-chunked-1024-512-128-backfill-nodups/index.json
ADDED
@@ -0,0 +1 @@
{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107670, "hashes": {}}, "samples": 44456, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47807846, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 9458481, "hashes": {}}, "samples": 6376, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 6801754, "hashes": {}}}], "version": 2}