Multilinguality: monolingual, en-nl
Language Creators: found
Annotations Creators: no-annotation
Source Datasets: extended
Yeb Havinga committed
Commit: 115b07a
1 Parent(s): 8e6113c

In streaming mode shuffle train files

Files changed (2):
  1. mc4_nl_cleaned.py +4 -0
  2. test_mc4_nl_cleaned.py +2 -2
mc4_nl_cleaned.py CHANGED
@@ -19,6 +19,7 @@ import json
 import gzip
 import textwrap
 import datasets
+import random
 from itertools import zip_longest
 
 logger = datasets.logging.get_logger(__name__)
@@ -151,6 +152,9 @@ class Mc4(datasets.GeneratorBasedBuilder):
                        n_shards=8 if split == "validation" else 1024,
                    )
                )
+        # Shuffle data in streaming mode, so restarts will not always start with the same data
+        if dl_manager.is_streaming:
+            random.shuffle(data_urls["train"])
         train_downloaded_files = dl_manager.download(data_urls["train"])
         validation_downloaded_files = dl_manager.download(data_urls["validation"])
         return [
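For context, a minimal sketch of the code path this commit changes: a streaming load is the case where dl_manager.is_streaming is true, so each run now walks the train shards in a different order. The Hub repository id yhavinga/mc4_nl_cleaned and the tiny config name below are assumptions for illustration, not taken from this commit.

# Hedged sketch: a streaming load that would exercise the new shuffle branch.
# Repository id and config name are assumptions, not from this commit.
from datasets import load_dataset

train_ds = load_dataset(
    "yhavinga/mc4_nl_cleaned",  # assumed Hub repository id
    "tiny",                     # assumed config name
    split="train",
    streaming=True,             # the path on which dl_manager.is_streaming is True
)

# Because the list of train shard URLs is shuffled per run, the first
# examples differ between restarts; the shard contents themselves do not change.
for example in train_ds.take(2):
    print(len(example["text"]))

Only the order of the train shard URLs changes; validation files are left alone, which is why the validation count in the test below is still checked exactly.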
test_mc4_nl_cleaned.py CHANGED
@@ -13,9 +13,9 @@ def test_streaming_dataset():
 
     i = iter(train_ds)
     e = next(i)
-    assert len(e["text"]) == 1264
+    assert len(e["text"]) != 1264
     e = next(i)
-    assert len(e["text"]) == 1091
+    assert len(e["text"]) != 1091
 
     assert sum(1 for _ in iter(val_ds)) == 16189
 
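The two equality assertions become inequalities because, with the shard order randomized, the first streamed training examples are no longer the same on every run; the validation count is unaffected and stays an exact check. As a sketch only (not part of this commit), a test that still wants a deterministic first example could seed Python's global random module before loading, since the loading script's random.shuffle draws from that shared state; the seed, repository id, and config name here are assumptions.

# Hedged sketch: seeding the global random module so that the shard shuffle
# performed by the loading script becomes repeatable within a test run.
# Assumes nothing else consumes global random state before the shuffle happens.
import random

from datasets import load_dataset

random.seed(42)  # illustrative seed
train_ds = load_dataset(
    "yhavinga/mc4_nl_cleaned",  # assumed Hub repository id
    "tiny",                     # assumed config name
    split="train",
    streaming=True,
)
first = next(iter(train_ds))
assert isinstance(first["text"], str)  # the exact length now depends on the seeded shard order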