"""Tests for the mc4_nl_cleaned dataset script."""

from datasets import load_dataset


def test_streaming_dataset():
    datasets = load_dataset("./mc4_nl_cleaned.py", "tiny", streaming=True)
    assert list(datasets.keys()) == ["train", "validation"]

    train_ds = datasets["train"]
    val_ds = datasets["validation"]

    # Check the text lengths of the first two streamed training examples.
    examples = iter(train_ds)
    assert len(next(examples)["text"]) == 1264
    assert len(next(examples)["text"]) == 1091

    # The streamed validation split should yield all 16189 examples.
    assert sum(1 for _ in val_ds) == 16189


def test_batch_dataset():
    datasets = load_dataset("./mc4_nl_cleaned.py", "micro")
    assert list(datasets.keys()) == ["train", "validation"]

    train_ds = datasets["train"]
    val_ds = datasets["validation"]

    # Check the text length of the first training example.
    assert len(next(iter(train_ds))["text"]) == 1264

    # The "micro" config has fixed train/validation split sizes.
    assert train_ds.num_rows == 125938
    assert val_ds.num_rows == 16189


def test_nl_en():
    datasets = load_dataset("./mc4_nl_cleaned.py", "micro_en_nl")
    assert list(datasets.keys()) == ["train", "validation"]

    train_ds = datasets["train"]
    val_ds = datasets["validation"]

    # The mixed config interleaves Dutch and English examples.
    examples = iter(train_ds)
    assert len(next(examples)["text"]) == 1264  # length of 'Japanse bedrijven zijn ...'
    assert len(next(examples)["text"]) == 747  # length of 'Beginners BBQ class ...'

    # The mixed en-nl config has fixed train/validation split sizes.
    assert train_ds.num_rows == 251900
    assert val_ds.num_rows == 32378
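

# A minimal, hypothetical preview helper (not part of the original test suite):
# it streams the first few training examples of a given config and prints their
# text lengths, which is a quick way to sanity-check the expected lengths
# asserted above. The helper name and defaults are assumptions for
# illustration; only load_dataset, the "./mc4_nl_cleaned.py" script path, and
# the config names come from the tests themselves.
def preview_text_lengths(config="tiny", n=3):
    from itertools import islice

    train = load_dataset("./mc4_nl_cleaned.py", config, streaming=True)["train"]
    for example in islice(train, n):
        print(len(example["text"]))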