import logging

import datasets


_DESC = """
BinhVQ news corpus version 2021 (~20M records)

https://github.com/binhvq/news-corpus

Preprocessed:
    - Read mongo dump and export to jsonl
    - Clean content with Beautifulsoup
    - Concatenate title, sapo and content
    - Remove exact match sha256
    - Shuffle and split train / val (0.01)

**IMPORTANT**: Please run `pip install lm_dataformat` before load this dataset

"""

_REPO_URL = "https://huggingface.co/datasets/imthanhlv/binhvq_dedup/tree/main/"
_URLS = {
    "train": "train.jsonl.zst",
    "val": "val.jsonl.zst"
}
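# Note: these are relative paths; `dl_manager.download` resolves them against
# the repository that hosts this script on the Hugging Face Hub.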


try:
    import lm_dataformat
except ImportError as err:
    raise ImportError(
        "Could not import lm_dataformat; run `pip install lm_dataformat` and try again"
    ) from err


class BinhvqConfig(datasets.BuilderConfig):
    """BuilderConfig for the BinhVQ news corpus."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)


class Binhvq(datasets.GeneratorBasedBuilder):
    BUILDER_CONFIGS = [
        BinhvqConfig(
            name="text",
            version=datasets.Version("1.0.0", ""),
            description="Text",
        ),
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESC,
            features=datasets.Features({"text": datasets.Value("string")}),
            homepage="https://github.com/binhvq/news-corpus",
        )

    def _split_generators(self, dl_manager):
        downloaded_files = dl_manager.download(_URLS)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": downloaded_files["train"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filepath": downloaded_files["val"]},
            ),
        ]

    def _generate_examples(self, filepath):
        logging.info(f"Generating examples from {filepath}")
        # lm_dataformat streams decompressed documents out of the .jsonl.zst archive.
        reader = lm_dataformat.Reader(filepath)
        for _id, doc in enumerate(reader.read_jsonl_zst(filepath)):
            yield _id, {"text": doc}
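

# Usage sketch (assumes this script is hosted as `imthanhlv/binhvq_dedup` on
# the Hugging Face Hub, per _REPO_URL above):
#
#   pip install lm_dataformat
#
#   import datasets
#   ds = datasets.load_dataset("imthanhlv/binhvq_dedup", "text", split="train")
#   print(ds[0]["text"][:200])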