imthanhlv commited on
Commit
c0c3c21
1 Parent(s): 415426b

added script with val

Browse files
Files changed (1) hide show
  1. binhvq_dedup.py +77 -0
binhvq_dedup.py ADDED
@@ -0,0 +1,77 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import logging
import os

import datasets

_DESC = """
BinhVQ news corpus version 2021 (~20M records)

https://github.com/binhvq/news-corpus

Preprocessed:
- Read mongo dump and export to jsonl
- Clean content with Beautifulsoup
- Concatenate title, sapo and content
- Remove exact match sha256
- Shuffle and split train / val (0.01)

**IMPORTANT**: Please run `pip install lm_dataformat` before load this dataset

"""

# Base URL of the dataset repository; only the validation shard is listed in
# _URLS (the commit adding this script only shipped `val.jsonl.zst`).
_REPO_URL = "https://huggingface.co/datasets/imthanhlv/binhvq_dedup/tree/main/"
_URLS = {
    "val": "val.jsonl.zst",
}

# Guarded import of the optional `lm_dataformat` dependency.
# Bug fix: the original also had a bare `import lm_dataformat` at the very top
# of the file, which raised ImportError before this try/except could run,
# making the helpful install message unreachable. The guarded import below is
# now the only one.
try:
    import lm_dataformat
except ImportError:
    print(
        "Can't import lm_dataformat, please run pip install lm_dataformat and try again"
    )
    exit()
class BinhvqConfig(datasets.BuilderConfig):
    """Builder configuration for the BinhVQ dedup dataset.

    A thin pass-through: every keyword argument is forwarded unchanged to
    ``datasets.BuilderConfig``; no extra options are defined here.
    """

    def __init__(self, **kwargs):
        # Zero-argument super() is equivalent to super(BinhvqConfig, self)
        # in Python 3.
        super().__init__(**kwargs)
class Binhvq(datasets.GeneratorBasedBuilder):
    """Dataset builder for the BinhVQ deduplicated Vietnamese news corpus."""

    BUILDER_CONFIGS = [
        BinhvqConfig(
            name="plain_text",
            version=datasets.Version("1.0.0", ""),
            description="Plain text",
        ),
    ]

    def _info(self):
        """Return dataset metadata: a single string feature named ``text``."""
        return datasets.DatasetInfo(
            description=_DESC,
            features=datasets.Features({"text": datasets.Value("string")}),
            homepage="https://github.com/binhvq/news-corpus",
        )

    def _split_generators(self, dl_manager):
        """Download the archives and declare the splits.

        NOTE(review): both TRAIN and VALIDATION read the same "val" file —
        presumably because only the validation shard has been uploaded so far
        (the commit message says "added script with val"). Confirm before
        relying on the train split being distinct.
        """
        downloaded_files = dl_manager.download_and_extract(_URLS)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": downloaded_files["val"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filepath": downloaded_files["val"]},
            ),
        ]

    def _generate_examples(self, filepath):
        """Yield ``(key, example)`` pairs from an lm_dataformat archive.

        Bug fix: ``GeneratorBasedBuilder`` requires each example to be
        yielded together with a unique key; the original yielded bare
        ``{"text": ...}`` dicts, which breaks dataset generation. We use the
        document index within the file as the key.
        """
        logging.info(f"Generating examples from {filepath}")
        reader = lm_dataformat.Reader(filepath)
        for idx, doc in enumerate(reader.stream_data()):
            yield idx, {"text": doc}