Datasets:
updated readme
Browse files- README.md +4 -57
- v1/train/s2ag/00000.json.gz → data/v1/train-00000-of-00020.json.gz +0 -0
- v1/train/s2ag/00001.json.gz → data/v1/train-00001-of-00020.json.gz +0 -0
- v1/train/s2ag/00002.json.gz → data/v1/train-00002-of-00020.json.gz +0 -0
- v1/train/s2ag/00003.json.gz → data/v1/train-00003-of-00020.json.gz +0 -0
- v1/train/s2ag/00004.json.gz → data/v1/train-00004-of-00020.json.gz +0 -0
- v1/train/s2ag/00005.json.gz → data/v1/train-00005-of-00020.json.gz +0 -0
- v1/train/s2ag/00006.json.gz → data/v1/train-00006-of-00020.json.gz +0 -0
- v1/train/s2ag/00007.json.gz → data/v1/train-00007-of-00020.json.gz +0 -0
- v1/train/s2ag/00008.json.gz → data/v1/train-00008-of-00020.json.gz +0 -0
- v1/train/s2ag/00009.json.gz → data/v1/train-00009-of-00020.json.gz +0 -0
- v1/train/s2orc/00000.json.gz → data/v1/train-00010-of-00020.json.gz +0 -0
- v1/train/s2orc/00001.json.gz → data/v1/train-00011-of-00020.json.gz +0 -0
- v1/train/s2orc/00002.json.gz → data/v1/train-00012-of-00020.json.gz +0 -0
- v1/train/s2orc/00003.json.gz → data/v1/train-00013-of-00020.json.gz +0 -0
- v1/train/s2orc/00004.json.gz → data/v1/train-00014-of-00020.json.gz +0 -0
- v1/train/s2orc/00005.json.gz → data/v1/train-00015-of-00020.json.gz +0 -0
- v1/train/s2orc/00006.json.gz → data/v1/train-00016-of-00020.json.gz +0 -0
- v1/train/s2orc/00007.json.gz → data/v1/train-00017-of-00020.json.gz +0 -0
- v1/train/s2orc/00008.json.gz → data/v1/train-00018-of-00020.json.gz +0 -0
- v1/train/s2orc/00009.json.gz → data/v1/train-00019-of-00020.json.gz +0 -0
- v1/validation/s2ag/00000.json.gz → data/v1/validation-00000-of-00002.json.gz +0 -0
- v1/validation/s2orc/00000.json.gz → data/v1/validation-00001-of-00002.json.gz +0 -0
- v2/train/s2ag/00000.json.gz → data/v2/train-00000-of-00020.json.gz +0 -0
- v2/train/s2ag/00001.json.gz → data/v2/train-00001-of-00020.json.gz +0 -0
- v2/train/s2ag/00002.json.gz → data/v2/train-00002-of-00020.json.gz +0 -0
- v2/train/s2ag/00003.json.gz → data/v2/train-00003-of-00020.json.gz +0 -0
- v2/train/s2ag/00004.json.gz → data/v2/train-00004-of-00020.json.gz +0 -0
- v2/train/s2ag/00005.json.gz → data/v2/train-00005-of-00020.json.gz +0 -0
- v2/train/s2ag/00006.json.gz → data/v2/train-00006-of-00020.json.gz +0 -0
- v2/train/s2ag/00007.json.gz → data/v2/train-00007-of-00020.json.gz +0 -0
- v2/train/s2ag/00008.json.gz → data/v2/train-00008-of-00020.json.gz +0 -0
- v2/train/s2ag/00009.json.gz → data/v2/train-00009-of-00020.json.gz +0 -0
- v2/train/s2orc/00000.json.gz → data/v2/train-00010-of-00020.json.gz +0 -0
- v2/train/s2orc/00001.json.gz → data/v2/train-00011-of-00020.json.gz +0 -0
- v2/train/s2orc/00002.json.gz → data/v2/train-00012-of-00020.json.gz +0 -0
- v2/train/s2orc/00003.json.gz → data/v2/train-00013-of-00020.json.gz +0 -0
- v2/train/s2orc/00004.json.gz → data/v2/train-00014-of-00020.json.gz +0 -0
- v2/train/s2orc/00005.json.gz → data/v2/train-00015-of-00020.json.gz +0 -0
- v2/train/s2orc/00006.json.gz → data/v2/train-00016-of-00020.json.gz +0 -0
- v2/train/s2orc/00007.json.gz → data/v2/train-00017-of-00020.json.gz +0 -0
- v2/train/s2orc/00008.json.gz → data/v2/train-00018-of-00020.json.gz +0 -0
- v2/train/s2orc/00009.json.gz → data/v2/train-00019-of-00020.json.gz +0 -0
- v2/validation/s2ag/00000.json.gz → data/v2/validation-00000-of-00002.json.gz +0 -0
- v2/validation/s2orc/00000.json.gz → data/v2/validation-00001-of-00002.json.gz +0 -0
- pes2o.py +178 -0
README.md
CHANGED
@@ -1,8 +1,9 @@
|
|
1 |
---
|
2 |
-
license:
|
3 |
- odc-by
|
4 |
task_categories:
|
5 |
- text-generation
|
|
|
6 |
language:
|
7 |
- en
|
8 |
tags:
|
@@ -29,60 +30,6 @@ size_categories:
|
|
29 |
- 10B<n<100B
|
30 |
source_datasets:
|
31 |
- allenai/s2orc
|
32 |
-
|
33 |
-
config_names:
|
34 |
-
- v1
|
35 |
-
- v2
|
36 |
-
|
37 |
-
dataset_info:
|
38 |
-
- config_name: v1
|
39 |
-
features:
|
40 |
-
- name: added
|
41 |
-
dtype: string
|
42 |
-
- name: created
|
43 |
-
dtype: string
|
44 |
-
- name: id
|
45 |
-
dtype: string
|
46 |
-
- name: source
|
47 |
-
dtype: string
|
48 |
-
- name: text
|
49 |
-
dtype: string
|
50 |
-
- name: version
|
51 |
-
dtype: string
|
52 |
-
splits:
|
53 |
-
- name: train
|
54 |
-
num_bytes: 100145555091
|
55 |
-
num_examples: 67624463
|
56 |
-
- name: validation
|
57 |
-
num_bytes: 556447813
|
58 |
-
num_examples: 162551
|
59 |
-
download_size: 100702002904
|
60 |
-
dataset_size: 67787014
|
61 |
-
- config_name: v2
|
62 |
-
features:
|
63 |
-
- name: added
|
64 |
-
dtype: string
|
65 |
-
- name: created
|
66 |
-
dtype: string
|
67 |
-
- name: id
|
68 |
-
dtype: string
|
69 |
-
- name: source
|
70 |
-
dtype: string
|
71 |
-
- name: text
|
72 |
-
dtype: string
|
73 |
-
- name: version
|
74 |
-
dtype: string
|
75 |
-
splits:
|
76 |
-
- name: train
|
77 |
-
num_bytes: 86572382178
|
78 |
-
num_examples: 38811179
|
79 |
-
- name: validation
|
80 |
-
num_bytes: 556854302
|
81 |
-
num_examples: 161032
|
82 |
-
download_size: 87129236480
|
83 |
-
dataset_size: 38972211
|
84 |
-
|
85 |
-
|
86 |
---
|
87 |
|
88 |
# PES2O 🌿🎓
|
@@ -182,9 +129,9 @@ Unfiltered, the corpus contains 91.1M papers and 15.5B whitespace-separated toke
|
|
182 |
|
183 |
### Processing
|
184 |
|
185 |
-
PES2o V2 is largely the same as V1, but it includes additional heuristics s2ag aimed at filtering out OCR errors from abstract.
|
186 |
|
187 |
-
First, we check if the abstract was obtained from Semantic Scholar sources that are likely to contain OCR'ed content. For any abstract derived from those sources, we count how often the text contains subsequences matching `\b([A-Za-z]\s)([a-z]\s)*[A-Za-z]\b`, i.e. individual alpha letters separated by a space. This heuristic matches cases such as `A b stra ct` (2 matching subsequences), where the OCR parser inserted erroneous spaces.
|
188 |
Any abstract with more than 4 matching subsequences is removed.
|
189 |
|
190 |
|
|
|
1 |
---
|
2 |
+
license:
|
3 |
- odc-by
|
4 |
task_categories:
|
5 |
- text-generation
|
6 |
+
- fill-mask
|
7 |
language:
|
8 |
- en
|
9 |
tags:
|
|
|
30 |
- 10B<n<100B
|
31 |
source_datasets:
|
32 |
- allenai/s2orc
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
33 |
---
|
34 |
|
35 |
# PES2O 🌿🎓
|
|
|
129 |
|
130 |
### Processing
|
131 |
|
132 |
+
PES2o V2 is largely the same as V1, but it includes additional heuristics for the s2ag subset aimed at filtering out OCR errors from abstracts.
|
133 |
|
134 |
+
First, we check if the abstract was obtained from Semantic Scholar sources that are likely to contain OCR'ed content. For any abstract derived from those sources, we count how often the text contains subsequences matching `\b([A-Za-z]\s)([a-z]\s)*[A-Za-z]\b`, i.e. individual alpha letters separated by a space. This heuristic matches cases such as `A b stra ct` (2 matching subsequences), where the OCR parser inserted erroneous spaces.
|
135 |
Any abstract with more than 4 matching subsequences is removed.
|
136 |
|
137 |
|
v1/train/s2ag/00000.json.gz β data/v1/train-00000-of-00020.json.gz
RENAMED
File without changes
|
v1/train/s2ag/00001.json.gz β data/v1/train-00001-of-00020.json.gz
RENAMED
File without changes
|
v1/train/s2ag/00002.json.gz β data/v1/train-00002-of-00020.json.gz
RENAMED
File without changes
|
v1/train/s2ag/00003.json.gz β data/v1/train-00003-of-00020.json.gz
RENAMED
File without changes
|
v1/train/s2ag/00004.json.gz β data/v1/train-00004-of-00020.json.gz
RENAMED
File without changes
|
v1/train/s2ag/00005.json.gz β data/v1/train-00005-of-00020.json.gz
RENAMED
File without changes
|
v1/train/s2ag/00006.json.gz β data/v1/train-00006-of-00020.json.gz
RENAMED
File without changes
|
v1/train/s2ag/00007.json.gz β data/v1/train-00007-of-00020.json.gz
RENAMED
File without changes
|
v1/train/s2ag/00008.json.gz β data/v1/train-00008-of-00020.json.gz
RENAMED
File without changes
|
v1/train/s2ag/00009.json.gz β data/v1/train-00009-of-00020.json.gz
RENAMED
File without changes
|
v1/train/s2orc/00000.json.gz β data/v1/train-00010-of-00020.json.gz
RENAMED
File without changes
|
v1/train/s2orc/00001.json.gz β data/v1/train-00011-of-00020.json.gz
RENAMED
File without changes
|
v1/train/s2orc/00002.json.gz β data/v1/train-00012-of-00020.json.gz
RENAMED
File without changes
|
v1/train/s2orc/00003.json.gz β data/v1/train-00013-of-00020.json.gz
RENAMED
File without changes
|
v1/train/s2orc/00004.json.gz β data/v1/train-00014-of-00020.json.gz
RENAMED
File without changes
|
v1/train/s2orc/00005.json.gz β data/v1/train-00015-of-00020.json.gz
RENAMED
File without changes
|
v1/train/s2orc/00006.json.gz β data/v1/train-00016-of-00020.json.gz
RENAMED
File without changes
|
v1/train/s2orc/00007.json.gz β data/v1/train-00017-of-00020.json.gz
RENAMED
File without changes
|
v1/train/s2orc/00008.json.gz β data/v1/train-00018-of-00020.json.gz
RENAMED
File without changes
|
v1/train/s2orc/00009.json.gz β data/v1/train-00019-of-00020.json.gz
RENAMED
File without changes
|
v1/validation/s2ag/00000.json.gz β data/v1/validation-00000-of-00002.json.gz
RENAMED
File without changes
|
v1/validation/s2orc/00000.json.gz β data/v1/validation-00001-of-00002.json.gz
RENAMED
File without changes
|
v2/train/s2ag/00000.json.gz β data/v2/train-00000-of-00020.json.gz
RENAMED
File without changes
|
v2/train/s2ag/00001.json.gz β data/v2/train-00001-of-00020.json.gz
RENAMED
File without changes
|
v2/train/s2ag/00002.json.gz β data/v2/train-00002-of-00020.json.gz
RENAMED
File without changes
|
v2/train/s2ag/00003.json.gz β data/v2/train-00003-of-00020.json.gz
RENAMED
File without changes
|
v2/train/s2ag/00004.json.gz β data/v2/train-00004-of-00020.json.gz
RENAMED
File without changes
|
v2/train/s2ag/00005.json.gz β data/v2/train-00005-of-00020.json.gz
RENAMED
File without changes
|
v2/train/s2ag/00006.json.gz β data/v2/train-00006-of-00020.json.gz
RENAMED
File without changes
|
v2/train/s2ag/00007.json.gz β data/v2/train-00007-of-00020.json.gz
RENAMED
File without changes
|
v2/train/s2ag/00008.json.gz β data/v2/train-00008-of-00020.json.gz
RENAMED
File without changes
|
v2/train/s2ag/00009.json.gz β data/v2/train-00009-of-00020.json.gz
RENAMED
File without changes
|
v2/train/s2orc/00000.json.gz β data/v2/train-00010-of-00020.json.gz
RENAMED
File without changes
|
v2/train/s2orc/00001.json.gz β data/v2/train-00011-of-00020.json.gz
RENAMED
File without changes
|
v2/train/s2orc/00002.json.gz β data/v2/train-00012-of-00020.json.gz
RENAMED
File without changes
|
v2/train/s2orc/00003.json.gz β data/v2/train-00013-of-00020.json.gz
RENAMED
File without changes
|
v2/train/s2orc/00004.json.gz β data/v2/train-00014-of-00020.json.gz
RENAMED
File without changes
|
v2/train/s2orc/00005.json.gz β data/v2/train-00015-of-00020.json.gz
RENAMED
File without changes
|
v2/train/s2orc/00006.json.gz β data/v2/train-00016-of-00020.json.gz
RENAMED
File without changes
|
v2/train/s2orc/00007.json.gz β data/v2/train-00017-of-00020.json.gz
RENAMED
File without changes
|
v2/train/s2orc/00008.json.gz β data/v2/train-00018-of-00020.json.gz
RENAMED
File without changes
|
v2/train/s2orc/00009.json.gz β data/v2/train-00019-of-00020.json.gz
RENAMED
File without changes
|
v2/validation/s2ag/00000.json.gz β data/v2/validation-00000-of-00002.json.gz
RENAMED
File without changes
|
v2/validation/s2orc/00000.json.gz β data/v2/validation-00001-of-00002.json.gz
RENAMED
File without changes
|
pes2o.py
ADDED
@@ -0,0 +1,178 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import gzip
|
2 |
+
import json
|
3 |
+
|
4 |
+
import datasets
|
5 |
+
|
6 |
+
logger = datasets.logging.get_logger(__name__)
|
7 |
+
|
8 |
+
|
9 |
+
_HOMEPAGE = "https://huggingface.co/datasets/allenai/pes2o"
|
10 |
+
|
11 |
+
|
12 |
+
_DESCRIPTION = "\
|
13 |
+
The PES2O dataset is a collection of ~40M creative commmon licensed academic \
|
14 |
+
papers, cleaned, filtered, and formatted for pre-training of language models. \
|
15 |
+
It is derived from the Semantic Scholar Open Research Corpus(Lo et al, 2020), \
|
16 |
+
or S2ORC.\
|
17 |
+
"
|
18 |
+
|
19 |
+
_LICENSE = "odc-by"
|
20 |
+
|
21 |
+
_VARIANTS = {
|
22 |
+
"v1": {
|
23 |
+
"version": "1.0.0",
|
24 |
+
"download_size": 100702002904,
|
25 |
+
"dataset_size": 67787014,
|
26 |
+
"splits": {
|
27 |
+
"train": {
|
28 |
+
"num_bytes": 100145555091,
|
29 |
+
"num_examples": 67624463,
|
30 |
+
"files": [
|
31 |
+
"data/v1/train-00000-of-00020.json.gz",
|
32 |
+
"data/v1/train-00001-of-00020.json.gz",
|
33 |
+
"data/v1/train-00002-of-00020.json.gz",
|
34 |
+
"data/v1/train-00003-of-00020.json.gz",
|
35 |
+
"data/v1/train-00004-of-00020.json.gz",
|
36 |
+
"data/v1/train-00005-of-00020.json.gz",
|
37 |
+
"data/v1/train-00006-of-00020.json.gz",
|
38 |
+
"data/v1/train-00007-of-00020.json.gz",
|
39 |
+
"data/v1/train-00008-of-00020.json.gz",
|
40 |
+
"data/v1/train-00009-of-00020.json.gz",
|
41 |
+
"data/v1/train-00010-of-00020.json.gz",
|
42 |
+
"data/v1/train-00011-of-00020.json.gz",
|
43 |
+
"data/v1/train-00012-of-00020.json.gz",
|
44 |
+
"data/v1/train-00013-of-00020.json.gz",
|
45 |
+
"data/v1/train-00014-of-00020.json.gz",
|
46 |
+
"data/v1/train-00015-of-00020.json.gz",
|
47 |
+
"data/v1/train-00016-of-00020.json.gz",
|
48 |
+
"data/v1/train-00017-of-00020.json.gz",
|
49 |
+
"data/v1/train-00018-of-00020.json.gz",
|
50 |
+
"data/v1/train-00019-of-00020.json.gz",
|
51 |
+
],
|
52 |
+
},
|
53 |
+
"validation": {
|
54 |
+
"num_bytes": 556447813,
|
55 |
+
"num_examples": 162551,
|
56 |
+
"files": [
|
57 |
+
"data/v1/validation-00000-of-00002.json.gz",
|
58 |
+
"data/v1/validation-00001-of-00002.json.gz",
|
59 |
+
],
|
60 |
+
},
|
61 |
+
},
|
62 |
+
},
|
63 |
+
"v2": {
|
64 |
+
"version": "1.0.0",
|
65 |
+
"download_size": 87129236480,
|
66 |
+
"dataset_size": 38972211,
|
67 |
+
"splits": {
|
68 |
+
"train": {
|
69 |
+
"num_bytes": 86572382178,
|
70 |
+
"num_examples": 38811179,
|
71 |
+
"files": [
|
72 |
+
"data/v2/train-00000-of-00020.json.gz",
|
73 |
+
"data/v2/train-00001-of-00020.json.gz",
|
74 |
+
"data/v2/train-00002-of-00020.json.gz",
|
75 |
+
"data/v2/train-00003-of-00020.json.gz",
|
76 |
+
"data/v2/train-00004-of-00020.json.gz",
|
77 |
+
"data/v2/train-00005-of-00020.json.gz",
|
78 |
+
"data/v2/train-00006-of-00020.json.gz",
|
79 |
+
"data/v2/train-00007-of-00020.json.gz",
|
80 |
+
"data/v2/train-00008-of-00020.json.gz",
|
81 |
+
"data/v2/train-00009-of-00020.json.gz",
|
82 |
+
"data/v2/train-00010-of-00020.json.gz",
|
83 |
+
"data/v2/train-00011-of-00020.json.gz",
|
84 |
+
"data/v2/train-00012-of-00020.json.gz",
|
85 |
+
"data/v2/train-00013-of-00020.json.gz",
|
86 |
+
"data/v2/train-00014-of-00020.json.gz",
|
87 |
+
"data/v2/train-00015-of-00020.json.gz",
|
88 |
+
"data/v2/train-00016-of-00020.json.gz",
|
89 |
+
"data/v2/train-00017-of-00020.json.gz",
|
90 |
+
"data/v2/train-00018-of-00020.json.gz",
|
91 |
+
"data/v2/train-00019-of-00020.json.gz",
|
92 |
+
],
|
93 |
+
},
|
94 |
+
"validation": {
|
95 |
+
"num_bytes": 556854302,
|
96 |
+
"num_examples": 161032,
|
97 |
+
"files": [
|
98 |
+
"data/v1/validation-00000-of-00002.json.gz",
|
99 |
+
"data/v1/validation-00001-of-00002.json.gz",
|
100 |
+
],
|
101 |
+
},
|
102 |
+
},
|
103 |
+
},
|
104 |
+
}
|
105 |
+
|
106 |
+
# Every field of a PES2O record is a plain string; build the schema from the
# field names rather than spelling each entry out.
_FEATURES = datasets.Features(
    {
        field: datasets.Value("string")
        for field in ("added", "created", "id", "source", "text", "version")
    }
)
|
114 |
+
|
115 |
+
_CITATION = """\
|
116 |
+
@techreport{pes2o,
|
117 |
+
author = {Luca Soldaini and Kyle Lo},
|
118 |
+
year = 2023,
|
119 |
+
title = {{PES2O (Pretraining Efficiently on S2ORC) Dataset}},
|
120 |
+
type = {},
|
121 |
+
url = {https://huggingface.co/datasets/allenai/pes2o}
|
122 |
+
}
|
123 |
+
"""
|
124 |
+
|
125 |
+
|
126 |
+
class Pes2o(datasets.GeneratorBasedBuilder):
    """Pretraining Efficiently on S2ORC!"""

    # One config per variant declared in _VARIANTS (v1, v2).
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name=variant, version=spec["version"])
        for variant, spec in _VARIANTS.items()
    ]

    DEFAULT_CONFIG_NAME = "v2"

    def _info(self):
        """Give information and typings for the dataset."""
        variant = _VARIANTS[self.config.name]
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=_FEATURES,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
            dataset_size=variant["dataset_size"],
            download_size=variant["download_size"],
        )

    def _split_generators(self, dl_manager):
        """Download each split's shards and hand their local paths to the generator."""
        split_specs = _VARIANTS[self.config.name]["splits"]
        local_files = {
            split: dl_manager.download(split_specs[split]["files"])
            for split in ("train", "validation")
        }
        return [
            datasets.SplitGenerator(
                name=str(datasets.Split.TRAIN),
                gen_kwargs={"filepaths": local_files["train"]},
            ),
            datasets.SplitGenerator(
                name=str(datasets.Split.VALIDATION),
                gen_kwargs={"filepaths": local_files["validation"]},
            ),
        ]

    def _generate_examples(self, filepaths):
        """This function returns the examples in the raw (text) form by
        iterating on all the files."""
        key = 0
        for path in filepaths:
            logger.info("generating examples from = %s", path)
            # Shards are gzipped JSON-lines; decode as UTF-8 text.
            with gzip.open(open(path, "rb"), "rt", encoding="utf-8") as stream:
                for raw_line in stream:
                    if not raw_line:
                        continue
                    yield key, json.loads(raw_line)
                    key += 1
|