ketong3906 committed on
Commit
20cd80c
1 Parent(s): c5c45b4

Upload 3 files

Files changed (3):
  1. dataset_info.json +53 -0
  2. onestop_english.py +135 -0
  3. state.json +13 -0
dataset_info.json ADDED
@@ -0,0 +1,53 @@
+ {
+   "builder_name": "onestop_english",
+   "citation": "@inproceedings{vajjala-lucic-2018-onestopenglish,\n title = {OneStopEnglish corpus: A new corpus for automatic readability assessment and text simplification},\n author = {Sowmya Vajjala and Ivana Lu\u010di\u0107},\n booktitle = {Proceedings of the Thirteenth Workshop on Innovative Use of NLP for Building Educational Applications},\n year = {2018}\n}\n",
+   "config_name": "default",
+   "dataset_name": "onestop_english",
+   "dataset_size": 2278039,
+   "description": "This dataset is a compilation of the OneStopEnglish corpus of texts written at three reading levels into one file.\nText documents are classified into three reading levels - ele, int, adv (Elementary, Intermediate and Advanced).\nThis dataset demonstrates its usefulness through two applications - automatic readability assessment and automatic text simplification.\nThe corpus consists of 189 texts, each in three versions/reading levels (567 in total).\n",
+   "download_checksums": {
+     "https://github.com/purvimisal/OneStopCorpus-Compiled/raw/main/Texts-SeparatedByReadingLevel.zip": {
+       "num_bytes": 1228804,
+       "checksum": null
+     }
+   },
+   "download_size": 1228804,
+   "features": {
+     "text": {
+       "dtype": "string",
+       "_type": "Value"
+     },
+     "label": {
+       "names": [
+         "ele",
+         "int",
+         "adv"
+       ],
+       "_type": "ClassLabel"
+     }
+   },
+   "homepage": "https://github.com/nishkalavallabhi/OneStopEnglishCorpus",
+   "license": "Creative Commons Attribution-ShareAlike 4.0 International License",
+   "size_in_bytes": 3506843,
+   "splits": {
+     "train": {
+       "name": "train",
+       "num_bytes": 2278039,
+       "num_examples": 567,
+       "dataset_name": "onestop_english"
+     }
+   },
+   "supervised_keys": {},
+   "task_templates": [
+     {
+       "task": "text-classification",
+       "label_column": "label"
+     }
+   ],
+   "version": {
+     "version_str": "1.1.0",
+     "major": 1,
+     "minor": 1,
+     "patch": 0
+   }
+ }
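
Note: dataset_info.json is the serialized `datasets.DatasetInfo` record, so the schema above can be read back without loading any data. A minimal sketch, assuming the file sits in a local checkout (the path is hypothetical):

from datasets import DatasetInfo

info = DatasetInfo.from_directory("path/to/this/repo")  # hypothetical local directory containing dataset_info.json
print(info.features)                      # text: Value('string'), label: ClassLabel(names=['ele', 'int', 'adv'])
print(info.splits["train"].num_examples)  # 567
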
onestop_english.py ADDED
@@ -0,0 +1,135 @@
+ # coding=utf-8
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """OneStopEnglish Corpus: Dataset of texts classified into reading levels/text complexities."""
+
+
+ import os
+
+ import datasets
+ from datasets.tasks import TextClassification
+
+
+ logger = datasets.logging.get_logger(__name__)
+
+
+ _CITATION = """\
+ @inproceedings{vajjala-lucic-2018-onestopenglish,
+     title = {OneStopEnglish corpus: A new corpus for automatic readability assessment and text simplification},
+     author = {Sowmya Vajjala and Ivana Lučić},
+     booktitle = {Proceedings of the Thirteenth Workshop on Innovative Use of NLP for Building Educational Applications},
+     year = {2018}
+ }
+ """
+
+ _DESCRIPTION = """\
+ This dataset is a compilation of the OneStopEnglish corpus of texts written at three reading levels into one file.
+ Text documents are classified into three reading levels - ele, int, adv (Elementary, Intermediate and Advanced).
+ This dataset demonstrates its usefulness through two applications - automatic readability assessment and automatic text simplification.
+ The corpus consists of 189 texts, each in three versions/reading levels (567 in total).
+ """
+
+ _HOMEPAGE = "https://github.com/nishkalavallabhi/OneStopEnglishCorpus"
+
+ _LICENSE = "Creative Commons Attribution-ShareAlike 4.0 International License"
+
+ _URL = "https://github.com/purvimisal/OneStopCorpus-Compiled/raw/main/Texts-SeparatedByReadingLevel.zip"
+
+
+ class OnestopEnglish(datasets.GeneratorBasedBuilder):
+     """OneStopEnglish Corpus: Dataset of texts classified into reading levels."""
+
+     VERSION = datasets.Version("1.1.0")
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(
+                 {"text": datasets.Value("string"), "label": datasets.features.ClassLabel(names=["ele", "int", "adv"])}
+             ),
+             supervised_keys=[""],
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+             task_templates=[TextClassification(text_column="text", label_column="label")],
+         )
+
+     def _vocab_text_gen(self, train_file):
+         for _, ex in self._generate_examples("train", train_file):
+             yield ex["text"]
+
+     def _split_generators(self, dl_manager):
+         """Downloads and extracts the OneStopEnglish corpus."""
+         extracted_folder_path = dl_manager.download_and_extract(_URL)
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={"split_key": "train", "data_dir": extracted_folder_path},
+             )
+         ]
+
+     def _get_examples_from_split(self, split_key, data_dir):
+         """Reads the downloaded and extracted files and combines the individual text files into one dataset."""
+
+         data_dir = os.path.join(data_dir, "Texts-SeparatedByReadingLevel")
+
+         ele_samples = []
+         dir_path = os.path.join(data_dir, "Ele-Txt")
+         files = os.listdir(dir_path)
+         for f in sorted(files):
+             try:
+                 with open(os.path.join(dir_path, f), encoding="utf-8-sig") as myfile:
+                     text = myfile.read().strip()
+                     ele_samples.append(text)
+             except Exception as e:
+                 logger.info("Error with %s: %s", os.path.join(dir_path, f), e)
+
+         int_samples = []
+         dir_path = os.path.join(data_dir, "Int-Txt")
+         files = os.listdir(dir_path)
+         for f in sorted(files):
+             try:
+                 with open(os.path.join(dir_path, f), encoding="utf-8-sig") as myfile:
+                     text = myfile.read().strip()
+                     int_samples.append(text)
+             except Exception as e:
+                 logger.info("Error with %s: %s", os.path.join(dir_path, f), e)
+
+         adv_samples = []
+         dir_path = os.path.join(data_dir, "Adv-Txt")
+         files = os.listdir(dir_path)
+         for f in sorted(files):
+             try:
+                 with open(os.path.join(dir_path, f), encoding="utf-8-sig") as myfile:
+                     text = myfile.read().strip()
+                     adv_samples.append(text)
+             except Exception as e:
+                 logger.info("Error with %s: %s", os.path.join(dir_path, f), e)
+
+         train_samples = ele_samples + int_samples + adv_samples
+         train_labels = (["ele"] * len(ele_samples)) + (["int"] * len(int_samples)) + (["adv"] * len(adv_samples))
+
+         if split_key == "train":
+             return (train_samples, train_labels)
+         else:
+             raise ValueError(f"Invalid split key {split_key}")
+
+     def _generate_examples(self, split_key, data_dir):
+         """Yields examples for a given split of the dataset."""
+         split_text, split_labels = self._get_examples_from_split(split_key, data_dir)
+         for id_, (text, label) in enumerate(zip(split_text, split_labels)):
+             feature_dict = {"text": text, "label": label}
+             yield id_, feature_dict
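
A minimal usage sketch for the builder script above. The local path is hypothetical, and depending on the installed `datasets` version, script-based builders may also require `trust_remote_code=True`:

from datasets import load_dataset

ds = load_dataset("./onestop_english.py", split="train")  # hypothetical local copy of the script above
label_feature = ds.features["label"]  # ClassLabel: string names are stored as integer ids
print(len(ds))                        # 567
print(ds[0]["label"], "->", label_feature.int2str(ds[0]["label"]))  # e.g. 0 -> 'ele'
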
state.json ADDED
@@ -0,0 +1,13 @@
+ {
+   "_data_files": [
+     {
+       "filename": "data-00000-of-00001.arrow"
+     }
+   ],
+   "_fingerprint": "c6f001bca857b8c0",
+   "_format_columns": null,
+   "_format_kwargs": {},
+   "_format_type": null,
+   "_output_all_columns": false,
+   "_split": "train"
+ }
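
Together with dataset_info.json and the Arrow shard listed under "_data_files", this state.json forms the on-disk layout written by `datasets.Dataset.save_to_disk`, so the directory can be reloaded directly. A minimal sketch (local path again hypothetical):

from datasets import load_from_disk

ds = load_from_disk("path/to/this/repo")  # directory containing state.json + data-00000-of-00001.arrow
print(ds.split)  # 'train', per "_split" above
print(len(ds))   # 567
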