Dataset: onestop_english
Languages: English
Multilinguality: monolingual
Size Categories: n<1K
Language Creators: found
Annotations Creators: found
Source Datasets: original
Tags:
License: cc-by-sa-4.0
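
Once the loading script below is available (it is assumed here to be published under the dataset ID `onestop_english`), the corpus can be loaded with the `datasets` library. A minimal sketch:

    from datasets import load_dataset

    ds = load_dataset("onestop_english", split="train")
    print(len(ds))                          # expected: 567 texts
    print(ds.features["label"].names)       # ['ele', 'int', 'adv']
    print(ds[0]["text"][:80], ds[0]["label"])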
# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""OneStopEnglish Corpus: Dataset of texts classified into reading levels/text complexities."""


import os

import datasets
from datasets.tasks import TextClassification


logger = datasets.logging.get_logger(__name__)


_CITATION = """\
@inproceedings{vajjala-lucic-2018-onestopenglish,
    title = {OneStopEnglish corpus: A new corpus for automatic readability assessment and text simplification},
    author = {Sowmya Vajjala and Ivana Lučić},
    booktitle = {Proceedings of the Thirteenth Workshop on Innovative Use of NLP for Building Educational Applications},
    year = {2018}
}
"""

_DESCRIPTION = """\
This dataset is a compilation of the OneStopEnglish corpus of texts written at three reading levels into one file.
Text documents are classified into three reading levels - ele, int, adv (Elementary, Intermediate and Advanced).
The corpus has been shown to be useful for two applications - automatic readability assessment and automatic text simplification.
The corpus consists of 189 texts, each in three versions/reading levels (567 in total).
"""

_HOMEPAGE = "https://github.com/nishkalavallabhi/OneStopEnglishCorpus"

_LICENSE = "Creative Commons Attribution-ShareAlike 4.0 International License"

_URL = "https://github.com/purvimisal/OneStopCorpus-Compiled/raw/main/Texts-SeparatedByReadingLevel.zip"
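
# The zip above is expected to extract to a "Texts-SeparatedByReadingLevel/" folder
# containing one sub-directory of plain-text files per reading level ("Ele-Txt",
# "Int-Txt", "Adv-Txt"); _get_examples_from_split below relies on this layout.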


class OnestopEnglish(datasets.GeneratorBasedBuilder):
    """OneStopEnglish Corpus: Dataset of texts classified into reading levels"""

    VERSION = datasets.Version("1.1.0")

    def _info(self):
        # Specifies the datasets.DatasetInfo object, which holds the dataset's metadata and feature types
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {"text": datasets.Value("string"), "label": datasets.features.ClassLabel(names=["ele", "int", "adv"])}
            ),
            supervised_keys=[""],
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
            task_templates=[TextClassification(text_column="text", label_column="label")],
        )

    def _vocab_text_gen(self, train_file):
        for _, ex in self._generate_examples(train_file):
            yield ex["text"]

    def _split_generators(self, dl_manager):
        """Downloads OneStopEnglish corpus"""
        extracted_folder_path = dl_manager.download_and_extract(_URL)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"split_key": "train", "data_dir": extracted_folder_path},
            )
        ]

    def _get_examples_from_split(self, split_key, data_dir):
        """Reads the downloaded and extracted files and combines the individual text files to one dataset."""

        data_dir = os.path.join(data_dir, "Texts-SeparatedByReadingLevel")

        train_samples = []
        train_labels = []
        # Each reading level is stored in its own sub-directory of plain-text files.
        for label, sub_dir in [("ele", "Ele-Txt"), ("int", "Int-Txt"), ("adv", "Adv-Txt")]:
            dir_path = os.path.join(data_dir, sub_dir)
            for f in sorted(os.listdir(dir_path)):
                file_path = os.path.join(dir_path, f)
                try:
                    with open(file_path, encoding="utf-8-sig") as myfile:
                        train_samples.append(myfile.read().strip())
                        train_labels.append(label)
                except Exception as e:
                    logger.info("Error with %s: %s", file_path, e)

        if split_key == "train":
            return (train_samples, train_labels)
        else:
            raise ValueError(f"Invalid split key {split_key}")

    def _generate_examples(self, split_key, data_dir):
        """Yields examples for a given split of dataset."""
        split_text, split_labels = self._get_examples_from_split(split_key, data_dir)
        for id_, (text, label) in enumerate(zip(split_text, split_labels)):
            feature_dict = {"text": text, "label": label}
            yield id_, feature_dict
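

# A quick local check (a sketch, not part of the original script; assumes the
# `datasets` library is installed, the download URL above is reachable, and this
# file is saved as onestop_english.py):
#
#     from datasets import load_dataset
#     ds = load_dataset("./onestop_english.py", split="train")
#     assert len(ds) == 567                # 189 texts per reading level
#     print(ds.features["label"].names)    # ['ele', 'int', 'adv']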