# Lint as: python3
import csv
import json
import os

import datasets


logger = datasets.logging.get_logger(__name__)


_CITATION = """
 @article{Xu-EtAl:2016:TACL,
 author = {Wei Xu and Courtney Napoles and Ellie Pavlick and Quanze Chen and Chris Callison-Burch},
 title = {Optimizing Statistical Machine Translation for Text Simplification},
 journal = {Transactions of the Association for Computational Linguistics},
 volume = {4},
 year = {2016},
 url = {https://cocoxu.github.io/publications/tacl2016-smt-simplification.pdf},
 pages = {401--415}
 }
"""

_DESCRIPTION = """Corpus of sentences gathered from Wikipedia and simplifications proposed by Amazon MTurk workers. 

Data gathered by Wei Xu, Courtney Napoles, Ellie Pavlick, Quanze Chen and Chris Callison-Burch."""

_URLS = {
    "tune": "https://huggingface.co/datasets/waboucay/turk_corpus/raw/main/tune.8turkers.organized.tsv",
    "test": "https://huggingface.co/datasets/waboucay/turk_corpus/raw/main/test.8turkers.organized.tsv"
}
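
# Layout of the organized TSV files, as assumed by the parsing code below
# (inferred from how each row is indexed, not verified against a spec):
#   column 0     appears to hold a sentence identifier
#   column 1     original (complex) Wikipedia sentence
#   columns 2-9  the 8 reference simplifications written by Turk workers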
_TUNE_FILE = "tune.json"
_TEST_FILE = "test.json"


class TurkCorpusConfig(datasets.BuilderConfig):
    """BuilderConfig for the Turk Corpus dataset."""

    def __init__(self, **kwargs):
        """BuilderConfig for the Turk Corpus dataset.

        Args:
          **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(**kwargs)


class TurkCorpus(datasets.GeneratorBasedBuilder):
    """Turk Corpus: Wikipedia sentences paired with 8 crowd-sourced reference simplifications."""

    VERSION = datasets.Version("1.0.0", "")
    BUILDER_CONFIG_CLASS = TurkCorpusConfig
    BUILDER_CONFIGS = [
        TurkCorpusConfig(
            name="turk_corpus",
            version=datasets.Version("1.0.0", ""),
            description=_DESCRIPTION,
        )
    ]

    def _info(self):
        features = datasets.Features(
            {
                "complex": datasets.Value("string"),
                "simple": datasets.Sequence(datasets.Value("string")),
            }
        )
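        # A single record then has the form (illustrative):
        #   {"complex": "<original sentence>",
        #    "simple": ["<simplification 1>", ..., "<simplification 8>"]}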

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage="https://github.com/cocoxu/simplification/tree/master",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        dl_files = dl_manager.download(_URLS)

        # Convert each downloaded TSV file to JSON once, next to the download
        # cache, so that _generate_examples only has to read JSON.
        tune_path = os.path.join(os.path.dirname(dl_files["tune"]), _TUNE_FILE)
        test_path = os.path.join(os.path.dirname(dl_files["test"]), _TEST_FILE)

        tune_data_path = os.path.abspath(dl_files["tune"])
        test_data_path = os.path.abspath(dl_files["test"])

        with open(tune_data_path, encoding="utf-8") as tune_file, open(test_data_path, encoding="utf-8") as test_file, \
             open(tune_path, "w", encoding="utf-8") as tune_json, open(test_path, "w", encoding="utf-8") as test_json:

            tune_reader = csv.reader(tune_file, delimiter="\t")
            test_reader = csv.reader(test_file, delimiter="\t")

            # Column 1 holds the original sentence; columns 2 onward hold the
            # reference simplifications (one per Turk worker).
            tune_examples = [{"complex": line[1], "simple": line[2:]} for line in tune_reader]
            json.dump(tune_examples, tune_json)

            test_examples = [{"complex": line[1], "simple": line[2:]} for line in test_reader]
            json.dump(test_examples, test_json)

        data_files = {
            "tune": tune_path,
            "test": test_path,
        }

        # The "tune" split is exposed as the validation split.
        return [
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": data_files["tune"]}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": data_files["test"]}),
        ]

    def _generate_examples(self, filepath):
        """Yields (key, example) pairs read from the intermediate JSON file."""

        with open(filepath, encoding="utf-8") as f:
            data = json.load(f)

        for guid, obj in enumerate(data):
            yield guid, {
                "complex": obj["complex"],
                "simple": obj["simple"],
            }
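
# Usage sketch (illustrative, not part of the original script). Assumes this
# file is saved locally, e.g. as turk_corpus.py; recent versions of the
# `datasets` library may additionally require trust_remote_code=True.
if __name__ == "__main__":
    dataset = datasets.load_dataset(__file__)
    example = dataset["validation"][0]
    print(example["complex"])  # the original Wikipedia sentence
    print(example["simple"])   # list of 8 reference simplifications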