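"""Loading script for the Turk Corpus: Wikipedia sentences paired with simplifications
written by Amazon Mechanical Turk workers (Xu et al., TACL 2016)."""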
# Lint as: python3
import csv
import json
import os

import datasets

logger = datasets.logging.get_logger(__name__)

_CITATION = """
@article{Xu-EtAl:2016:TACL,
  author  = {Wei Xu and Courtney Napoles and Ellie Pavlick and Quanze Chen and Chris Callison-Burch},
  title   = {Optimizing Statistical Machine Translation for Text Simplification},
  journal = {Transactions of the Association for Computational Linguistics},
  volume  = {4},
  year    = {2016},
  url     = {https://cocoxu.github.io/publications/tacl2016-smt-simplification.pdf},
  pages   = {401--415}
}
"""
_DESCRIPTION = """Corpus of sentences gathered from Wikipedia and simplifications proposed by Amazon MTurk workers.
Data gathered by Wei Xu, Courtney Napoles, Ellie Pavlick, Quanze Chen and Chris Callison-Burch."""
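# Each example pairs one original (complex) Wikipedia sentence with the reference
# simplifications written by 8 different Turk workers ("8turkers" files below).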
_URLS = {
    "tune": "https://huggingface.co/datasets/waboucay/turk_corpus/raw/main/tune.8turkers.organized.tsv",
    "test": "https://huggingface.co/datasets/waboucay/turk_corpus/raw/main/test.8turkers.organized.tsv",
}
_TUNE_FILE = "tune.json"
_TEST_FILE = "test.json"
class TurkCorpusConfig(datasets.BuilderConfig):
    """BuilderConfig for the Turk Corpus dataset."""

    def __init__(self, **kwargs):
        """BuilderConfig for the Turk Corpus dataset.

        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        super(TurkCorpusConfig, self).__init__(**kwargs)

class TurkCorpus(datasets.GeneratorBasedBuilder):
    """Turk Corpus: Wikipedia sentences with crowd-sourced simplifications."""

    VERSION = datasets.Version("1.0.0", "")
    BUILDER_CONFIG_CLASS = TurkCorpusConfig
    BUILDER_CONFIGS = [
        TurkCorpusConfig(
            name="turk_corpus",
            version=datasets.Version("1.0.0", ""),
            description=_DESCRIPTION,
        )
    ]

    def _info(self):
        features = datasets.Features(
            {
                "complex": datasets.Value("string"),
                "simple": datasets.Sequence(datasets.Value("string")),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage="https://github.com/cocoxu/simplification/tree/master",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        dl_files = dl_manager.download(_URLS)
        # Write the converted JSON files next to the downloaded TSVs in the cache directory.
        tune_path = os.path.join(os.path.dirname(dl_files["test"]), _TUNE_FILE)
        test_path = os.path.join(os.path.dirname(dl_files["test"]), _TEST_FILE)
        tune_tsv_path = os.path.abspath(dl_files["tune"])
        test_tsv_path = os.path.abspath(dl_files["test"])
        with open(tune_tsv_path, encoding="utf-8") as tune_tsv, open(test_tsv_path, encoding="utf-8") as test_tsv, \
                open(tune_path, "w", encoding="utf-8") as tune_json, open(test_path, "w", encoding="utf-8") as test_json:
            tune_reader = csv.reader(tune_tsv, delimiter="\t")
            test_reader = csv.reader(test_tsv, delimiter="\t")
            # In the organized TSVs, column 1 holds the original (complex) sentence and the
            # remaining columns hold the Turker simplifications; column 0 is skipped.
            tune_data = [{"complex": line[1], "simple": line[2:]} for line in tune_reader]
            json.dump(tune_data, tune_json)
            test_data = [{"complex": line[1], "simple": line[2:]} for line in test_reader]
            json.dump(test_data, test_json)
        data_files = {
            "tune": tune_path,
            "test": test_path,
        }
        return [
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": data_files["tune"]}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": data_files["test"]}),
        ]

    def _generate_examples(self, filepath):
        """This function returns the examples in the raw (text) form."""
        with open(filepath, encoding="utf-8") as f:
            data = json.load(f)
            for guid, obj in enumerate(data):
                yield guid, {
                    "complex": obj["complex"],
                    "simple": obj["simple"],
                }
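
# Minimal usage sketch (assumes this script is hosted as waboucay/turk_corpus on the Hub;
# depending on the `datasets` version, trust_remote_code=True may also be required):
#
#   from datasets import load_dataset
#
#   ds = load_dataset("waboucay/turk_corpus")
#   example = ds["validation"][0]
#   print(example["complex"])  # original Wikipedia sentence
#   print(example["simple"])   # list of Turker simplifications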