import csv
import os
import sys
import datasets
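# Full article texts can exceed the csv module's default field size limit,
# so raise it to the largest value the platform allows.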
csv.field_size_limit(sys.maxsize)
_CITATION = """\
@inproceedings{demattei-etal-2020-changeit,
    author = {De Mattei, Lorenzo and Cafagna, Michele and Dell'Orletta, Felice and Nissim, Malvina and Gatt, Albert},
    title = {{CHANGE-IT @ EVALITA 2020}: Change Headlines, Adapt News, GEnerate},
    booktitle = {Proceedings of Seventh Evaluation Campaign of Natural Language Processing and Speech Tools for Italian. Final Workshop (EVALITA 2020)},
    editor = {Basile, Valerio and Croce, Danilo and Di Maro, Maria and Passaro, Lucia C.},
    publisher = {CEUR.org},
    year = {2020},
    address = {Online}
}
"""
_DESCRIPTION = """\
The CHANGE-IT dataset contains approximately 152,000 article-headline pairs, collected from two Italian
newspapers situated at opposite ends of the political spectrum, namely la Repubblica (left) and
Il Giornale (right), with the two newspapers equally represented. The dataset has been used in the context
of the CHANGE-IT task (https://sites.google.com/view/change-it) during the Evalita 2020 evaluation campaign
(http://www.evalita.it/2020). CHANGE-IT is a generation task for Italian – more specifically, a style transfer
task for headlines of Italian newspapers. Given a (collection of) headlines from one newspaper, namely
Il Giornale (G) or La Repubblica (R), it challenges automatic systems to change all G-headlines to headlines in
style R, and all R-headlines to headlines in style G. Although the task only concerns headline change, the dataset
includes both the headlines and their respective full articles.
"""
_HOMEPAGE = "https://live.european-language-grid.eu/catalogue/corpus/7373"
_LICENSE = "Creative Commons Attribution Non Commercial Share Alike 4.0 International"
_CONFIGS = ["repubblica", "ilgiornale"]
_SPLITS = {
    "train": "train",
    "test": "change-it-test-set"
}
_PATHS = {
    cfg: {
        split: os.path.join("CHANGE-it", split_path, f"change-it.{cfg}.{split}.csv")
        for (split, split_path) in _SPLITS.items()
    }
    for cfg in _CONFIGS
}
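# For illustration, _PATHS resolves to relative paths of the form (derived from
# _CONFIGS and _SPLITS above):
#   CHANGE-it/train/change-it.repubblica.train.csv
#   CHANGE-it/train/change-it.ilgiornale.train.csv
#   CHANGE-it/change-it-test-set/change-it.repubblica.test.csv
#   CHANGE-it/change-it-test-set/change-it.ilgiornale.test.csv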
class ChangeItConfig(datasets.BuilderConfig):
    """BuilderConfig for CHANGE-IT."""

    def __init__(
        self,
        **kwargs,
    ):
        """BuilderConfig for CHANGE-IT.
        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(version=datasets.Version("1.0.0"), **kwargs)
class ChangeIt(datasets.GeneratorBasedBuilder):
    VERSION = datasets.Version("1.0.0")
    BUILDER_CONFIGS = [
        ChangeItConfig(
            name=_CONFIGS[0],
        ),
        ChangeItConfig(
            name=_CONFIGS[1],
        ),
    ]
    @property
    def manual_download_instructions(self):
        return (
            "To use CHANGE-IT you have to download it manually from the European Language Grid website. "
            "Please visit https://live.european-language-grid.eu/catalogue/corpus/7373, download and unzip "
            "the folder. The root must contain a CHANGE-it subfolder that contains the train and change-it-test-set subfolders. "
            "Then, load the dataset with: `datasets.load_dataset('gsarti/change_it', data_dir='path/to/root/folder')`"
        )
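    # Illustrative usage sketch (the data_dir value is a placeholder for the manually
    # downloaded and unzipped corpus root):
    #   repubblica = datasets.load_dataset("gsarti/change_it", "repubblica", data_dir="path/to/root/folder")
    #   ilgiornale = datasets.load_dataset("gsarti/change_it", "ilgiornale", data_dir="path/to/root/folder")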
    def get_alignment_rating(self, id, split):
        # Label each example with an alignment rating based on its index within the split:
        # train indices up to 5000 are labeled A1, indices 5001-14999 A3, and the rest R;
        # all test examples are labeled A2.
        if split == "train":
            if id <= 5000:
                return "A1"
            elif 5000 < id < 15000:
                return "A3"
            else:
                return "R"
        elif split == "test":
            return "A2"
        else:
            raise ValueError("Unknown split {}".format(split))
    def _info(self):
        features = datasets.Features(
            {
                "id": datasets.Value("int32"),
                "headline": datasets.Value("string"),
                "full_text": datasets.Value("string"),
                "alignment": datasets.Value("string")
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )
    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        data_dir = os.path.abspath(os.path.expanduser(dl_manager.manual_dir))
        if not os.path.exists(data_dir):
            raise FileNotFoundError(
                "{} does not exist. Make sure you insert the unzipped CHANGE-IT dir via "
                "`datasets.load_dataset('gsarti/change_it', data_dir=...)`. "
                "Manual download instructions: {}".format(
                    data_dir, self.manual_download_instructions
                )
            )
        cfg_paths = _PATHS[self.config.name]
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": os.path.join(data_dir, cfg_paths["train"]),
                    "split": "train"
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": os.path.join(data_dir, cfg_paths["test"]),
                    "split": "test"
                },
            ),
        ]
    def _generate_examples(self, filepath: str, split: str):
        """Yields examples as (key, example) tuples."""
        with open(filepath, encoding="utf8") as f:
            reader = csv.DictReader(f)
            for id_, row in enumerate(reader):
                yield id_, {
                    "id": id_,
                    "headline": row["headline"],
                    "full_text": row["full_text"],
                    "alignment": self.get_alignment_rating(id_, split)
                }
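# Sketch of a record yielded by _generate_examples, matching the features declared in
# _info() (the headline and full_text values are hypothetical placeholders):
#   (0, {"id": 0, "headline": "Titolo di esempio", "full_text": "Testo completo dell'articolo...",
#        "alignment": "A1"})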