# EC40 / EC40.py — Hugging Face Hub page header (not part of the script):
# last updated by ShaomuTan, commit 93f0ec4, file size 4.61 kB.
"""X-stance dataset for German and French/Italian stance detection"""
import csv
import json
import os
import datasets
_CITATION = """\
@inproceedings{vamvas2020xstance,
author = "Vamvas, Jannis and Sennrich, Rico",
title = "{X-Stance}: A Multilingual Multi-Target Dataset for Stance Detection",
booktitle = "Proceedings of the 5th Swiss Text Analytics Conference (SwissText) \& 16th Conference on Natural Language Processing (KONVENS)",
address = "Zurich, Switzerland",
year = "2020",
month = "jun",
url = "http://ceur-ws.org/Vol-2624/paper9.pdf"
}
"""
_DESCRIPTION = """\
The x-stance dataset contains more than 150 political questions, and 67k comments written by candidates on those questions. The comments are partly German, partly French and Italian. The data have been extracted from the Swiss voting advice platform Smartvote.
"""
_HOMEPAGE = "https://github.com/Smu-Tan/ZS-NMT-Variations/tree/main"
_LICENSE = "cc-by-4.0"
_langs = [
'af',
'am',
'ar',
'ast',
'be',
'bg',
'bn',
'bs',
'ca',
'cs',
'da',
'de',
'es',
'fr',
'gu',
'ha',
'he',
'hi',
'is',
'it',
'kab',
'kn',
'lb',
'mr',
'mt',
'ne',
'nl',
'no',
'oc',
'pl',
'pt',
'ro',
'ru',
'sd',
'so',
'sr',
'sv',
'ti',
'uk',
'ur'
]
_En_centric_Pairs = ['en-'+i for i in _langs]
_ZS_Pairs = [i+'-'+j for i in _langs for j in _langs if i!=j]
class EC40Config(datasets.BuilderConfig):
    """BuilderConfig for a single EC40 translation direction.

    Args:
        language_pair: the "src-tgt" language pair to load (e.g. "en-de").
        **kwargs: keyword arguments forwarded to `datasets.BuilderConfig`.
    """

    # Fix: in the original, this documentation was a bare string statement
    # placed *after* super().__init__() inside __init__ — a dead expression,
    # not a docstring. It now lives where tooling (help(), IDEs) can see it.
    def __init__(self, language_pair, **kwargs):
        super().__init__(**kwargs)
        self.language_pair = language_pair
class EC40(datasets.GeneratorBasedBuilder):
    """EC40 is English-centric: every supervised training pair includes English
    on either the source or the target side; the remaining directions are
    zero-shot pairs built from the 40 non-English languages.
    """

    VERSION = datasets.Version("1.0.0")
    BUILDER_CONFIG_CLASS = EC40Config
    # One config per direction: 40 English-centric + 40*39 zero-shot pairs.
    BUILDER_CONFIGS = [
        EC40Config(name=pair, description=None, language_pair=pair)
        for pair in _En_centric_Pairs + _ZS_Pairs
    ]

    def _info(self):
        """Return dataset metadata for the configured language pair."""
        src_tag, tgt_tag = self.config.language_pair.split("-")
        return datasets.DatasetInfo(
            description=None,
            features=datasets.Features(
                {"translation": datasets.features.Translation(languages=(src_tag, tgt_tag))}
            ),
            supervised_keys=(src_tag, tgt_tag),
            homepage="https://github.com/Smu-Tan/ZS-NMT-Variations/tree/main/EC40_dataset",
            citation=None,
        )

    def _split_generators(self, dl_manager):
        """Download the line-aligned parallel text files and define the splits.

        Train comes from the EC40 training set, validation from NTREX, and
        test from Flores200; each split is a pair of plain-text files, one per
        side of the language pair, aligned line by line.
        """
        pair = self.config.language_pair
        src_tag, tgt_tag = pair.split("-")
        train_src = dl_manager.download_and_extract(f"EC40-train-set/{pair}.{src_tag}")
        train_tgt = dl_manager.download_and_extract(f"EC40-train-set/{pair}.{tgt_tag}")
        valid_src = dl_manager.download_and_extract(f"Ntrex-eval-set/test.{pair}.{src_tag}")
        valid_tgt = dl_manager.download_and_extract(f"Ntrex-eval-set/test.{pair}.{tgt_tag}")
        test_src = dl_manager.download_and_extract(f"Flores200-test-set/en-test-set/test.{pair}.{src_tag}")
        test_tgt = dl_manager.download_and_extract(f"Flores200-test-set/en-test-set/test.{pair}.{tgt_tag}")
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": train_src, "labelpath": train_tgt, "split": "train"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filepath": valid_src, "labelpath": valid_tgt, "split": "validation"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": test_src, "labelpath": test_tgt, "split": "test"},
            ),
        ]

    def _generate_examples(self, filepath, labelpath, split):
        """Yield (index, {"translation": {src: sentence, tgt: sentence}}) pairs.

        Bug fix vs. the original: read().split("\\n")[:-1] silently dropped the
        final sentence pair whenever a file lacked a trailing newline;
        splitlines() handles files with and without one identically.
        `split` is unused but kept because gen_kwargs passes it.
        """
        src_tag, tgt_tag = self.config.language_pair.split("-")
        with open(filepath, encoding="utf-8") as f:
            src_lines = f.read().splitlines()
        with open(labelpath, encoding="utf-8") as f:
            tgt_lines = f.read().splitlines()
        # zip() truncates to the shorter file, matching the original behavior
        # when the two sides differ in length.
        for idx, (src, tgt) in enumerate(zip(src_lines, tgt_lines)):
            yield idx, {"translation": {src_tag: src, tgt_tag: tgt}}