"""XWinograd"""


import json
import random

import pandas as pd

import datasets


logger = datasets.logging.get_logger(__name__)


_CITATION = """\
@misc{tikhonov2021heads,
      title={It's All in the Heads: Using Attention Heads as a Baseline for Cross-Lingual Transfer in Commonsense Reasoning},
      author={Alexey Tikhonov and Max Ryabinin},
      year={2021},
      eprint={2106.12066},
      archivePrefix={arXiv},
      primaryClass={cs.CL}
}
"""

_DESCRIPTION = """\
A multilingual collection of Winograd Schemas in six languages \
that can be used for evaluation of cross-lingual commonsense reasoning capabilities.
"""


_URL = "https://huggingface.co/datasets/Muennighoff/xwinograd/resolve/main/data/xwinograd.tsv"


def winogrande_format(row):
    """Convert one raw XWinograd row into the Winogrande format: a sentence with a "_"
    placeholder for the ambiguous pronoun, two candidate options, and the correct answer."""
    # Token index of the pronoun that has to be blanked out.
    position_idx = json.loads(row["pronoun"])[1][0]

    # Walk the tokens to find the character offset of that pronoun in the raw sentence.
    sent = str(row["sent"])
    start_idx = 0
    for i, tok in enumerate(json.loads(row["toks"])):
        tok = str(tok)
        cur_start_idx = sent.find(tok)
        if i == position_idx:
            break
        sent = sent[cur_start_idx + len(tok):]
        start_idx += cur_start_idx + len(tok)

    # Replace the pronoun with the "_" placeholder used by Winogrande.
    row["sentence"] = (
        row["sent"][:start_idx]
        + row["sent"][start_idx:start_idx + len(tok) + 1].replace(tok, "_")
        + row["sent"][start_idx + len(tok) + 1:]
    )

    sol = json.loads(row["solution"])

    # Randomly decide whether the correct candidate goes into option1 or option2.
    cor_answer_idx = random.choice([1, 2])
    incor_answer_idx = 2 if cor_answer_idx == 1 else 1

    # Each solution entry is a (candidate, is_correct) pair.
    cor_answer = str(sol[0][0]) if sol[0][-1] else str(sol[1][0])
    incor_answer = str(sol[1][0]) if sol[0][-1] else str(sol[0][0])

    row[f"option{cor_answer_idx}"] = cor_answer
    row[f"option{incor_answer_idx}"] = incor_answer
    # The "answer" feature is declared as a string in _info(), so store it as one.
    row["answer"] = str(cor_answer_idx)
    return row
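
# Illustrative (hypothetical) example of the transformation above, not taken from the data:
# for a row whose "sent" is "The trophy doesn't fit in the suitcase because it is too big."
# with the pronoun annotation pointing at "it", the output would look like
#   sentence: "The trophy doesn't fit in the suitcase because _ is too big."
#   option1 / option2: "the trophy" / "the suitcase" (in random order)
#   answer: "1" or "2", whichever slot received the correct candidate.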


class XWinograd(datasets.GeneratorBasedBuilder):
    """XWinograd"""

    VERSION = datasets.Version("1.0.0")
    # One config per language covered by XWinograd.
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="en",
            version=VERSION,
            description="XWinograd in English",
        ),
        datasets.BuilderConfig(
            name="fr",
            version=VERSION,
            description="XWinograd in French",
        ),
        datasets.BuilderConfig(
            name="jp",
            version=VERSION,
            description="XWinograd in Japanese",
        ),
        datasets.BuilderConfig(
            name="pt",
            version=VERSION,
            description="XWinograd in Portuguese",
        ),
        datasets.BuilderConfig(
            name="ru",
            version=VERSION,
            description="XWinograd in Russian",
        ),
        datasets.BuilderConfig(
            name="zh",
            version=VERSION,
            description="XWinograd in Chinese",
        ),
    ]
    DEFAULT_CONFIG_NAME = "en"

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "sentence": datasets.Value("string"),
                    "option1": datasets.Value("string"),
                    "option2": datasets.Value("string"),
                    "answer": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        downloaded_files = dl_manager.download_and_extract(_URL)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": downloaded_files},
            )
        ]

    def _generate_examples(self, filepath):
        """This function returns the examples in the raw (text) form."""
        logger.info("generating examples from = %s", filepath)
        ds = pd.read_csv(
            filepath, sep="\t", header=None,
            names=["lang", "type", "original", "sent", "toks", "pronoun", "solution"]
        )
        # Keep only the rows of the selected language config.
        if self.config.name:
            ds = ds[ds["lang"] == self.config.name]
        # Convert each raw row into the Winogrande-style format.
        ds = ds.apply(winogrande_format, axis=1)

        for idx, row in ds.iterrows():
            yield idx, {
                "sentence": row["sentence"],
                "option1": row["option1"],
                "option2": row["option2"],
                "answer": row["answer"],
            }
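

# Minimal usage sketch (an assumption: the script is published as "Muennighoff/xwinograd" on the
# Hugging Face Hub, which matches the data URL above; adjust the path when loading a local copy):
#
#     from datasets import load_dataset
#
#     xwinograd_en = load_dataset("Muennighoff/xwinograd", "en", split="test")
#     print(xwinograd_en[0])
#     # -> {"sentence": "... _ ...", "option1": "...", "option2": "...", "answer": "1" or "2"}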