# wsdmt.py
import bz2
import json

import datasets
from datasets import DownloadManager, DatasetInfo


def _order_langs(lang1, lang2):
    return (lang1, lang2) if lang1 < lang2 else (lang2, lang1)


class WSDMTConfig(datasets.BuilderConfig):
    def __init__(self, *args, corpus, lang1, lang2, variety='base', challenge=False, **kwargs):
        lang1, lang2 = _order_langs(lang1, lang2)
        super().__init__(
            *args,
            name=f"{corpus}{'#challenge' if challenge else ''}@{lang1}-{lang2}@{variety}",
            **kwargs,
        )
        self.lang1 = lang1
        self.lang2 = lang2
        self.corpus = corpus
        self.variety = variety
        self.challenge = challenge

    def path_for(self, split, lang):
        split_path = ('challenge/' if self.challenge else '') + split
        return f"data/{self.corpus}/{self.variety}/{split_path}/{lang}.jsonl.bz2"
POS_TAGS = """ADJ
ADP
ADV
AUX
CCONJ
DET
INTJ
NOUN
NUM
PART
PRON
PROPN
PUNCT
SCONJ
SYM
VERB
X""".splitlines()


class WSDMTDataset(datasets.GeneratorBasedBuilder):
    BUILDER_CONFIG_CLASS = WSDMTConfig
    config: WSDMTConfig

    def _generate_examples(self, path_lang1, path_lang2):
        with bz2.open(path_lang1) as f1, bz2.open(path_lang2) as f2:
            for n, (line1, line2) in enumerate(zip(f1, f2)):
                sid1, data1 = self._read_json_line(line1)
                sid2, data2 = self._read_json_line(line2)
                assert sid1 == sid2, (
                    f"Different sentence id found for {self.config.lang1} and {self.config.lang2}: "
                    f"{sid1} != {sid2} at line {n}"
                )
                data_dict = {
                    'sid': sid1,
                    self.config.lang1: data1,
                    self.config.lang2: data2,
                }
                yield n, data_dict
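
    # Each JSONL record is expected to carry a sentence id, the raw sentence,
    # and a per-token annotation matrix, roughly (field order inferred from
    # the unpacking below; values are illustrative):
    #   {"sid": ..., "sentence": ...,
    #    "data": [[token, lemma, pos, sense, identified_as_sense, is_polysemous], ...]}
    # zip(*data) transposes those rows into parallel column tuples.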
    @classmethod
    def _read_json_line(cls, line):
        obj = json.loads(line)
        sid = obj.pop('sid')
        sentence = obj.pop('sentence')
        data = obj.pop('data')
        tokens, lemmas, pos_tags, senses, is_senses, is_polysemous, *_ = zip(*data)
        assert len(tokens) == len(lemmas) == len(pos_tags) == len(senses) == len(is_senses) == len(is_polysemous), (
            f"Inconsistent annotation lengths in sentence {sid}"
        )
        return sid, dict(
            sentence=sentence,
            tokens=tokens, lemmas=lemmas, pos_tags=pos_tags,
            sense=senses, identified_as_sense=is_senses, is_polysemous=is_polysemous,
        )
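
    # Note: the *_ catch-all in the unpacking above tolerates (and silently
    # drops) any extra per-token annotation columns beyond the six consumed.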
    def _info(self) -> DatasetInfo:
        language_features = dict(
            sentence=datasets.Value("string"),
            tokens=datasets.Sequence(datasets.Value("string")),
            sense=datasets.Sequence(datasets.Value("string")),
            identified_as_sense=datasets.Sequence(datasets.Value("bool")),
            is_polysemous=datasets.Sequence(datasets.Value("bool")),
            lemmas=datasets.Sequence(datasets.Value("string")),
            pos_tags=datasets.Sequence(datasets.ClassLabel(names=POS_TAGS)),
            # pos_tags=datasets.Sequence(datasets.Value("string")),
        )
        return datasets.DatasetInfo(
            description="empty description",
            features=datasets.Features(
                {
                    "sid": datasets.Value("string"),
                    self.config.lang1: language_features,
                    self.config.lang2: language_features,
                },
            ),
            supervised_keys=None,
            homepage="no-homepage",
            citation="no-citation",
        )
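
    # Split discovery: in challenge mode the split names are fixed; otherwise
    # they are read, one per line, from data/<corpus>/splits.txt in the repo.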
    def _split_generators(self, dl_manager: DownloadManager):
        if self.config.challenge:
            split_names = ['wsd_bias', 'adversarial']
        else:
            splits_file = dl_manager.download(f'data/{self.config.corpus}/splits.txt')
            with open(splits_file) as f:
                split_names = [line.rstrip() for line in f]
        urls = {
            split: {
                self.config.lang1: self.config.path_for(split, self.config.lang1),
                self.config.lang2: self.config.path_for(split, self.config.lang2),
            }
            for split in split_names
            if not (split == 'wsd_bias' and 'adv.' in self.config.lang1)
        }
        downloaded = dl_manager.download(urls)
        return [
            datasets.SplitGenerator(
                name=split,
                gen_kwargs=dict(
                    path_lang1=paths[self.config.lang1],
                    path_lang2=paths[self.config.lang2],
                ),
            )
            for split, paths in downloaded.items()
        ]
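

# Minimal smoke test when the script is run directly. As far as I can tell,
# script_version is the older load_dataset parameter that newer versions of
# the datasets library renamed to revision.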
if __name__ == '__main__':
    from datasets import load_dataset

    load_dataset('Valahaar/wsdmt', corpus='wmt', variety='all', lang1='en', lang2='de', script_version='main')
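    # A quick way to inspect the result (the split name 'train' is
    # hypothetical; it depends on the corpus's splits.txt):
    #   ds = load_dataset('Valahaar/wsdmt', corpus='wmt', variety='all',
    #                     lang1='en', lang2='de', script_version='main')
    #   print(ds['train'][0]['en']['sentence'])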