import datasets
import csv
import requests
import pandas as pd
import inspect
import copy
from .process_underscores import run
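
# key_to_entry maps BibTeX keys to full bibliography entries, fetched from a
# Dropbox-hosted JSON file (filtered_disrpt.json); the joined entries are used
# as the default citation text for the builder configs below.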
key_to_entry = requests.get('https://www.dropbox.com/scl/fi/85pnc7n6e4puoureavtzo/filtered_disrpt.json?rlkey=6cbgbe9vn2549eths7ah8gm7u&dl=1').json()
citation="\n".join(key_to_entry.values())
datasets_and_citations = {
    "deu.rst.pcc": "stede-neumann-2014-potsdam",
    "eng.dep.covdtb": "nishida-matsumoto-2022-domain",
    "eng.dep.scidtb": "yang-li-2018-scidtb",
    "eng.rst.gum": "Zeldes2017",
    "eng.rst.rstdt": "carlson-etal-2001-building",
    "eng.sdrt.stac": "asher-etal-2016-discourse",
    "eus.rst.ert": "IruskietaAranzabeIlarrazaEtAl2013",
    "fas.rst.prstc": "shahmohammadi2021persian",
    "fra.sdrt.annodis": "afantenos-etal-2012-empirical",
    "nld.rst.nldt": "redeker-etal-2012-multi",
    "por.rst.cstn": "CardosoMazieroRosarioCastroJorgeEtAl2011",
    "rus.rst.rrt": "toldova-etal-2017-rhetorical",
    "spa.rst.rststb": "da-cunha-etal-2011-development",
    "spa.rst.sctb": "cao-etal-2018-rst",
    "zho.dep.scidtb": "yi-etal-2021-unifying,cheng-li-2019-zero",
    "zho.rst.gcdt": "peng_gcdt_2022,peng_chinese_2022",
    "zho.rst.sctb": "cao-etal-2018-rst",
    "eng.pdtb.pdtb": "prasad-etal-2014-reflections",
    "eng.pdtb.tedm": "zeyrek-etal-2018-multilingual,zeyrek2019ted",
    "ita.pdtb.luna": "tonelli-etal-2010-annotation,RiccardiStepanovChowdhury2016",
    "por.pdtb.crpc": "CRPC-DB-Portuguese,genereux-etal-2012-introducing",
    "por.pdtb.tedm": "zeyrek-etal-2018-multilingual,zeyrek2019ted",
    "tha.pdtb.tdtb": "",
    "tur.pdtb.tdb": "zeyrek-webber-2008-discourse,zeyrek-kurfali-2017-tdb",
    "tur.pdtb.tedm": "zeyrek-etal-2018-multilingual,zeyrek2019ted",
    "zho.pdtb.cdtb": "Zhou2014",
}

class Config(datasets.BuilderConfig):
    citation = citation

files = [
    "deu.rst.pcc",
    "eng.dep.covdtb",
    "eng.dep.scidtb",
    "eng.pdtb.gum",
    "eng.pdtb.pdtb",
    "eng.pdtb.tedm",
    "eng.rst.gentle",
    "eng.rst.gum",
    "eng.rst.rstdt",
    "eng.sdrt.stac",
    "eus.rst.ert",
    "fas.rst.prstc",
    "fra.sdrt.annodis",
    "ita.pdtb.luna",
    "nld.rst.nldt",
    "por.pdtb.crpc",
    "por.pdtb.tedm",
    "por.rst.cstn",
    "rus.rst.rrt",
    "spa.rst.rststb",
    "spa.rst.sctb",
    "tha.pdtb.tdtb",
    "tur.pdtb.tdb",
    "tur.pdtb.tedm",
    "zho.dep.scidtb",
    "zho.pdtb.cdtb",
    "zho.rst.gcdt",
    "zho.rst.sctb",
]

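# Each corpus name above yields two builder configs, one per file type:
# "<corpus>.rels" (tab-separated relation files) and "<corpus>.conllu"
# (token-level segmentation), e.g. "eng.rst.gum.conllu".
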
def fix_mwe(sentence):
    """Resolve multiword-token range rows (ids like "1-2") in a column-wise sentence dict."""
    mwe = {}
    sentence['parent_mwe'] = []
    # First pass: remember the surface form of each range row for its component ids,
    # and record, per token, the multiword form it belongs to (empty string if none).
    for i, x in enumerate(sentence['id']):
        if '-' in x:
            for a in x.split('-'):
                mwe[a] = sentence['form'][i]
        sentence['parent_mwe'] += [mwe.get(x, '')]
    # Second pass: drop the range rows themselves from every column.
    for i, x in enumerate(sentence['id']):
        if "-" in x:
            for k, v in sentence.items():
                del v[i]
    return sentence
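
# Hedged sketch (not part of the original loader) of what fix_mwe does to a sentence
# containing the Spanish contraction "vámonos" = "vamos" + "nos":
#   fix_mwe({'id': ['1-2', '1', '2'], 'form': ['vámonos', 'vamos', 'nos']})
#   -> {'id': ['1', '2'], 'form': ['vamos', 'nos'], 'parent_mwe': ['vámonos', 'vámonos']}
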
def parse_conll_stream(file_stream):
    """Stream sentences from a CoNLL-U file as column-wise dicts of token values."""
    names = ['id', 'form', 'lemma', 'upos', 'xpos', 'feats', 'head', 'deprel', 'deps', 'misc', 'doc_id']
    sentence = {name: [] for name in names}
    mwe_id = []
    doc_id = None  # set by the first comment line containing "doc_id"
    for line in file_stream:
        line = line.strip()
        if line.startswith("#"):
            # Comment lines: only the document id is kept.
            if "doc_id" in line:
                doc_id = line.split('=')[-1].strip()
            continue
        if not line:
            # Blank line marks the end of a sentence.
            if sentence['id']:
                yield sentence
            sentence = {name: [] for name in names}
            continue
        token_data = line.split('\t') + [doc_id]
        for name, value in zip(names, token_data):
            if name == 'id' and not value.isnumeric():
                # Non-numeric ids (multiword ranges such as "3-4") are not appended.
                mwe_id = value.split('-')
            else:
                sentence[name].append(value)
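
# Rough sketch (hedged, not taken from the data files) of the input this expects:
# tab-separated CoNLL-U token lines, blank lines between sentences, and comment
# lines; any comment containing "doc_id" (e.g. "# newdoc_id = GUM_academic_art")
# supplies the doc_id value attached to every token of that document.
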
def get_kwarg_names(func):
    # Names of the parameters of `func` that declare a default value.
    return [k for k, v in inspect.signature(func).parameters.items() if v.default != v.empty]
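# For example, get_kwarg_names(lambda a, b=1: None) evaluates to ['b'].

# Candidate download URLs point at the raw data files of the DISRPT 2023 shared-task
# repository on GitHub; keys look like "eng.rst.gum-train.conllu".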
_URLs = {f'{task}-{split}.{type}': f"https://raw.githubusercontent.com/disrpt/sharedtask2023/main/data/{task}/{task}_{split}.{type}"
         for task in files for split in 'train dev test'.split() for type in ['rels', 'conllu']}
#_URLs = {k:v for k,v in _URLs.items() if requests.get(v).status_code!=404}
conllu_features = ['id', 'form', 'lemma', 'upos', 'xpos', 'feats', 'head', 'deprel', 'deps', 'misc', 'seg', 'doc_id']
feature_type = {
    "seg": datasets.features.Sequence(datasets.features.ClassLabel(names=["O", "B-Segment"])),
    'id': datasets.Value("string"),
    'doc_id': datasets.Value("string"),
}
conllu_features = datasets.Features({x: feature_type.get(x, datasets.Sequence(datasets.Value("string")))
                                     for x in conllu_features})

def map_seg(x):
    # Convert raw MISC column values into BIO-style segmentation labels.
    return [("B-Segment" if "beginseg=yes" in a.lower() else "O") for a in x]
def remove_type(x):
    # Strip the ".rels" / ".conllu" suffix from a config or file name.
    return x.replace(".rels", "").replace(".conllu", "")
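# For example, remove_type("eng.rst.gum.conllu") evaluates to "eng.rst.gum".
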
class Dataset(datasets.GeneratorBasedBuilder):
    BUILDER_CONFIGS = [
        Config(
            name=f"{n}.{type}",
            data_dir=f"{n}.{type}",
        ) for n in files for type in ["rels", "conllu"]
    ]

    def __init__(self, *args, **kwargs):
        # Replace BuilderConfig.__post_init__ with a no-op so config construction
        # skips the dataclass post-init hook.
        self.BUILDER_CONFIG_CLASS.__post_init__ = lambda x: x
        base_kwargs_names = get_kwarg_names(super().__init__)
        gen_kwargs = {}
        self.files = {}
        self.preprocessed_underscores = dict()
        # Keyword arguments the base builder does not accept (e.g. process_underscore,
        # paths used for underscore restoration) are kept aside for _generate_examples.
        for k, v in copy.deepcopy(kwargs).items():
            if k not in base_kwargs_names:
                gen_kwargs[k] = v
                del kwargs[k]
        self.gen_kwargs = gen_kwargs
        return super().__init__(*args, **kwargs)

    def _split_generators(self, dl_manager: datasets.DownloadManager):
        cfg_name = self.config.name.rsplit('.', 1)[0]
        data_dir = remove_type(self.config.data_dir)
        type = self.config.name.split('.')[-1]
        # Keep only the URLs for this corpus that actually exist: not every corpus
        # provides every split on the shared-task repository.
        urls = {k: v for (k, v) in _URLs.items() if cfg_name in k and requests.get(v).status_code != 404}
        data_file = dl_manager.download(urls)
        self.files = {**self.files, **data_file}
        splits_dict = {datasets.Split.TRAIN: 'train', datasets.Split.VALIDATION: 'dev', datasets.Split.TEST: 'test'}
        split_generators = [
            datasets.SplitGenerator(name=split, gen_kwargs={"filepath": data_file[f"{data_dir}-{key}.{type}"]})
            for split, key in splits_dict.items()
            if f"{data_dir}-{key}.{type}" in data_file
        ]
        return split_generators
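
    # self.files accumulates every downloaded path; _generate_examples later hands the
    # *.rels and *.conllu paths to the underscore-restoration step.
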
    def _info(self):
        return datasets.DatasetInfo(
            citation=key_to_entry.get(datasets_and_citations.get(remove_type(self.config.name)), None),
            features=(None if ".rels" in self.config.name else conllu_features)
        )

    def _generate_examples(self, filepath):
        print(filepath)
        corpus = self.config.name.split('.')[2]
        run_args = {
            'corpus': corpus,
            'rel_files': [v for k, v in self.files.items() if '.rels' in k],
            'dep_files': [v for k, v in self.files.items() if '.conllu' in k],
            **{k: v for k, v in self.gen_kwargs.items() if 'path' in k}
        }
        print('run_args', run_args)
        # Corpora whose distributed text is underscored for licensing reasons are
        # restored once per corpus before generation, unless process_underscore is disabled.
        if (corpus in ['rstdt', 'pdtb', 'cdtb', 'gum', 'tdb']
                and not self.preprocessed_underscores.get(corpus, False)
                and self.gen_kwargs.get('process_underscore', True)):
            run(**run_args)
            self.preprocessed_underscores[corpus] = True
        with open(filepath, encoding="utf-8") as f:
            if "conllu" in self.config.name:
                # Token-level configs: stream sentences and derive the segmentation labels.
                stream = parse_conll_stream(f)
                for i, row in enumerate(stream):
                    row['seg'] = map_seg(row['misc'])
                    row['doc_id'] = row['doc_id'][0]
                    yield i, row
            # Relation-level (.rels) configs: a tab-separated table with a header row.
            # (For conllu configs the stream above has already exhausted the file handle,
            # so this reader yields nothing.)
            reader = csv.DictReader(f, delimiter='\t', quoting=csv.QUOTE_NONE)
            for id_, row in enumerate(reader):
                if id_ == 0:
                    # The first data row is skipped.
                    continue
                yield id_, row
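
# Hedged usage sketch (not part of the original script; the path below is a placeholder
# for wherever this loader is hosted). Extra keyword arguments the base builder does not
# accept, such as process_underscore, are captured by the custom __init__ above:
#
#   import datasets
#   ds = datasets.load_dataset("path/to/this_loader.py", "eng.rst.gum.conllu",
#                              trust_remote_code=True, process_underscore=False)
#   print(ds["train"][0]["seg"])   # per-token "O" / "B-Segment" labels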