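"""Hugging Face `datasets` loading script for the DISRPT 2023 shared task.

Every corpus is exposed in two configurations: `<corpus>.rels` (discourse
relation classification, tab-separated) and `<corpus>.conllu` (discourse unit
segmentation, CoNLL-U).
"""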
import copy
import csv
import inspect

import datasets
import requests
from .process_underscores import run
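# BibTeX entries for all DISRPT corpora, keyed by citation key; fetched once
# at import time from a hosted JSON dump of the bibliography.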
key_to_entry = requests.get(
    "https://www.dropbox.com/scl/fi/85pnc7n6e4puoureavtzo/filtered_disrpt.json?rlkey=6cbgbe9vn2549eths7ah8gm7u&dl=1"
).json()
# The concatenated bibliography serves as the default citation for every
# config; `_info` narrows it to the per-corpus entry below.
citation = "\n".join(key_to_entry.values())
# Citation key(s) in `key_to_entry` for each corpus (comma-separated when a
# corpus has several references).
datasets_and_citations = {
"deu.rst.pcc": "stede-neumann-2014-potsdam",
"eng.dep.covdtb": "nishida-matsumoto-2022-domain",
"eng.dep.scidtb": "yang-li-2018-scidtb",
"eng.rst.gum": "Zeldes2017",
"eng.rst.rstdt": "carlson-etal-2001-building",
"eng.sdrt.stac": "asher-etal-2016-discourse",
"eus.rst.ert": "IruskietaAranzabeIlarrazaEtAl2013",
"fas.rst.prstc": "shahmohammadi2021persian",
"fra.sdrt.annodis": "afantenos-etal-2012-empirical",
"nld.rst.nldt": "redeker-etal-2012-multi",
"por.rst.cstn": "CardosoMazieroRosarioCastroJorgeEtAl2011",
"rus.rst.rrt": "toldova-etal-2017-rhetorical",
"spa.rst.rststb": "da-cunha-etal-2011-development",
"spa.rst.sctb": "cao-etal-2018-rst",
"zho.dep.scidtb": "yi-etal-2021-unifying,cheng-li-2019-zero",
"zho.rst.gcdt": "peng_gcdt_2022,peng_chinese_2022",
"zho.rst.sctb": "cao-etal-2018-rst",
"eng.pdtb.pdtb": "prasad-etal-2014-reflections",
"eng.pdtb.tedm": "zeyrek-etal-2018-multilingual,zeyrek2019ted",
"ita.pdtb.luna": "tonelli-etal-2010-annotation,RiccardiStepanovChowdhury2016",
"por.pdtb.crpc": "CRPC-DB-Portuguese,genereux-etal-2012-introducing",
"por.pdtb.tedm": "zeyrek-etal-2018-multilingual,zeyrek2019ted",
"tha.pdtb.tdtb": "",
"tur.pdtb.tdb": "zeyrek-webber-2008-discourse,zeyrek-kurfali-2017-tdb",
"tur.pdtb.tedm": "zeyrek-etal-2018-multilingual,zeyrek2019ted",
"zho.pdtb.cdtb": "Zhou2014"
}
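# BuilderConfig subclass whose class-level `citation` defaults to the full
# concatenated bibliography.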
class Config(datasets.BuilderConfig):
    citation = citation
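# Corpora distributed with the DISRPT 2023 shared task.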
files = [
"eng.dep.covdtb",
"eng.dep.scidtb",
"eng.pdtb.pdtb",
"eng.pdtb.tedm",
"eng.rst.gum",
"eng.rst.rstdt",
"eng.sdrt.stac",
"deu.rst.pcc",
"eus.rst.ert",
"fas.rst.prstc",
"fra.sdrt.annodis",
"ita.pdtb.luna",
"nld.rst.nldt",
"por.pdtb.crpc",
"por.pdtb.tedm",
"por.rst.cstn",
"rus.rst.rrt",
"spa.rst.rststb",
"spa.rst.sctb",
"tha.pdtb.tdtb",
"tur.pdtb.tdb",
"tur.pdtb.tedm",
"zho.dep.scidtb",
"zho.pdtb.cdtb",
"zho.rst.gcdt",
"zho.rst.sctb",
]
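# Incrementally parse an open CoNLL-U file into sentence dicts, one list of
# per-token values per column.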
def parse_conll_stream(file_stream):
    names = ['id', 'form', 'lemma', 'upos', 'xpos', 'feats', 'head', 'deprel', 'deps', 'misc']
    sentence = {name: [] for name in names}
    for line in file_stream:
        line = line.strip()
        if line.startswith("#"):  # skip metadata lines
            continue
        if not line:  # a blank line terminates the current sentence
            if sentence['id']:
                yield sentence
                sentence = {name: [] for name in names}
            continue
        token_data = line.split('\t')
        for name, value in zip(names, token_data):
            sentence[name].append(value)
    # Flush the final sentence when the file does not end with a blank line.
    if sentence['id']:
        yield sentence
def get_kwarg_names(func):
    # Names of the parameters of `func` that have a default value.
    return [k for k, v in inspect.signature(func).parameters.items()
            if v.default is not inspect.Parameter.empty]
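# Candidate URLs for every corpus/split/file-type combination; combinations
# that do not exist upstream (e.g. missing train splits) are filtered out by
# HTTP status in `_split_generators`.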
_URLs = {f'{task}-{split}.{ftype}': f"https://raw.githubusercontent.com/disrpt/sharedtask2023/main/data/{task}/{task}_{split}.{ftype}"
         for task in files for split in 'train dev test'.split() for ftype in ['rels', 'conllu']}
conllu_columns = ['id', 'form', 'lemma', 'upos', 'xpos', 'feats', 'head', 'deprel', 'deps', 'misc', 'seg']
feature_type = {"seg": datasets.features.Sequence(
                    datasets.features.ClassLabel(names=["O", "B-Segment"])),
                'id': datasets.Value("string")}
conllu_features = datasets.Features({x: feature_type.get(x, datasets.Sequence(datasets.Value("string")))
                                     for x in conllu_columns})
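# Map CoNLL-U MISC values to BIO segmentation labels ("BeginSeg=Yes" opens a
# new discourse unit).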
def map_seg(x):
    return [("B-Segment" if "beginseg=yes" in a.lower() else "O") for a in x]

def remove_type(x):
    # "eng.rst.gum.rels" -> "eng.rst.gum"
    return x.replace(".rels", "").replace(".conllu", "")
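# One builder config per corpus and file type, e.g. "eng.rst.gum.rels" and
# "eng.rst.gum.conllu".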
class Dataset(datasets.GeneratorBasedBuilder):
    BUILDER_CONFIGS = [
        Config(
            name=f"{n}.{ftype}",
            data_dir=f"{n}.{ftype}",
        ) for n in files for ftype in ["rels", "conllu"]
    ]
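    # Keyword arguments the base builder does not accept are collected into
    # `self.gen_kwargs` rather than passed through to `super().__init__`.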
    def __init__(self, *args, **kwargs):
        # Make the config class's __post_init__ a no-op so instantiating
        # `Config` skips the dataclass validation step.
        self.BUILDER_CONFIG_CLASS.__post_init__ = lambda x: x
        base_kwargs_names = get_kwarg_names(super().__init__)
        gen_kwargs = {}
        self.files = {}
        self.preprocessed_underscores = {}
        for k, v in copy.deepcopy(kwargs).items():
            if k not in base_kwargs_names:
                gen_kwargs[k] = v
                del kwargs[k]
        self.gen_kwargs = gen_kwargs
        super().__init__(*args, **kwargs)
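    # Download every file that exists for this corpus (both .rels and .conllu,
    # all splits); `self.files` keeps the local paths so underscore
    # restoration can later see the full corpus.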
    def _split_generators(self, dl_manager: datasets.DownloadManager):
        cfg_name = self.config.name.rsplit('.', 1)[0]
        data_dir = remove_type(self.config.data_dir)
        ftype = self.config.name.split('.')[-1]
        urls = {k: v for (k, v) in _URLs.items() if cfg_name in k and requests.get(v).status_code != 404}
        data_file = dl_manager.download(urls)
        self.files = {**self.files, **data_file}
        train_key = f"{data_dir}-train.{ftype}"
        if train_key in data_file:
            train = [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": data_file[train_key]})]
        else:
            train = []  # some corpora are distributed without a train split
        return train + [
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": data_file[f"{data_dir}-dev.{ftype}"]}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": data_file[f"{data_dir}-test.{ftype}"]}),
        ]
    def _info(self):
        return datasets.DatasetInfo(
            citation=key_to_entry.get(datasets_and_citations.get(remove_type(self.config.name)), None),
            features=(None if ".rels" in self.config.name else conllu_features),
        )
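    # Corpora whose underlying text is distributed as underscores for
    # licensing reasons are restored via `process_underscores.run` before the
    # first example is generated.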
    def _generate_examples(self, filepath):
        corpus = self.config.name.split('.')[2]
        run_args = {
            'corpus': corpus,
            'rel_files': [v for k, v in self.files.items() if 'rels' in k],
            'dep_files': [v for k, v in self.files.items() if 'conllu' in k],
        }
        if corpus in ['rstdt', 'pdtb', 'cdtb', 'gum', 'tdb'] and not self.preprocessed_underscores.get(corpus, False):
            run(**run_args)
            self.preprocessed_underscores[corpus] = True
        with open(filepath, encoding="utf-8") as f:
            if "conllu" in self.config.name:
                for i, row in enumerate(parse_conll_stream(f)):
                    row['seg'] = map_seg(row['misc'])
                    yield i, row
                return  # do not fall through to the .rels reader
            reader = csv.DictReader(f, delimiter='\t', quoting=csv.QUOTE_NONE)
            for id_, row in enumerate(reader):
                if id_ == 0:  # note: the first record after the header is skipped
                    continue
                yield id_, row
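# Minimal usage sketch (assumes a hypothetical local layout in which this
# script and process_underscores.py live in a "disrpt/" loader directory):
#
#   import datasets
#   ds = datasets.load_dataset("./disrpt", name="eng.rst.gum.conllu", trust_remote_code=True)
#   print(ds["validation"][0]["seg"])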