Tasks: Token Classification
Modalities: Text
Formats: parquet
Sub-tasks: part-of-speech
Size: 100K - 1M
ArXiv: 2104.07412
License:
Update info #1
opened by izhx

- README.md +9 -0
- build_parquet.py +117 -0
README.md CHANGED

@@ -219,3 +219,12 @@ configs:
       path: zh/test*

 ---
+
+## UDPOS of XTREME-R
+
+Generated by [build_parquet.py](https://huggingface.co/datasets/izhx/xtreme-r-udpos/blob/main/build_parquet.py)
+
+
+
+XTREME-R: Towards More Challenging and Nuanced Multilingual Evaluation
+https://arxiv.org/abs/2104.07412
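For reference, a minimal sketch of loading the published parquet files with the datasets library. The config and split names here are assumptions inferred from the `configs:` entries in the README (one config per language code, e.g. `zh` with `path: zh/test*`; test-only except English), not confirmed by this PR:

    from datasets import load_dataset

    # Hypothetical usage; 'zh' and 'test' are assumed from the README's
    # `configs:` section, where each language code is its own config.
    ds = load_dataset('izhx/xtreme-r-udpos', 'zh', split='test')
    print(ds[0]['tokens'], ds[0]['pos_tags'])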
build_parquet.py ADDED

@@ -0,0 +1,117 @@
"""Build parquet files for the UDPOS task of XTREME-R from UD treebanks."""

import csv
import glob
import os

import datasets


# UD POS languages of the original XTREME benchmark; the XTREME-R
# additions are appended in main().
_UD_POS_LANG = {
    "Afrikaans": 'af',
    "Arabic": 'ar',
    "Basque": 'eu',
    "Bulgarian": 'bg',
    "Dutch": 'nl',
    "English": 'en',
    "Estonian": 'et',
    "Finnish": 'fi',
    "French": 'fr',
    "German": 'de',
    "Greek": 'el',
    "Hebrew": 'he',
    "Hindi": 'hi',
    "Hungarian": 'hu',
    "Indonesian": 'id',
    "Italian": 'it',
    "Japanese": 'ja',
    "Kazakh": 'kk',
    "Korean": 'ko',
    "Chinese": 'zh',
    "Marathi": 'mr',
    "Persian": 'fa',
    "Portuguese": 'pt',
    "Russian": 'ru',
    "Spanish": 'es',
    "Tagalog": 'tl',
    "Tamil": 'ta',
    "Telugu": 'te',
    "Thai": 'th',
    "Turkish": 'tr',
    "Urdu": 'ur',
    "Vietnamese": 'vi',
    "Yoruba": 'yo',
}


# Sources of the UD treebank releases (v2.5 for XTREME, v2.7 for XTREME-R).
_DATA_URLS = {
    "2_5": "https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-3105/ud-treebanks-v2.5.tgz",
    "2_7": "https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-3424/ud-treebanks-v2.7.tgz",
}


def generate_examples(folder: str, lang: str, split: str):
    """Yields examples parsed from the CoNLL-U files of one language and split."""
    for path in glob.glob(f'{folder}/UD_{lang}*/*{split}.conllu'):
        # Exclude Arabic-NYUAD, which does not contain any word forms (only "_");
        # the four listed languages are kept unconditionally.
        if lang in ["Kazakh", "Tagalog", "Thai", "Yoruba"] or "NYUAD" not in path:
            print('read', path)
            with open(path, encoding='utf-8') as file:
                data = csv.reader(file, delimiter="\t", quoting=csv.QUOTE_NONE)
                tokens = []
                pos_tags = []
                for row in data:
                    # Token lines have 10 columns; "_" in the FORM or UPOS
                    # column marks multiword-token ranges and empty nodes,
                    # which are skipped.
                    if len(row) >= 10 and row[1] != "_" and row[3] != "_":
                        tokens.append(row[1])
                        pos_tags.append(row[3])
                    # A blank line ends a sentence.
                    if len(row) == 0 and len(tokens) > 0:
                        yield {"tokens": tokens, "pos_tags": pos_tags}
                        tokens = []
                        pos_tags = []
                # Flush the last sentence if the file lacks a trailing blank line.
                if tokens:
                    yield {"tokens": tokens, "pos_tags": pos_tags}


def main():
    features = datasets.Features({
        "tokens": datasets.Sequence(datasets.Value("string")),
        "pos_tags": datasets.Sequence(datasets.features.ClassLabel(names=[
            "ADJ",
            "ADP",
            "ADV",
            "AUX",
            "CCONJ",
            "DET",
            "INTJ",
            "NOUN",
            "NUM",
            "PART",
            "PRON",
            "PROPN",
            "PUNCT",
            "SCONJ",
            "SYM",
            "VERB",
            "X",
        ])),
    })

    path = 'ud-treebanks-v2.7/'
    if '2.7' in path:  # XTREME-R adds five languages on top of XTREME.
        _UD_POS_LANG.update({
            'Lithuanian': 'lt', 'Polish': 'pl', 'Ukrainian': 'uk', 'Wolof': 'wo', 'Romanian': 'ro',
        })
    for lang, code in _UD_POS_LANG.items():
        os.makedirs(f'{path}/parquet/{code}/', exist_ok=True)
        # Only English gets train/dev parquet files; every language has a test split.
        splits = ['test'] if code != 'en' else ['train', 'dev', 'test']
        for split in splits:
            ds = datasets.Dataset.from_generator(
                generate_examples, features=features, keep_in_memory=True, gen_kwargs=dict(
                    folder=path, lang=lang, split=split
                )
            )
            sp = f'{path}/parquet/{code}/{split}.parquet'
            ds.to_parquet(sp)
            print('save', sp)


if __name__ == '__main__':
    main()
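For context, a minimal sketch of the CoNLL-U layout that generate_examples parses. The treebank directory and file names below are illustrative, not from the UD release; only the column structure (10 tab-separated fields, FORM in column 2, UPOS in column 4, blank line between sentences) matches what the script expects:

    import pathlib

    sample = (
        "# sent_id = 1\n"
        "# text = Hello world\n"
        "1\tHello\thello\tINTJ\tUH\t_\t0\troot\t_\t_\n"
        "2\tworld\tworld\tNOUN\tNN\t_\t1\tvocative\t_\t_\n"
        "\n"
    )
    # Hypothetical treebank folder matching the glob UD_{lang}*/*{split}.conllu.
    pathlib.Path('UD_English-Demo').mkdir(exist_ok=True)
    pathlib.Path('UD_English-Demo/en_demo-ud-test.conllu').write_text(sample)

    # Comment lines (a single column) and the blank sentence separator are
    # handled by the column checks inside generate_examples.
    for example in generate_examples('.', 'English', 'test'):
        print(example)  # {'tokens': ['Hello', 'world'], 'pos_tags': ['INTJ', 'NOUN']}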