SuzanaB committed on
Commit
0e7b167
1 Parent(s): d3c436a

Add data and dataset loading script

Files changed (3)
  1. data_ner.zip +3 -0
  2. data_ud.zip +3 -0
  3. ssj500k.py +306 -0
data_ner.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9a57b6413a7fc87612ba3dc4b8b42a4ccd090edd1bad0894ec9a36453449077f
+ size 2034756
data_ud.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:51075b8f311dbd0e493ac8a1cab4f824bd8dc4e3ad212d108494a5c147c889e8
+ size 1289684
ssj500k.py ADDED
@@ -0,0 +1,306 @@
+ # coding=utf-8
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the 'License');
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an 'AS IS' BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+
+ import os
+
+ import datasets
+
+
+ _CITATION = ''
+ _DESCRIPTION = """
+ """
+ _HOMEPAGE = ''
+ _LICENSE = ''
+
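+ # The 'ner' and 'upos' configurations download the same archive
+ # (data_ner.zip); only the 'ud' configuration uses data_ud.zip.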
+ _URLs = {
+     'ner': 'https://huggingface.co/datasets/classla/ssj500k/raw/main/data_ner.zip',
+     'upos': 'https://huggingface.co/datasets/classla/ssj500k/raw/main/data_ner.zip',
+     'ud': 'https://huggingface.co/datasets/classla/ssj500k/raw/main/data_ud.zip'
+ }
+
+ _DATA_DIRS = {
+     'ner': 'data_ner',
+     'upos': 'data_ner',
+     'ud': 'data_ud'
+ }
+
+
+ class Ssj500K(datasets.GeneratorBasedBuilder):
+     VERSION = datasets.Version('1.0.0')
+
+     BUILDER_CONFIGS = [
+         datasets.BuilderConfig(
+             name='upos',
+             version=VERSION,
+             description=''
+         ),
+         datasets.BuilderConfig(
+             name='ner',
+             version=VERSION,
+             description=''
+         ),
+         datasets.BuilderConfig(
+             name='ud',
+             version=VERSION,
+             description=''
+         )
+     ]
+
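+     # Configuration used when load_dataset() is called without an explicit name.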
+     DEFAULT_CONFIG_NAME = 'ner'
+
+     def _info(self):
+         if self.config.name == "upos":
+             features = datasets.Features(
+                 {
+                     'sent_id': datasets.Value('string'),
+                     'text': datasets.Value('string'),
+                     'tokens': datasets.Sequence(datasets.Value('string')),
+                     'lemmas': datasets.Sequence(datasets.Value('string')),
+                     'xpos_tags': datasets.Sequence(datasets.Value('string')),
+                     'upos_tags': datasets.Sequence(
+                         datasets.features.ClassLabel(
+                             names=[
+                                 'SCONJ',
+                                 'ADP',
+                                 'ADV',
+                                 'NUM',
+                                 'ADJ',
+                                 'PRON',
+                                 'DET',
+                                 'X',
+                                 'PART',
+                                 'NOUN',
+                                 'CCONJ',
+                                 'PROPN',
+                                 'PUNCT',
+                                 'AUX',
+                                 'VERB',
+                                 'INTJ'
+                             ]
+                         )
+                     ),
+                     'feats': datasets.Sequence(datasets.Value('string')),
+                     'iob_tags': datasets.Sequence(datasets.Value('string'))
+                 }
+             )
+         elif self.config.name == "ner":
+             features = datasets.Features(
+                 {
+                     'sent_id': datasets.Value('string'),
+                     'text': datasets.Value('string'),
+                     'tokens': datasets.Sequence(datasets.Value('string')),
+                     'lemmas': datasets.Sequence(datasets.Value('string')),
+                     'xpos_tags': datasets.Sequence(datasets.Value('string')),
+                     'upos_tags': datasets.Sequence(datasets.Value('string')),
+                     'feats': datasets.Sequence(datasets.Value('string')),
+                     'iob_tags': datasets.Sequence(
+                         datasets.features.ClassLabel(
+                             names=[
+                                 'I-per',
+                                 'O',
+                                 'I-org',
+                                 'B-loc',
+                                 'B-deriv-per',
+                                 'I-loc',
+                                 'I-deriv-per',
+                                 'B-org',
+                                 'B-per',
+                                 'B-misc',
+                                 'I-misc'
+                             ]
+                         )
+                     )
+                 }
+             )
+         else:
+             features = datasets.Features(
+                 {
+                     'sent_id': datasets.Value('string'),
+                     'text': datasets.Value('string'),
+                     'tokens': datasets.Sequence(datasets.Value('string')),
+                     'lemmas': datasets.Sequence(datasets.Value('string')),
+                     'xpos_tags': datasets.Sequence(datasets.Value('string')),
+                     'upos_tags': datasets.Sequence(datasets.Value('string')),
+                     'feats': datasets.Sequence(datasets.Value('string')),
+                     'iob_tags': datasets.Sequence(datasets.Value('string')),
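+                     # Dependency relations; subtype labels use '_' where UD uses ':'
+                     # (e.g. 'flat_foreign' for 'flat:foreign', 'cc_preconj' for 'cc:preconj').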
+                     'uds': datasets.Sequence(
+                         datasets.features.ClassLabel(
+                             names=[
+                                 'nsubj', 'root', 'csubj', 'flat', 'aux', 'fixed', 'ccomp', 'discourse', 'nmod', 'amod',
+                                 'obj', 'nummod', 'iobj', 'mark', 'advmod', 'xcomp', 'acl', 'obl', 'flat_foreign', 'det',
+                                 'cop', 'cc', 'advcl', 'expl', 'flat_name', 'appos', 'cc_preconj', 'parataxis', 'conj',
+                                 'punct', 'case', 'dep'
+                             ]
+                         )
+                     )
+                 }
+             )
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             supervised_keys=None,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         """Returns SplitGenerators."""
+         data_dir = os.path.join(dl_manager.download_and_extract(_URLs[self.config.name]), _DATA_DIRS[self.config.name])
+
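+         # The 'ud' configuration ships CoNLL-U Plus (.conllup) files with an
+         # extra '# global.columns' header line; 'ner' and 'upos' share plain
+         # CoNLL-U (.conllu) files.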
+         if self.config.name == 'ud':
+             training_file = 'train_ner_ud.conllup'
+             dev_file = 'dev_ner_ud.conllup'
+             test_file = 'test_ner_ud.conllup'
+         else:
+             training_file = 'train_ner.conllu'
+             dev_file = 'dev_ner.conllu'
+             test_file = 'test_ner.conllu'
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN, gen_kwargs={
+                     'filepath': os.path.join(data_dir, training_file),
+                     'split': 'train'}
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION, gen_kwargs={
+                     'filepath': os.path.join(data_dir, dev_file),
+                     'split': 'dev'}
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST, gen_kwargs={
+                     'filepath': os.path.join(data_dir, test_file),
+                     'split': 'test'}
+             ),
+         ]
+
+     def _generate_examples(self, filepath, split):
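+         # Both branches stream sentences: a '# sent_id' comment flushes the
+         # previously collected sentence, and token lines are split on tabs in
+         # CoNLL-U column order (ID, FORM, LEMMA, UPOS, XPOS, FEATS, HEAD,
+         # DEPREL, DEPS, MISC).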
+         if self.config.name == 'ud':
+             with open(filepath, encoding='utf-8') as f:
+                 sent_id = ''
+                 text = ''
+                 tokens = []
+                 lemmas = []
+                 xpos_tags = []
+                 upos_tags = []
+                 feats = []
+                 iob_tags = []
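+                 # NOTE: iob_tags is never filled in this branch, so 'ud'
+                 # examples carry empty IOB sequences.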
+                 uds = []
+                 data_id = 0
+                 for line in f:
+                     if line and not line == '\n' and not line.startswith('# global.columns'):
+                         if line.startswith('#'):
+                             if line.startswith('# sent_id'):
+                                 if tokens:
+                                     yield data_id, {
+                                         'sent_id': sent_id,
+                                         'text': text,
+                                         'tokens': tokens,
+                                         'lemmas': lemmas,
+                                         'upos_tags': upos_tags,
+                                         'xpos_tags': xpos_tags,
+                                         'feats': feats,
+                                         'iob_tags': iob_tags,
+                                         'uds': uds
+                                     }
+                                     tokens = []
+                                     lemmas = []
+                                     upos_tags = []
+                                     xpos_tags = []
+                                     feats = []
+                                     iob_tags = []
+                                     uds = []
+                                     data_id += 1
+                                 sent_id = line.split(' = ')[1].strip()
+                             elif line.startswith('# text'):
+                                 text = line.split(' = ')[1].strip()
+                         elif not line.startswith('_'):
+                             splits = line.split('\t')
+                             tokens.append(splits[1].strip())
+                             lemmas.append(splits[2].strip())
+                             upos_tags.append(splits[3].strip())
+                             xpos_tags.append(splits[4].strip())
+                             feats.append(splits[5].strip())
+                             uds.append(splits[7].strip())
+
+                 yield data_id, {
+                     'sent_id': sent_id,
+                     'text': text,
+                     'tokens': tokens,
+                     'lemmas': lemmas,
+                     'upos_tags': upos_tags,
+                     'xpos_tags': xpos_tags,
+                     'feats': feats,
+                     'iob_tags': iob_tags,
+                     'uds': uds
+                 }
+         else:
+             with open(filepath, encoding='utf-8') as f:
+                 sent_id = ''
+                 text = ''
+                 tokens = []
+                 lemmas = []
+                 xpos_tags = []
+                 upos_tags = []
+                 feats = []
+                 iob_tags = []
+                 data_id = 0
+                 for line in f:
+                     if line and not line == '\n':
+                         if line.startswith('#'):
+                             if line.startswith('# sent_id'):
+                                 if tokens:
+                                     yield data_id, {
+                                         'sent_id': sent_id,
+                                         'text': text,
+                                         'tokens': tokens,
+                                         'lemmas': lemmas,
+                                         'upos_tags': upos_tags,
+                                         'xpos_tags': xpos_tags,
+                                         'feats': feats,
+                                         'iob_tags': iob_tags
+                                     }
+                                     tokens = []
+                                     lemmas = []
+                                     upos_tags = []
+                                     xpos_tags = []
+                                     feats = []
+                                     iob_tags = []
+                                     data_id += 1
+                                 sent_id = line.split(' = ')[1].strip()
+                             elif line.startswith('# text'):
+                                 text = line.split(' = ')[1].strip()
+                         elif not line.startswith('_'):
+                             splits = line.split('\t')
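+                             # The IOB NER tag sits in the tenth tab-separated
+                             # field (index 9, the MISC slot of plain CoNLL-U).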
+                             tokens.append(splits[1].strip())
+                             lemmas.append(splits[2].strip())
+                             upos_tags.append(splits[3].strip())
+                             xpos_tags.append(splits[4].strip())
+                             feats.append(splits[5].strip())
+                             iob_tags.append(splits[9].strip())
+
+                 yield data_id, {
+                     'sent_id': sent_id,
+                     'text': text,
+                     'tokens': tokens,
+                     'lemmas': lemmas,
+                     'upos_tags': upos_tags,
+                     'xpos_tags': xpos_tags,
+                     'feats': feats,
+                     'iob_tags': iob_tags
+                 }
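
Once this commit is on the Hub, the dataset can be loaded by configuration name. A minimal sketch, assuming the repository id classla/ssj500k that the _URLs above point at:

    from datasets import load_dataset

    # 'ner' is the default configuration; 'upos' and 'ud' are also available
    dataset = load_dataset('classla/ssj500k', 'ner')
    print(dataset['train'][0]['tokens'])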