vesteinn committed on
Commit e002e49 (1 parent: 4169d60)

Added data and loading script

Files changed (2)
  1. fo.revised.txt +0 -0
  2. sosialurin-faroese-pos.py +505 -0
fo.revised.txt ADDED
The diff for this file is too large to render. See raw diff
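
Judging from the loading script below, fo.revised.txt is a CoNLL-style file: one token per line followed by its POS tag, tab separated, with blank lines between sentences. A hypothetical two-line excerpt (the tokens and their tags are illustrative, not taken from the data; the tag strings come from the LABELS list in the script):

Tað	PDNSN
er	VNPS3
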
sosialurin-faroese-pos.py ADDED
@@ -0,0 +1,505 @@
# coding=utf-8
# Copyright 2020 HuggingFace Datasets Authors.
# Modified by Vésteinn Snæbjarnarson 2021
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Lint as: python3

import datasets

logger = datasets.logging.get_logger(__name__)

LABELS = [
    "ACSFPA", "ACSFPD", "ACSFPN", "ACSFSA", "ACSFSD", "ACSFSN",
    "ACSMPA", "ACSMPD", "ACSMPN", "ACSMSA", "ACSMSD", "ACSMSN",
    "ACSNPA", "ACSNPD", "ACSNPN", "ACSNSA", "ACSNSD", "ACSNSN",
    "ACWFPA", "ACWFPD", "ACWFPN", "ACWFSA", "ACWFSD", "ACWFSN",
    "ACWMPD", "ACWMPN", "ACWMSA", "ACWMSN", "ACWNPA", "ACWNPD",
    "ACWNPN", "ACWNSA", "ACWNSN", "AI",
    "APSFPA", "APSFPD", "APSFPN", "APSFSA", "APSFSD", "APSFSN",
    "APSMPA", "APSMPD", "APSMPN", "APSMSA", "APSMSD", "APSMSN",
    "APSNPA", "APSNPD", "APSNPN", "APSNSA", "APSNSD", "APSNSN",
    "APWFPA", "APWFPD", "APWFPG", "APWFPN", "APWFSA", "APWFSD",
    "APWFSG", "APWFSN", "APWMPA", "APWMPD", "APWMPN", "APWMSA",
    "APWMSD", "APWMSN", "APWNPA", "APWNPD", "APWNPN", "APWNSA",
    "APWNSD", "APWNSN",
    "ASSFPD", "ASSFPN", "ASSFSA", "ASSFSD", "ASSFSN", "ASSMPA",
    "ASSMPN", "ASSMSA", "ASSMSD", "ASSMSN", "ASSNPA", "ASSNPD",
    "ASSNPN", "ASSNSA", "ASSNSD", "ASSNSN",
    "ASWFPA", "ASWFPD", "ASWFPN", "ASWFSA", "ASWFSD", "ASWFSN",
    "ASWMPA", "ASWMPD", "ASWMPN", "ASWMSA", "ASWMSD", "ASWMSN",
    "ASWNPA", "ASWNPD", "ASWNPN", "ASWNSA", "ASWNSD", "ASWNSN",
    "C", "CI", "CR", "DCG", "DCN", "DG", "DI", "DN", "DSG", "DSN",
    "F", "KC", "KE", "KO", "KQ", "M",
    "NC", "NCFPA", "NCFPD", "NCFPN", "NCFSA", "NCFSN",
    "NCMPA", "NCMPD", "NCMPG", "NCMPN", "NCMSA", "NCMSN",
    "NCNPA", "NCNPD", "NCNPN", "NCNSA", "NCNSD", "NCNSN",
    "NO", "NP", "NR",
    "PBFPA", "PBFPD", "PBFPN", "PBFSA", "PBFSD", "PBFSN",
    "PBMPA", "PBMPD", "PBMPN", "PBMSA", "PBMSD", "PBMSN",
    "PBNPA", "PBNPD", "PBNPN", "PBNSA", "PBNSD", "PBNSN",
    "PDFPA", "PDFPD", "PDFPN", "PDFSA", "PDFSD", "PDFSN",
    "PDMPA", "PDMPD", "PDMPN", "PDMSA", "PDMSD", "PDMSN",
    "PDNPA", "PDNPD", "PDNPN", "PDNSA", "PDNSD", "PDNSN",
    "PEMPA", "PEMSA", "PENSA", "PENSG",
    "PIFPA", "PIFPD", "PIFPN", "PIFSA", "PIFSD", "PIFSN",
    "PIMPA", "PIMPD", "PIMPN", "PIMSA", "PIMSD", "PIMSN",
    "PINPA", "PINPD", "PINPN", "PINSA", "PINSD", "PINSN",
    "PP1PA", "PP1PD", "PP1PG", "PP1PN", "PP1SA", "PP1SD",
    "PP1SG", "PP1SN", "PP2PG", "PP2PN", "PP2SA", "PP2SD",
    "PP2SG", "PP2SN",
    "PPFPA", "PPFPD", "PPFPG", "PPFPN", "PPFSA", "PPFSD",
    "PPFSG", "PPFSN", "PPMPA", "PPMPD", "PPMPG", "PPMPN",
    "PPMSA", "PPMSD", "PPMSG", "PPMSN", "PPNPA", "PPNPD",
    "PPNPG", "PPNPN", "PPNSA", "PPNSD", "PPNSG", "PPNSN",
    "PQFPA", "PQFPN", "PQFSA", "PQFSD", "PQFSN", "PQMPN",
    "PQMSA", "PQMSD", "PQMSN", "PQNSA", "PQNSD", "PQNSN",
    "SFPA", "SFPAA", "SFPAP", "SFPD", "SFPDA", "SFPDAP",
    "SFPDP", "SFPG", "SFPGP", "SFPN", "SFPNA", "SFPNP",
    "SFSA", "SFSAA", "SFSAAP", "SFSAP", "SFSD", "SFSDA",
    "SFSDAP", "SFSDP", "SFSG", "SFSGA", "SFSGP", "SFSN",
    "SFSNA", "SFSNAP", "SFSNP",
    "SMPA", "SMPAA", "SMPD", "SMPDA", "SMPDP", "SMPG",
    "SMPGA", "SMPN", "SMPNA", "SMSA", "SMSAA", "SMSAP",
    "SMSD", "SMSDA", "SMSDAP", "SMSDP", "SMSG", "SMSGA",
    "SMSGP", "SMSN", "SMSNA", "SMSNAP", "SMSNP",
    "SNPA", "SNPAA", "SNPD", "SNPDA", "SNPDP", "SNPG",
    "SNPGA", "SNPN", "SNPNA", "SNPNP", "SNSA", "SNSAA",
    "SNSAAP", "SNSAP", "SNSD", "SNSDA", "SNSDAP", "SNSDP",
    "SNSG", "SNSGA", "SNSGP", "SNSN", "SNSNA", "SNSNAP",
    "SNSNP", "SX", "SXP", "SXSD", "SXSG", "TS", "TT",
    "VAFPA", "VAFPD", "VAFPN", "VAFSA", "VAFSD", "VAFSN",
    "VAMPA", "VAMPD", "VAMPN", "VAMSA", "VAMSD", "VAMSN",
    "VANPA", "VANPD", "VANPN", "VANSA", "VANSD", "VANSN",
    "VE", "VEAP", "VEAS2", "VEAS3", "VEPP", "VEPS1",
    "VEPS2", "VEPS3", "VI", "VMP", "VMS",
    "VNAP", "VNAS1", "VNAS2", "VNAS3", "VNPP", "VNPS1",
    "VNPS2", "VNPS3", "VP", "W", "X",
]

_CITATION = """\
@misc{sosialurin-pos,
  title  = {Marking av teldutøkum tekstsavn},
  author = {Zakaris Svabo Hansen and Heini Justinussen and Mortan Ólason},
  url    = {http://ark.axeltra.com/index.php?type=person&lng=en&id=18},
  year   = {2004}
}
"""

_DESCRIPTION = """\
The corpus consists of roughly 100,000 words of text from the Faroese newspaper Sosialurin. Each word is tagged with grammatical information (word class, gender, number, etc.).
"""

# Use resolve/ rather than blob/ so the raw file is downloaded instead of the
# HTML page that renders it.
_URL = "https://huggingface.co/datasets/vesteinn/sosialurin-faroese-pos/resolve/main/"
_TRAINING_FILE = "fo.revised.txt"


class SosialurinPOSConfig(datasets.BuilderConfig):
    """BuilderConfig for sosialurin-faroese-pos."""

    def __init__(self, **kwargs):
        """BuilderConfig for sosialurin-faroese-pos.

        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        super(SosialurinPOSConfig, self).__init__(**kwargs)


class SosialurinPOS(datasets.GeneratorBasedBuilder):
    """sosialurin-faroese-pos dataset."""

    BUILDER_CONFIGS = [
        SosialurinPOSConfig(
            name="sosialurin-faroese-pos",
            version=datasets.Version("0.1.0"),
            description="sosialurin-faroese-pos dataset",
        ),
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    # The feature key must match what _generate_examples
                    # yields; these are POS tags, not NER tags.
                    "pos_tags": datasets.Sequence(
                        datasets.features.ClassLabel(names=LABELS)
                    ),
                }
            ),
            supervised_keys=None,
            homepage="http://ark.axeltra.com/index.php?type=person&lng=en&id=18",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        urls_to_download = {
            "train": f"{_URL}{_TRAINING_FILE}",
        }
        downloaded_files = dl_manager.download_and_extract(urls_to_download)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": downloaded_files["train"]},
            ),
        ]

    def _generate_examples(self, filepath):
        logger.info("⏳ Generating examples from = %s", filepath)
        with open(filepath, encoding="utf-8") as f:
            guid = 0
            tokens = []
            pos_tags = []
            for line in f:
                if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                    if tokens:
                        yield guid, {
                            "id": str(guid),
                            "tokens": tokens,
                            "pos_tags": pos_tags,
                        }
                        guid += 1
                        tokens = []
                        pos_tags = []
                else:
                    # Each line holds a token and its tag, tab separated.
                    splits = line.split("\t")
                    tokens.append(splits[0])
                    try:
                        pos_tags.append(splits[1].rstrip())
                    except IndexError:
                        # Surface the malformed line before failing.
                        print(splits)
                        raise
            # Last example, if the file does not end with a blank line.
            if tokens:
                yield guid, {
                    "id": str(guid),
                    "tokens": tokens,
                    "pos_tags": pos_tags,
                }
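
Once the script and data file are in the repo, the dataset can be built through the datasets library. A minimal usage sketch, assuming the repo id from _URL above and the split and feature names defined by the script:

from datasets import load_dataset

# Download the data file and run the loading script.
ds = load_dataset("vesteinn/sosialurin-faroese-pos")

# Inspect the first tagged sentence.
example = ds["train"][0]
print(example["tokens"][:5])

# pos_tags are ClassLabel ids; map them back to tag strings.
tag_names = ds["train"].features["pos_tags"].feature.names
print([tag_names[i] for i in example["pos_tags"][:5]])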
+ }