# coding=utf-8
"""CEN dataset."""

import csv
import datasets

_DESCRIPTION = "CEN: a Polish named-entity recognition corpus from CLARIN-PL, annotated with the n82 tagset and distributed as tab-separated IOB files."

_URLS = {
    "train": "https://huggingface.co/datasets/clarin-knext/cen/resolve/main/data/train.iob",
    "valid": "https://huggingface.co/datasets/clarin-knext/cen/resolve/main/data/valid.iob",
    "test": "https://huggingface.co/datasets/clarin-knext/cen/resolve/main/data/test.iob",
}

_HOMEPAGE = "https://clarin-pl.eu/dspace/handle/11321/6"

# The n82 tagset file lists one entity type per line; drop empty lines so a
# trailing newline does not produce bogus 'B-'/'I-' labels.
with open('data/n82_tagset.txt', 'r', encoding='utf-8') as fin:
    _N82_TAGS = [tag for tag in fin.read().splitlines() if tag]

# Expand the flat list of entity types into the IOB2 label space: a single
# 'O' label plus 'B-'/'I-' variants of every n82 type.
_NER_IOB_TAGS = ['O']

for tag in _N82_TAGS:
    _NER_IOB_TAGS.extend([f'B-{tag}', f'I-{tag}'])
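# For instance, an n82 type such as 'nam_liv_person' (an illustrative entry;
# the actual inventory is read from data/n82_tagset.txt) contributes the two
# labels 'B-nam_liv_person' and 'I-nam_liv_person'.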


class CenDataset(datasets.GeneratorBasedBuilder):
    """Builder that parses the tab-separated IOB files of the CEN corpus."""

    def _info(self) -> datasets.DatasetInfo:
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "tokens": datasets.Sequence(datasets.Value('string')),
                    "lemmas": datasets.Sequence(datasets.Value('string')),
                    "mstags": datasets.Sequence(datasets.Value('string')),
                    "ner": datasets.Sequence(datasets.features.ClassLabel(names=_NER_IOB_TAGS))
                }
            ),
            homepage=_HOMEPAGE
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager):
        downloaded_files = dl_manager.download_and_extract(_URLS)
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={'filepath': downloaded_files['train']}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={'filepath': downloaded_files['valid']}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={'filepath': downloaded_files['test']})
        ]

    def _generate_examples(self, filepath: str):
        with open(filepath, 'r', encoding='utf-8') as fin:
            reader = csv.reader(fin, delimiter='\t', quoting=csv.QUOTE_NONE)

            tokens = []
            lemmas = []
            mstags = []
            ner = []
            gid = 0

            for line in reader:
                # A blank line terminates the current sentence; guard on
                # `tokens` so consecutive blank lines do not yield empty
                # examples.
                if not line:
                    if tokens:
                        yield gid, {
                            "tokens": tokens,
                            "lemmas": lemmas,
                            "mstags": mstags,
                            "ner": ner
                        }
                        gid += 1
                        tokens = []
                        lemmas = []
                        mstags = []
                        ner = []

                elif len(line) == 1:  # ignore --DOCSTART lines
                    continue

                else:
                    tokens.append(line[0])
                    lemmas.append(line[1])
                    mstags.append(line[2])
                    ner.append(line[3])

            # Flush the final sentence if the file does not end with a blank line.
            if tokens:
                yield gid, {
                    "tokens": tokens,
                    "lemmas": lemmas,
                    "mstags": mstags,
                    "ner": ner
                }
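

# --- Usage sketch (not part of the original script; file names are assumptions) ---
# A builder like this is consumed through the datasets library. Assuming this
# file is saved as cen.py next to data/n82_tagset.txt, a minimal smoke test
# under a datasets version that still supports script-based loading might be:
#
#     import datasets
#
#     ds = datasets.load_dataset("cen.py")
#     sample = ds["train"][0]
#     print(sample["tokens"])
#
#     # Map class ids back to IOB label strings via the ClassLabel feature:
#     ner_feature = ds["train"].features["ner"].feature
#     print([ner_feature.int2str(i) for i in sample["ner"]])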