ktagowski committed on
Commit
6f50cd5
1 Parent(s): 9af21af

Add loader

Browse files
Files changed (1) hide show
  1. nkjp-pos.py +164 -0
nkjp-pos.py ADDED
@@ -0,0 +1,164 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2021 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ # Lint as: python3
17
+ """NKJP-POS tagging dataset."""
18
+
19
+ import csv
20
+ from typing import List, Tuple, Dict, Generator
21
+
22
+ import datasets
23
+
24
# Short description surfaced on the dataset hub page.
_DESCRIPTION = """NKJP-POS tagging dataset."""

# Direct download locations of the pre-split TSV files hosted on the Hub.
_URLS = {
    "train": "https://huggingface.co/datasets/clarin-pl/nkjp-pos/resolve/main/data/train.tsv",
    "validation": "https://huggingface.co/datasets/clarin-pl/nkjp-pos/resolve/main/data/valid.tsv",
    "test": "https://huggingface.co/datasets/clarin-pl/nkjp-pos/resolve/main/data/test.tsv",
}

_HOMEPAGE = "http://clip.ipipan.waw.pl/NationalCorpusOfPolish"

# Coarse POS classes of the NKJP tagset, listed in the exact order that
# defines the ClassLabel integer ids (do not reorder).
_POS_TAGS = [
    "adj", "adja", "adjc", "adjp", "adv", "aglt", "bedzie", "brev",
    "burk", "comp", "conj", "depr", "fin", "ger", "imps", "impt",
    "inf", "interj", "interp", "num", "numcol", "pact", "pant", "pcon",
    "ppas", "ppron12", "ppron3", "praet", "pred", "prep", "qub",
    "siebie", "subst", "winien", "xxx",
]
71
+
72
+
73
class NKJPPOS(datasets.GeneratorBasedBuilder):
    """Builder for the NKJP-POS part-of-speech tagging dataset.

    Each example is one sentence, represented as parallel sequences of
    tokens, morphological analyses, lemmas, coarse POS tags, full tagset
    strings, and no-preceding-space markers, read from CoNLL-style TSV
    files (blank line = sentence boundary).
    """

    def _info(self) -> datasets.DatasetInfo:
        """Declare the sentence-level feature schema."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    "morph": datasets.Sequence(datasets.Value("string")),
                    "lemmas": datasets.Sequence(datasets.Value("string")),
                    "pos_tags": datasets.Sequence(
                        datasets.features.ClassLabel(
                            names=_POS_TAGS,
                            num_classes=len(_POS_TAGS),
                        )
                    ),
                    "full_pos_tags": datasets.Sequence(datasets.Value("string")),
                    "nps": datasets.Sequence(datasets.Value("string")),
                }
            ),
            homepage=_HOMEPAGE,
        )

    def _split_generators(
        self, dl_manager: datasets.DownloadManager
    ) -> List[datasets.SplitGenerator]:
        """Download the three TSV splits and map each to a split generator."""
        downloaded_files = dl_manager.download_and_extract(_URLS)
        return [
            datasets.SplitGenerator(
                name=split,
                gen_kwargs={"filepath": downloaded_files[key]},
            )
            for split, key in (
                (datasets.Split.TRAIN, "train"),
                (datasets.Split.VALIDATION, "validation"),
                (datasets.Split.TEST, "test"),
            )
        ]

    @staticmethod
    def _parse_tag(tag: str) -> Tuple[str, str]:
        """Split a full NKJP tag into (coarse POS, full tag).

        The coarse POS is the first colon-separated segment, e.g.
        'subst:sg:nom' -> ('subst', 'subst:sg:nom').
        """
        return tag.split(':')[0], tag

    def _generate_examples(
        self, filepath: str
    ) -> Generator[Tuple[int, Dict[str, str]], None, None]:
        """Yield (id, example) pairs from one TSV split file.

        Expected columns per row: token, morph, tag, lemma, nps.
        Sentences are separated by blank lines.
        """
        # BUG FIX: explicit UTF-8 — the locale default codec (e.g. cp1252
        # on Windows) would corrupt Polish text.
        with open(filepath, 'r', encoding='utf-8') as f:
            # QUOTE_NONE: quote characters inside tokens are data, not
            # CSV quoting.
            reader = csv.reader(f, delimiter='\t', quoting=csv.QUOTE_NONE)

            tokens, morph, tags, full_tags, lemma, nps = [], [], [], [], [], []
            gid = 0

            def _example() -> Dict[str, list]:
                # Assemble the current sentence buffers into one example.
                return {
                    'tokens': tokens,
                    'morph': morph,
                    'pos_tags': tags,
                    'full_pos_tags': full_tags,
                    'lemmas': lemma,
                    'nps': nps,
                }

            for line in reader:
                if not line:
                    # Sentence boundary; the guard skips empty examples
                    # produced by leading or consecutive blank lines.
                    if tokens:
                        yield gid, _example()
                        gid += 1
                        tokens, morph, tags, full_tags, lemma, nps = (
                            [], [], [], [], [], []
                        )
                else:
                    tokens.append(line[0])
                    morph.append(line[1])
                    lemma.append(line[3])
                    nps.append(line[4])
                    tag, full_tag = self._parse_tag(line[2])
                    tags.append(tag)
                    full_tags.append(full_tag)

            # BUG FIX: flush the final sentence when the file does not end
            # with a trailing blank line (it was silently dropped before).
            if tokens:
                yield gid, _example()