SuzanaB committed on
Commit
a7f96f1
1 Parent(s): 8f3d87a
Files changed (5)
  1. .gitattributes +1 -0
  2. dev_ner.conllu +3 -0
  3. hr500k.py +180 -0
  4. test_ner.conllu +3 -0
  5. train_ner.conllu +3 -0
.gitattributes CHANGED
@@ -14,3 +14,4 @@
  *.pb filter=lfs diff=lfs merge=lfs -text
  *.pt filter=lfs diff=lfs merge=lfs -text
  *.pth filter=lfs diff=lfs merge=lfs -text
+ *.conllu filter=lfs diff=lfs merge=lfs -text
dev_ner.conllu ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5d3ea0605657af9721b5e2eea04e0ea4510a764c503b09879826d5b254807345
+ size 2770096
hr500k.py ADDED
@@ -0,0 +1,180 @@
+ # coding=utf-8
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the 'License');
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an 'AS IS' BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+
+ import datasets
+
+
+ _CITATION = ''
+ _DESCRIPTION = """The hr500k training corpus contains about 500,000 tokens manually annotated on the levels of
+ tokenisation, sentence segmentation, morphosyntactic tagging, lemmatisation and named entities.
+
+ On the sentence level, the dataset contains 20159 training samples, 1963 validation samples and 2672 test samples
+ across the respective data splits. Each sample represents a sentence and includes the following features:
+ sentence ID ('sent_id'), sentence text ('text'), list of tokens ('tokens'), list of lemmas ('lemmas'),
+ list of Multext-East tags ('xpos_tags'), list of UPOS tags ('upos_tags'),
+ list of morphological features ('feats'), and list of IOB tags ('iob_tags'). The 'upos_tags' and 'iob_tags' features
+ are encoded as class labels.
+ """
+ _HOMEPAGE = 'https://www.clarin.si/repository/xmlui/handle/11356/1183#'
+ _LICENSE = ''
+
+ _TRAINING_FILE = 'train_ner.conllu'
+ _DEV_FILE = 'dev_ner.conllu'
+ _TEST_FILE = 'test_ner.conllu'
+
+
+ class Hr500K(datasets.GeneratorBasedBuilder):
+     VERSION = datasets.Version('1.1.0')
+
+     BUILDER_CONFIGS = [
+         datasets.BuilderConfig(
+             name='hr500k',
+             version=VERSION,
+             data_files=[_TRAINING_FILE, _DEV_FILE, _TEST_FILE],
+             description=''
+         )
+     ]
+
+     def _info(self):
+         features = datasets.Features(
+             {
+                 'sent_id': datasets.Value('string'),
+                 'text': datasets.Value('string'),
+                 'tokens': datasets.Sequence(datasets.Value('string')),
+                 'lemmas': datasets.Sequence(datasets.Value('string')),
+                 'xpos_tags': datasets.Sequence(datasets.Value('string')),
+                 'upos_tags': datasets.Sequence(
+                     datasets.features.ClassLabel(
+                         names=[
+                             'X',
+                             'INTJ',
+                             'VERB',
+                             'PROPN',
+                             'ADV',
+                             'ADJ',
+                             'PUNCT',
+                             'PRON',
+                             'DET',
+                             'NUM',
+                             'SYM',
+                             'SCONJ',
+                             'NOUN',
+                             'AUX',
+                             'PART',
+                             'CCONJ',
+                             'ADP'
+                         ]
+                     )
+                 ),
+                 'feats': datasets.Sequence(datasets.Value('string')),
+                 'iob_tags': datasets.Sequence(
+                     datasets.features.ClassLabel(
+                         names=[
+                             'I-org',
+                             'B-misc',
+                             'B-per',
+                             'B-deriv-per',
+                             'B-org',
+                             'B-loc',
+                             'I-deriv-per',
+                             'I-misc',
+                             'I-loc',
+                             'I-per',
+                             'O'
+                         ]
+                     )
+                 )
+             }
+         )
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             supervised_keys=None,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN, gen_kwargs={'filepath': _TRAINING_FILE, 'split': 'train'}
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION, gen_kwargs={'filepath': _DEV_FILE, 'split': 'dev'}
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST, gen_kwargs={'filepath': _TEST_FILE, 'split': 'test'}
+             ),
+         ]
+
+     def _generate_examples(self, filepath, split):
+         with open(filepath, encoding='utf-8') as f:
+             sent_id = ''
+             text = ''
+             tokens = []
+             lemmas = []
+             xpos_tags = []
+             upos_tags = []
+             feats = []
+             iob_tags = []
+             data_id = 0
+             for line in f:
+                 if line and not line == '\n':
+                     if line.startswith('#'):
+                         if line.startswith('# sent_id'):
+                             # A new sentence starts: emit the buffered sentence, if any, then reset.
+                             if tokens:
+                                 yield data_id, {
+                                     'sent_id': sent_id,
+                                     'text': text,
+                                     'tokens': tokens,
+                                     'lemmas': lemmas,
+                                     'xpos_tags': xpos_tags,
+                                     'upos_tags': upos_tags,
+                                     'feats': feats,
+                                     'iob_tags': iob_tags
+                                 }
+                                 tokens = []
+                                 lemmas = []
+                                 xpos_tags = []
+                                 upos_tags = []
+                                 feats = []
+                                 iob_tags = []
+                                 data_id += 1
+                             sent_id = line.split(' = ')[1].strip()
+                         elif line.startswith('# text'):
+                             text = line.split(' = ')[1].strip()
+                     elif not line.startswith('_'):
+                         # Token line: tab-separated fields, read here as token (index 1), lemma (2),
+                         # MULTEXT-East tag (3), UPOS tag (4), morphological features (5) and the
+                         # NER/IOB tag (9), which is the layout this script expects from the *_ner.conllu files.
+                         splits = line.split('\t')
+                         tokens.append(splits[1].strip())
+                         lemmas.append(splits[2].strip())
+                         xpos_tags.append(splits[3].strip())
+                         upos_tags.append(splits[4].strip())
+                         feats.append(splits[5].strip())
+                         iob_tags.append(splits[9].strip())
+
+             # Emit the last sentence in the file.
+             yield data_id, {
+                 'sent_id': sent_id,
+                 'text': text,
+                 'tokens': tokens,
+                 'lemmas': lemmas,
+                 'xpos_tags': xpos_tags,
+                 'upos_tags': upos_tags,
+                 'feats': feats,
+                 'iob_tags': iob_tags
+             }
+
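For context (not part of the commit): the script above exposes the corpus through the Hugging Face datasets library. A minimal usage sketch follows; the local path to hr500k.py is a placeholder, and the sketch assumes a datasets version that still accepts dataset loading scripts (newer releases may require trust_remote_code=True or drop script support) and that the three *_ner.conllu files have been fetched via Git LFS and sit next to the script.

from datasets import load_dataset

# Hypothetical local path to this commit's script; the *_ner.conllu files must be in the same folder.
ds = load_dataset('path/to/hr500k.py')
print(ds)  # DatasetDict with 'train', 'validation' and 'test' splits

example = ds['train'][0]
print(example['sent_id'], example['tokens'])

# 'upos_tags' and 'iob_tags' are ClassLabel-encoded integers; map them back to label names.
upos = ds['train'].features['upos_tags'].feature
print([upos.int2str(i) for i in example['upos_tags']])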
test_ner.conllu ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dda18e6ed47510ab4de23642db8a744e99ecfac3c28d29545128bcd11b4bad2e
+ size 3524103
train_ner.conllu ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c248a1e78551300b3bcafcd98bdd72287adb584d8aad057ec5f719806ea75bf6
+ size 28938043
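A note on the three .conllu entries (not part of the commit): they are Git LFS pointer files, each holding only a version line, an object id and a size, so a clone made without Git LFS will not contain the actual corpus. A small sketch for checking whether a local checkout still holds pointers rather than data, using the file names from this commit:

# Returns True if the file still contains a Git LFS pointer instead of the real corpus data.
def is_lfs_pointer(path):
    with open(path, encoding='utf-8') as f:
        return f.readline().startswith('version https://git-lfs.github.com/spec/v1')

for name in ('train_ner.conllu', 'dev_ner.conllu', 'test_ner.conllu'):
    print(name, 'is still an LFS pointer:', is_lfs_pointer(name))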