SuzanaB committed
Commit
ea10f70
1 Parent(s): 59bbe2c
Files changed (2)
  1. data.zip +0 -0
  2. setimes_sr.py +190 -0
data.zip ADDED
Binary file (869 kB).
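(Per the loading script added below, the archive is expected to unpack into three top-level CoNLL-U files: train_ner.conllu, dev_ner.conllu and test_ner.conllu. This is inferred from the script's _TRAINING_FILE, _DEV_FILE and _TEST_FILE constants, not from inspecting the binary.)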
setimes_sr.py ADDED
@@ -0,0 +1,190 @@
+ # coding=utf-8
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the 'License');
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an 'AS IS' BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+
+ import os
+
+ import datasets
+
+
+ _CITATION = ''
+ _DESCRIPTION = """SETimes_sr is a Serbian dataset annotated for morphosyntactic information and named entities.
+
+ The dataset contains 3177 training samples, 395 validation samples and 319 test samples
+ across the respective data splits. Each sample represents a sentence and includes the following features:
+ sentence ID ('sent_id'), sentence text ('text'), list of tokens ('tokens'), list of lemmas ('lemmas'),
+ list of MULTEXT-East tags ('xpos_tags'), list of UPOS tags ('upos_tags'),
+ list of morphological features ('feats'), and list of IOB tags ('iob_tags'). The 'upos_tags' and 'iob_tags'
+ features are encoded as class labels.
+ """
+ _HOMEPAGE = ''
+ _LICENSE = ''
+
+ _URL = 'https://huggingface.co/datasets/classla/setimes_sr/raw/main/data.zip'
+ _TRAINING_FILE = 'train_ner.conllu'
+ _DEV_FILE = 'dev_ner.conllu'
+ _TEST_FILE = 'test_ner.conllu'
+
+
+ class SeTimesSr(datasets.GeneratorBasedBuilder):
+     VERSION = datasets.Version('1.0.0')
+
+     BUILDER_CONFIGS = [
+         datasets.BuilderConfig(
+             name='setimes_sr',
+             version=VERSION,
+             description=''
+         )
+     ]
+
+     def _info(self):
+         features = datasets.Features(
+             {
+                 'sent_id': datasets.Value('string'),
+                 'text': datasets.Value('string'),
+                 'tokens': datasets.Sequence(datasets.Value('string')),
+                 'lemmas': datasets.Sequence(datasets.Value('string')),
+                 'xpos_tags': datasets.Sequence(datasets.Value('string')),
+                 'upos_tags': datasets.Sequence(
+                     datasets.features.ClassLabel(
+                         names=[
+                             'X',
+                             'INTJ',
+                             'VERB',
+                             'PROPN',
+                             'ADV',
+                             'ADJ',
+                             'PUNCT',
+                             'PRON',
+                             'DET',
+                             'NUM',
+                             'SYM',
+                             'SCONJ',
+                             'NOUN',
+                             'AUX',
+                             'PART',
+                             'CCONJ',
+                             'ADP'
+                         ]
+                     )
+                 ),
+                 'feats': datasets.Sequence(datasets.Value('string')),
+                 'iob_tags': datasets.Sequence(
+                     datasets.features.ClassLabel(
+                         names=[
+                             'I-org',
+                             'B-misc',
+                             'B-per',
+                             'B-deriv-per',
+                             'B-org',
+                             'B-loc',
+                             'I-deriv-per',
+                             'I-misc',
+                             'I-loc',
+                             'I-per',
+                             'O'
+                         ]
+                     )
+                 )
+             }
+         )
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             supervised_keys=None,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         """Returns SplitGenerators."""
+         # download_and_extract returns the local directory into which
+         # data.zip has been unpacked; the three .conllu files sit at its root.
+         data_dir = dl_manager.download_and_extract(_URL)
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN, gen_kwargs={
+                     'filepath': os.path.join(data_dir, _TRAINING_FILE),
+                     'split': 'train'}
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION, gen_kwargs={
+                     'filepath': os.path.join(data_dir, _DEV_FILE),
+                     'split': 'dev'}
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST, gen_kwargs={
+                     'filepath': os.path.join(data_dir, _TEST_FILE),
+                     'split': 'test'}
+             ),
+         ]
+
+     def _generate_examples(self, filepath, split):
+         with open(filepath, encoding='utf-8') as f:
+             sent_id = ''
+             text = ''
+             tokens = []
+             lemmas = []
+             xpos_tags = []
+             upos_tags = []
+             feats = []
+             iob_tags = []
+             data_id = 0
+             for line in f:
+                 if line and not line == '\n':
+                     if line.startswith('#'):
+                         if line.startswith('# sent_id'):
+                             # A new '# sent_id' comment opens the next sentence,
+                             # so yield the one accumulated so far and reset.
+                             if tokens:
+                                 yield data_id, {
+                                     'sent_id': sent_id,
+                                     'text': text,
+                                     'tokens': tokens,
+                                     'lemmas': lemmas,
+                                     'xpos_tags': xpos_tags,
+                                     'upos_tags': upos_tags,
+                                     'feats': feats,
+                                     'iob_tags': iob_tags
+                                 }
+                                 tokens = []
+                                 lemmas = []
+                                 xpos_tags = []
+                                 upos_tags = []
+                                 feats = []
+                                 iob_tags = []
+                                 data_id += 1
+                             sent_id = line.split(' = ')[1].strip()
+                         elif line.startswith('# text'):
+                             text = line.split(' = ')[1].strip()
+                     elif not line.startswith('_'):
+                         # Token line: tab-separated CoNLL-U columns, read here as
+                         # form (2), lemma (3), XPOS (4), UPOS (5), feats (6) and,
+                         # in these files, the IOB NER tag in the tenth column.
+                         splits = line.split('\t')
+                         tokens.append(splits[1].strip())
+                         lemmas.append(splits[2].strip())
+                         xpos_tags.append(splits[3].strip())
+                         upos_tags.append(splits[4].strip())
+                         feats.append(splits[5].strip())
+                         iob_tags.append(splits[9].strip())
+
+             # Yield the final sentence in the file.
+             yield data_id, {
+                 'sent_id': sent_id,
+                 'text': text,
+                 'tokens': tokens,
+                 'lemmas': lemmas,
+                 'xpos_tags': xpos_tags,
+                 'upos_tags': upos_tags,
+                 'feats': feats,
+                 'iob_tags': iob_tags
+             }
+
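For reference, a minimal sketch of how this dataset is loaded once the commit is live. It assumes only the datasets library and access to the classla/setimes_sr repository; the printed values are illustrative, and newer datasets releases may additionally require trust_remote_code=True when running a Python loading script.

    import datasets

    # Runs setimes_sr.py, which downloads data.zip and parses the three splits.
    dataset = datasets.load_dataset('classla/setimes_sr')
    train = dataset['train']
    print(train.num_rows)  # 3177 per the description above

    # 'upos_tags' and 'iob_tags' arrive as ClassLabel-encoded integers;
    # int2str maps them back to the label names declared in _info().
    sample = train[0]
    iob_labels = train.features['iob_tags'].feature.int2str(sample['iob_tags'])
    print(list(zip(sample['tokens'], iob_labels)))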