SuzanaB committed on
Commit
ebd5757
1 Parent(s): ea10f70
Files changed (4)
  1. data.zip +0 -0
  2. data_ner.zip +0 -0
  3. data_ud.zip +0 -0
  4. setimes_sr.py +232 -112
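
For orientation before the diff: the script replaces its single 'setimes_sr' configuration with three named configurations ('upos', 'ner', 'ud'), with 'ner' as the default. A minimal usage sketch (assuming a datasets release that still executes dataset loading scripts; newer releases may additionally require trust_remote_code=True):

from datasets import load_dataset

# Config names introduced in this commit; 'ner' is DEFAULT_CONFIG_NAME.
ner = load_dataset('classla/setimes_sr', 'ner')
upos = load_dataset('classla/setimes_sr', 'upos')
ud = load_dataset('classla/setimes_sr', 'ud')

print(ner['train'][0]['tokens'])
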
data.zip DELETED
Binary file (869 kB)
data_ner.zip ADDED
Binary file (873 kB)
data_ud.zip ADDED
Binary file (912 kB)
setimes_sr.py CHANGED
@@ -32,74 +32,125 @@ are encoded as class labels.
 _HOMEPAGE = ''
 _LICENSE = ''
 
-_URL = 'https://huggingface.co/datasets/classla/setimes_sr/raw/main/data.zip'
-_TRAINING_FILE = 'train_ner.conllu'
-_DEV_FILE = 'dev_ner.conllu'
-_TEST_FILE = 'test_ner.conllu'
+_URLs = {
+    'ner': 'https://huggingface.co/datasets/classla/setimes_sr/raw/main/data_ner.zip',
+    'upos': 'https://huggingface.co/datasets/classla/setimes_sr/raw/main/data_ner.zip',
+    'ud': 'https://huggingface.co/datasets/classla/setimes_sr/raw/main/data_ud.zip'
+}
 
 
 class SeTimesSr(datasets.GeneratorBasedBuilder):
-    VERSION = datasets.Version('1.0.0')
+    VERSION = datasets.Version('1.0.1')
 
     BUILDER_CONFIGS = [
         datasets.BuilderConfig(
-            name='setimes_sr',
+            name='upos',
+            version=VERSION,
+            description=''
+        ),
+        datasets.BuilderConfig(
+            name='ner',
+            version=VERSION,
+            description=''
+        ),
+        datasets.BuilderConfig(
+            name='ud',
             version=VERSION,
             description=''
         )
     ]
 
+    DEFAULT_CONFIG_NAME = 'ner'
+
     def _info(self):
-        features = datasets.Features(
-            {
-                'sent_id': datasets.Value('string'),
-                'text': datasets.Value('string'),
-                'tokens': datasets.Sequence(datasets.Value('string')),
-                'lemmas': datasets.Sequence(datasets.Value('string')),
-                'xpos_tags': datasets.Sequence(datasets.Value('string')),
-                'upos_tags': datasets.Sequence(
-                    datasets.features.ClassLabel(
-                        names=[
-                            'X',
-                            'INTJ',
-                            'VERB',
-                            'PROPN',
-                            'ADV',
-                            'ADJ',
-                            'PUNCT',
-                            'PRON',
-                            'DET',
-                            'NUM',
-                            'SYM',
-                            'SCONJ',
-                            'NOUN',
-                            'AUX',
-                            'PART',
-                            'CCONJ',
-                            'ADP'
-                        ]
-                    )
-                ),
-                'feats': datasets.Sequence(datasets.Value('string')),
-                'iob_tags': datasets.Sequence(
-                    datasets.features.ClassLabel(
-                        names=[
-                            'I-org',
-                            'B-misc',
-                            'B-per',
-                            'B-deriv-per',
-                            'B-org',
-                            'B-loc',
-                            'I-deriv-per',
-                            'I-misc',
-                            'I-loc',
-                            'I-per',
-                            'O'
-                        ]
-                    )
-                )
-            }
-        )
+        if self.config.name == "upos":
+            features = datasets.Features(
+                {
+                    'sent_id': datasets.Value('string'),
+                    'text': datasets.Value('string'),
+                    'tokens': datasets.Sequence(datasets.Value('string')),
+                    'lemmas': datasets.Sequence(datasets.Value('string')),
+                    'xpos_tags': datasets.Sequence(datasets.Value('string')),
+                    'upos_tags': datasets.Sequence(
+                        datasets.features.ClassLabel(
+                            names=[
+                                'X',
+                                'INTJ',
+                                'VERB',
+                                'PROPN',
+                                'ADV',
+                                'ADJ',
+                                'PUNCT',
+                                'PRON',
+                                'DET',
+                                'NUM',
+                                'SYM',
+                                'SCONJ',
+                                'NOUN',
+                                'AUX',
+                                'PART',
+                                'CCONJ',
+                                'ADP'
+                            ]
+                        )
+                    ),
+                    'feats': datasets.Sequence(datasets.Value('string')),
+                    'iob_tags': datasets.Sequence(datasets.Value('string'))
+                }
+            )
+        elif self.config.name == "ner":
+            features = datasets.Features(
+                {
+                    'sent_id': datasets.Value('string'),
+                    'text': datasets.Value('string'),
+                    'tokens': datasets.Sequence(datasets.Value('string')),
+                    'lemmas': datasets.Sequence(datasets.Value('string')),
+                    'xpos_tags': datasets.Sequence(datasets.Value('string')),
+                    'upos_tags': datasets.Sequence(datasets.Value('string')),
+                    'feats': datasets.Sequence(datasets.Value('string')),
+                    'iob_tags': datasets.Sequence(
+                        datasets.features.ClassLabel(
+                            names=[
+                                'I-org',
+                                'B-misc',
+                                'B-per',
+                                'B-deriv-per',
+                                'B-org',
+                                'B-loc',
+                                'I-deriv-per',
+                                'I-misc',
+                                'I-loc',
+                                'I-per',
+                                'O'
+                            ]
+                        )
+                    )
+                }
+            )
+        else:
+            features = datasets.Features(
+                {
+                    'sent_id': datasets.Value('string'),
+                    'text': datasets.Value('string'),
+                    'tokens': datasets.Sequence(datasets.Value('string')),
+                    'lemmas': datasets.Sequence(datasets.Value('string')),
+                    'xpos_tags': datasets.Sequence(datasets.Value('string')),
+                    'upos_tags': datasets.Sequence(datasets.Value('string')),
+                    'feats': datasets.Sequence(datasets.Value('string')),
+                    'iob_tags': datasets.Sequence(datasets.Value('string')),
+                    'uds': datasets.Sequence(
+                        datasets.features.ClassLabel(
+                            names=[
+                                'det', 'aux_pass', 'list', 'cc', 'csubj', 'xcomp', 'nmod', 'dislocated', 'acl', 'fixed',
+                                'obj', 'dep', 'advmod_emph', 'goeswith', 'advmod', 'nsubj', 'punct', 'amod', 'expl_pv',
+                                'mark', 'obl', 'flat_foreign', 'conj', 'compound', 'expl', 'csubj_pass', 'appos',
+                                'case', 'advcl', 'parataxis', 'iobj', 'root', 'cop', 'aux', 'orphan', 'discourse',
+                                'nummod', 'nsubj_pass', 'vocative', 'flat', 'ccomp'
+                            ]
+                        )
+                    )
+                }
+            )
 
         return datasets.DatasetInfo(
             description=_DESCRIPTION,
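
Aside on the feature definitions in the hunk above: wrapping a tag inventory in datasets.features.ClassLabel stores each tag as an integer id rather than a string, which is why each config applies ClassLabel only to the tag set it targets and keeps the other columns as plain string sequences. An illustrative round trip using the UPOS label list from this hunk:

import datasets

upos = datasets.features.ClassLabel(
    names=['X', 'INTJ', 'VERB', 'PROPN', 'ADV', 'ADJ', 'PUNCT', 'PRON',
           'DET', 'NUM', 'SYM', 'SCONJ', 'NOUN', 'AUX', 'PART', 'CCONJ', 'ADP'])

idx = upos.str2int('NOUN')   # -> 12, the tag's position in names
print(upos.int2str(idx))     # -> 'NOUN'
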
@@ -112,79 +163,148 @@ class SeTimesSr(datasets.GeneratorBasedBuilder):
 
     def _split_generators(self, dl_manager):
         """Returns SplitGenerators."""
-        data_dir = dl_manager.download_and_extract(_URL)
+        data_dir = dl_manager.download_and_extract(_URLs[self.config.name])
+
+        if self.config.name == 'ud':
+            training_file = 'train_ner_ud.conllu'
+            dev_file = 'dev_ner_ud.conllu'
+            test_file = 'test_ner_ud.conllu'
+        else:
+            training_file = 'train_ner.conllu'
+            dev_file = 'dev_ner.conllu'
+            test_file = 'test_ner.conllu'
 
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN, gen_kwargs={
-                    'filepath': os.path.join(data_dir, _TRAINING_FILE),
+                    'filepath': os.path.join(data_dir, training_file),
                     'split': 'train'}
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.VALIDATION, gen_kwargs={
-                    'filepath': os.path.join(data_dir, _DEV_FILE),
+                    'filepath': os.path.join(data_dir, dev_file),
                     'split': 'dev'}
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.TEST, gen_kwargs={
-                    'filepath': os.path.join(data_dir, _TEST_FILE),
+                    'filepath': os.path.join(data_dir, test_file),
                     'split': 'test'}
             ),
         ]
 
     def _generate_examples(self, filepath, split):
-        with open(filepath, encoding='utf-8') as f:
-            sent_id = ''
-            text = ''
-            tokens = []
-            lemmas = []
-            xpos_tags = []
-            upos_tags = []
-            feats = []
-            iob_tags = []
-            data_id = 0
-            for line in f:
-                if line and not line == '\n':
-                    if line.startswith('#'):
-                        if line.startswith('# sent_id'):
-                            if tokens:
-                                yield data_id, {
-                                    'sent_id': sent_id,
-                                    'text': text,
-                                    'tokens': tokens,
-                                    'lemmas': lemmas,
-                                    'xpos_tags': xpos_tags,
-                                    'upos_tags': upos_tags,
-                                    'feats': feats,
-                                    'iob_tags': iob_tags
-                                }
-                                tokens = []
-                                lemmas = []
-                                xpos_tags = []
-                                upos_tags = []
-                                feats = []
-                                iob_tags = []
-                                data_id += 1
-                            sent_id = line.split(' = ')[1].strip()
-                        elif line.startswith('# text'):
-                            text = line.split(' = ')[1].strip()
-                    elif not line.startswith('_'):
-                        splits = line.split('\t')
-                        tokens.append(splits[1].strip())
-                        lemmas.append(splits[2].strip())
-                        xpos_tags.append(splits[3].strip())
-                        upos_tags.append(splits[4].strip())
-                        feats.append(splits[5].strip())
-                        iob_tags.append(splits[9].strip())
-
-            yield data_id, {
-                'sent_id': sent_id,
-                'text': text,
-                'tokens': tokens,
-                'lemmas': lemmas,
-                'xpos_tags': xpos_tags,
-                'upos_tags': upos_tags,
-                'feats': feats,
-                'iob_tags': iob_tags
-            }
+        if self.config.name == 'ud':
+            with open(filepath, encoding='utf-8') as f:
+                sent_id = ''
+                text = ''
+                tokens = []
+                lemmas = []
+                xpos_tags = []
+                upos_tags = []
+                feats = []
+                iob_tags = []
+                uds = []
+                data_id = 0
+                for line in f:
+                    if line and not line == '\n':
+                        if line.startswith('#'):
+                            if line.startswith('# sent_id'):
+                                if tokens:
+                                    yield data_id, {
+                                        'sent_id': sent_id,
+                                        'text': text,
+                                        'tokens': tokens,
+                                        'lemmas': lemmas,
+                                        'upos_tags': upos_tags,
+                                        'xpos_tags': xpos_tags,
+                                        'feats': feats,
+                                        'iob_tags': iob_tags,
+                                        'uds': uds
+                                    }
+                                    tokens = []
+                                    lemmas = []
+                                    upos_tags = []
+                                    xpos_tags = []
+                                    feats = []
+                                    iob_tags = []
+                                    uds = []
+                                    data_id += 1
+                                sent_id = line.split(' = ')[1].strip()
+                            elif line.startswith('# text'):
+                                text = line.split(' = ')[1].strip()
+                        elif not line.startswith('_'):
+                            splits = line.split('\t')
+                            tokens.append(splits[1].strip())
+                            lemmas.append(splits[2].strip())
+                            upos_tags.append(splits[3].strip())
+                            xpos_tags.append(splits[4].strip())
+                            feats.append(splits[5].strip())
+                            uds.append(splits[7].strip())
+
+                yield data_id, {
+                    'sent_id': sent_id,
+                    'text': text,
+                    'tokens': tokens,
+                    'lemmas': lemmas,
+                    'upos_tags': upos_tags,
+                    'xpos_tags': xpos_tags,
+                    'feats': feats,
+                    'iob_tags': iob_tags,
+                    'uds': uds
+                }
+        else:
+            with open(filepath, encoding='utf-8') as f:
+                sent_id = ''
+                text = ''
+                tokens = []
+                lemmas = []
+                xpos_tags = []
+                upos_tags = []
+                feats = []
+                iob_tags = []
+                data_id = 0
+                for line in f:
+                    if line and not line == '\n':
+                        if line.startswith('#'):
+                            if line.startswith('# sent_id'):
+                                if tokens:
+                                    yield data_id, {
+                                        'sent_id': sent_id,
+                                        'text': text,
+                                        'tokens': tokens,
+                                        'lemmas': lemmas,
+                                        'upos_tags': upos_tags,
+                                        'xpos_tags': xpos_tags,
+                                        'feats': feats,
+                                        'iob_tags': iob_tags
+                                    }
+                                    tokens = []
+                                    lemmas = []
+                                    upos_tags = []
+                                    xpos_tags = []
+                                    feats = []
+                                    iob_tags = []
+                                    data_id += 1
+                                sent_id = line.split(' = ')[1].strip()
+                            elif line.startswith('# text'):
+                                text = line.split(' = ')[1].strip()
+                        elif not line.startswith('_'):
+                            splits = line.split('\t')
+                            tokens.append(splits[1].strip())
+                            lemmas.append(splits[2].strip())
+                            upos_tags.append(splits[3].strip())
+                            xpos_tags.append(splits[4].strip())
+                            feats.append(splits[5].strip())
+                            iob_tags.append(splits[9].strip())
+
+                yield data_id, {
+                    'sent_id': sent_id,
+                    'text': text,
+                    'tokens': tokens,
+                    'lemmas': lemmas,
+                    'upos_tags': upos_tags,
+                    'xpos_tags': xpos_tags,
+                    'feats': feats,
+                    'iob_tags': iob_tags
+                }
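
The _generate_examples rewrites above index tab-separated CoNLL-U columns directly. A standalone sketch of that column mapping on an invented token line (the line content is made up for illustration; the column order is standard CoNLL-U):

# Columns: 0=ID 1=FORM 2=LEMMA 3=UPOS 4=XPOS 5=FEATS 6=HEAD 7=DEPREL 8=DEPS 9=MISC
line = '1\tBeograd\tBeograd\tPROPN\tNpmsn\tCase=Nom\t0\troot\t_\tB-loc\n'
splits = line.split('\t')
token, lemma = splits[1].strip(), splits[2].strip()
upos, xpos, feats = splits[3].strip(), splits[4].strip(), splits[5].strip()
deprel = splits[7].strip()   # the 'ud' branch collects DEPREL into 'uds'
ner_tag = splits[9].strip()  # the other configs collect MISC into 'iob_tags'
print(token, upos, deprel, ner_tag)

Note that the new code reads UPOS from splits[3] and XPOS from splits[4], matching this standard order; the removed code had the two indices swapped.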