SuzanaB committed
Commit 103a7d7
1 Parent(s): 023afe1
Files changed (3)
  1. data.zip → data_ner.zip +0 -0
  2. data_ud.zip +0 -0
  3. hr500k.py +232 -112
data.zip → data_ner.zip RENAMED
Binary files a/data.zip and b/data_ner.zip differ
data_ud.zip ADDED
Binary file (2.16 MB).
hr500k.py CHANGED
@@ -33,74 +33,125 @@ are encoded as class labels.
 _HOMEPAGE = 'https://www.clarin.si/repository/xmlui/handle/11356/1183#'
 _LICENSE = ''
 
-_URL = 'https://huggingface.co/datasets/classla/hr500k/raw/main/data.zip'
-_TRAINING_FILE = 'train_ner.conllu'
-_DEV_FILE = 'dev_ner.conllu'
-_TEST_FILE = 'test_ner.conllu'
+_URLs = {
+    'ner': 'https://huggingface.co/datasets/classla/hr500k/raw/main/data_ner.zip',
+    'upos': 'https://huggingface.co/datasets/classla/hr500k/raw/main/data_ner.zip',
+    'ud': 'https://huggingface.co/datasets/classla/hr500k/raw/main/data_ud.zip'
+}
 
 
 class Hr500K(datasets.GeneratorBasedBuilder):
-    VERSION = datasets.Version('1.0.0')
+    VERSION = datasets.Version('1.0.1')
 
     BUILDER_CONFIGS = [
         datasets.BuilderConfig(
-            name='hr500k',
+            name='upos',
+            version=VERSION,
+            description=''
+        ),
+        datasets.BuilderConfig(
+            name='ner',
+            version=VERSION,
+            description=''
+        ),
+        datasets.BuilderConfig(
+            name='ud',
             version=VERSION,
             description=''
         )
     ]
 
+    DEFAULT_CONFIG_NAME = 'ner'
+
     def _info(self):
-        features = datasets.Features(
-            {
-                'sent_id': datasets.Value('string'),
-                'text': datasets.Value('string'),
-                'tokens': datasets.Sequence(datasets.Value('string')),
-                'lemmas': datasets.Sequence(datasets.Value('string')),
-                'xpos_tags': datasets.Sequence(datasets.Value('string')),
-                'upos_tags': datasets.Sequence(
-                    datasets.features.ClassLabel(
-                        names=[
-                            'X',
-                            'INTJ',
-                            'VERB',
-                            'PROPN',
-                            'ADV',
-                            'ADJ',
-                            'PUNCT',
-                            'PRON',
-                            'DET',
-                            'NUM',
-                            'SYM',
-                            'SCONJ',
-                            'NOUN',
-                            'AUX',
-                            'PART',
-                            'CCONJ',
-                            'ADP'
-                        ]
+        if self.config.name == "upos":
+            features = datasets.Features(
+                {
+                    'sent_id': datasets.Value('string'),
+                    'text': datasets.Value('string'),
+                    'tokens': datasets.Sequence(datasets.Value('string')),
+                    'lemmas': datasets.Sequence(datasets.Value('string')),
+                    'xpos_tags': datasets.Sequence(datasets.Value('string')),
+                    'upos_tags': datasets.Sequence(
+                        datasets.features.ClassLabel(
+                            names=[
+                                'X',
+                                'INTJ',
+                                'VERB',
+                                'PROPN',
+                                'ADV',
+                                'ADJ',
+                                'PUNCT',
+                                'PRON',
+                                'DET',
+                                'NUM',
+                                'SYM',
+                                'SCONJ',
+                                'NOUN',
+                                'AUX',
+                                'PART',
+                                'CCONJ',
+                                'ADP'
+                            ]
+                        )
+                    ),
+                    'feats': datasets.Sequence(datasets.Value('string')),
+                    'iob_tags': datasets.Sequence(datasets.Value('string'))
+                }
+            )
+        elif self.config.name == "ner":
+            features = datasets.Features(
+                {
+                    'sent_id': datasets.Value('string'),
+                    'text': datasets.Value('string'),
+                    'tokens': datasets.Sequence(datasets.Value('string')),
+                    'lemmas': datasets.Sequence(datasets.Value('string')),
+                    'xpos_tags': datasets.Sequence(datasets.Value('string')),
+                    'upos_tags': datasets.Sequence(datasets.Value('string')),
+                    'feats': datasets.Sequence(datasets.Value('string')),
+                    'iob_tags': datasets.Sequence(
+                        datasets.features.ClassLabel(
+                            names=[
+                                'I-org',
+                                'B-misc',
+                                'B-per',
+                                'B-deriv-per',
+                                'B-org',
+                                'B-loc',
+                                'I-deriv-per',
+                                'I-misc',
+                                'I-loc',
+                                'I-per',
+                                'O'
+                            ]
+                        )
                     )
-                ),
-                'feats': datasets.Sequence(datasets.Value('string')),
-                'iob_tags': datasets.Sequence(
-                    datasets.features.ClassLabel(
-                        names=[
-                            'I-org',
-                            'B-misc',
-                            'B-per',
-                            'B-deriv-per',
-                            'B-org',
-                            'B-loc',
-                            'I-deriv-per',
-                            'I-misc',
-                            'I-loc',
-                            'I-per',
-                            'O'
-                        ]
+                }
+            )
+        else:
+            features = datasets.Features(
+                {
+                    'sent_id': datasets.Value('string'),
+                    'text': datasets.Value('string'),
+                    'tokens': datasets.Sequence(datasets.Value('string')),
+                    'lemmas': datasets.Sequence(datasets.Value('string')),
+                    'xpos_tags': datasets.Sequence(datasets.Value('string')),
+                    'upos_tags': datasets.Sequence(datasets.Value('string')),
+                    'feats': datasets.Sequence(datasets.Value('string')),
+                    'iob_tags': datasets.Sequence(datasets.Value('string')),
+                    'uds': datasets.Sequence(
+                        datasets.features.ClassLabel(
+                            names=[
+                                'det', 'aux_pass', 'list', 'cc', 'csubj', 'xcomp', 'nmod', 'dislocated', 'acl', 'fixed',
+                                'obj', 'dep', 'advmod_emph', 'goeswith', 'advmod', 'nsubj', 'punct', 'amod', 'expl_pv',
+                                'mark', 'obl', 'flat_foreign', 'conj', 'compound', 'expl', 'csubj_pass', 'appos',
+                                'case', 'advcl', 'parataxis', 'iobj', 'root', 'cop', 'aux', 'orphan', 'discourse',
+                                'nummod', 'nsubj_pass', 'vocative', 'flat', 'ccomp'
+                            ]
+                        )
                     )
-                )
-            }
-        )
+                }
+            )
 
         return datasets.DatasetInfo(
             description=_DESCRIPTION,
@@ -113,78 +164,147 @@ class Hr500K(datasets.GeneratorBasedBuilder):
 
     def _split_generators(self, dl_manager):
         """Returns SplitGenerators."""
-        data_dir = dl_manager.download_and_extract(_URL)
+        data_dir = dl_manager.download_and_extract(_URLs[self.config.name])
+
+        if self.config.name == 'ud':
+            training_file = 'train_ner_ud.conllu'
+            dev_file = 'dev_ner_ud.conllu'
+            test_file = 'test_ner_ud.conllu'
+        else:
+            training_file = 'train_ner.conllu'
+            dev_file = 'dev_ner.conllu'
+            test_file = 'test_ner.conllu'
 
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN, gen_kwargs={
-                    'filepath': os.path.join(data_dir, _TRAINING_FILE),
+                    'filepath': os.path.join(data_dir, training_file),
                     'split': 'train'}
             ),
            datasets.SplitGenerator(
                 name=datasets.Split.VALIDATION, gen_kwargs={
-                    'filepath': os.path.join(data_dir, _DEV_FILE),
+                    'filepath': os.path.join(data_dir, dev_file),
                     'split': 'dev'}
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.TEST, gen_kwargs={
-                    'filepath': os.path.join(data_dir, _TEST_FILE),
+                    'filepath': os.path.join(data_dir, test_file),
                     'split': 'test'}
             ),
         ]
 
     def _generate_examples(self, filepath, split):
-        with open(filepath, encoding='utf-8') as f:
-            sent_id = ''
-            text = ''
-            tokens = []
-            lemmas = []
-            xpos_tags = []
-            upos_tags = []
-            feats = []
-            iob_tags = []
-            data_id = 0
-            for line in f:
-                if line and not line == '\n':
-                    if line.startswith('#'):
-                        if line.startswith('# sent_id'):
-                            if tokens:
-                                yield data_id, {
-                                    'sent_id': sent_id,
-                                    'text': text,
-                                    'tokens': tokens,
-                                    'lemmas': lemmas,
-                                    'xpos_tags': xpos_tags,
-                                    'upos_tags': upos_tags,
-                                    'feats': feats,
-                                    'iob_tags': iob_tags
-                                }
-                                tokens = []
-                                lemmas = []
-                                xpos_tags = []
-                                upos_tags = []
-                                feats = []
-                                iob_tags = []
-                                data_id += 1
-                            sent_id = line.split(' = ')[1].strip()
-                        elif line.startswith('# text'):
-                            text = line.split(' = ')[1].strip()
-                    elif not line.startswith('_'):
-                        splits = line.split('\t')
-                        tokens.append(splits[1].strip())
-                        lemmas.append(splits[2].strip())
-                        xpos_tags.append(splits[3].strip())
-                        upos_tags.append(splits[4].strip())
-                        feats.append(splits[5].strip())
-                        iob_tags.append(splits[9].strip())
-
-            yield data_id, {
-                'sent_id': sent_id,
-                'text': text,
-                'tokens': tokens,
-                'lemmas': lemmas,
-                'xpos_tags': xpos_tags,
-                'upos_tags': upos_tags,
-                'feats': feats,
-                'iob_tags': iob_tags
-            }
+        if self.config.name == 'ud':
+            with open(filepath, encoding='utf-8') as f:
+                sent_id = ''
+                text = ''
+                tokens = []
+                lemmas = []
+                xpos_tags = []
+                upos_tags = []
+                feats = []
+                iob_tags = []
+                uds = []
+                data_id = 0
+                for line in f:
+                    if line and not line == '\n':
+                        if line.startswith('#'):
+                            if line.startswith('# sent_id'):
+                                if tokens:
+                                    yield data_id, {
+                                        'sent_id': sent_id,
+                                        'text': text,
+                                        'tokens': tokens,
+                                        'lemmas': lemmas,
+                                        'upos_tags': upos_tags,
+                                        'xpos_tags': xpos_tags,
+                                        'feats': feats,
+                                        'iob_tags': iob_tags,
+                                        'uds': uds
+                                    }
+                                    tokens = []
+                                    lemmas = []
+                                    upos_tags = []
+                                    xpos_tags = []
+                                    feats = []
+                                    iob_tags = []
+                                    uds = []
+                                    data_id += 1
+                                sent_id = line.split(' = ')[1].strip()
+                            elif line.startswith('# text'):
+                                text = line.split(' = ')[1].strip()
+                        elif not line.startswith('_'):
+                            splits = line.split('\t')
+                            tokens.append(splits[1].strip())
+                            lemmas.append(splits[2].strip())
+                            upos_tags.append(splits[3].strip())
+                            xpos_tags.append(splits[4].strip())
+                            feats.append(splits[5].strip())
+                            uds.append(splits[7].strip())
+
+                yield data_id, {
+                    'sent_id': sent_id,
+                    'text': text,
+                    'tokens': tokens,
+                    'lemmas': lemmas,
+                    'upos_tags': upos_tags,
+                    'xpos_tags': xpos_tags,
+                    'feats': feats,
+                    'iob_tags': iob_tags,
+                    'uds': uds
+                }
+        else:
+            with open(filepath, encoding='utf-8') as f:
+                sent_id = ''
+                text = ''
+                tokens = []
+                lemmas = []
+                xpos_tags = []
+                upos_tags = []
+                feats = []
+                iob_tags = []
+                data_id = 0
+                for line in f:
+                    if line and not line == '\n':
+                        if line.startswith('#'):
+                            if line.startswith('# sent_id'):
+                                if tokens:
+                                    yield data_id, {
+                                        'sent_id': sent_id,
+                                        'text': text,
+                                        'tokens': tokens,
+                                        'lemmas': lemmas,
+                                        'upos_tags': upos_tags,
+                                        'xpos_tags': xpos_tags,
+                                        'feats': feats,
+                                        'iob_tags': iob_tags
+                                    }
+                                    tokens = []
+                                    lemmas = []
+                                    upos_tags = []
+                                    xpos_tags = []
+                                    feats = []
+                                    iob_tags = []
+                                    data_id += 1
+                                sent_id = line.split(' = ')[1].strip()
+                            elif line.startswith('# text'):
+                                text = line.split(' = ')[1].strip()
+                        elif not line.startswith('_'):
+                            splits = line.split('\t')
+                            tokens.append(splits[1].strip())
+                            lemmas.append(splits[2].strip())
+                            upos_tags.append(splits[3].strip())
+                            xpos_tags.append(splits[4].strip())
+                            feats.append(splits[5].strip())
+                            iob_tags.append(splits[9].strip())
+
+                yield data_id, {
+                    'sent_id': sent_id,
+                    'text': text,
+                    'tokens': tokens,
+                    'lemmas': lemmas,
+                    'upos_tags': upos_tags,
+                    'xpos_tags': xpos_tags,
+                    'feats': feats,
+                    'iob_tags': iob_tags
+                }
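
For orientation, _generate_examples walks the .conllu files line by line: comment lines carry the sentence id and text, and each token line is a tab-separated CoNLL-U record (ID, FORM, LEMMA, UPOS, XPOS, FEATS, HEAD, DEPREL, DEPS, then a final column that in these files holds the IOB NER tag, judging by splits[9] in the script). A minimal sketch of the column mapping the new code assumes; the token line below is hypothetical, not taken from the corpus:

# Hypothetical CoNLL-U-style token line; fields are tab-separated.
line = '1\tHrvatska\tHrvatska\tPROPN\tNpfsn\tCase=Nom|Gender=Fem|Number=Sing\t2\tnsubj\t_\tB-loc\n'
splits = line.split('\t')
assert splits[1].strip() == 'Hrvatska'  # token (read by all configs)
assert splits[2].strip() == 'Hrvatska'  # lemma (read by all configs)
assert splits[3].strip() == 'PROPN'     # UPOS; this commit now reads UPOS from splits[3] and XPOS from splits[4]
assert splits[4].strip() == 'Npfsn'     # XPOS
assert splits[7].strip() == 'nsubj'     # dependency relation, read only by the 'ud' config
assert splits[9].strip() == 'B-loc'     # IOB NER tag, read only by the 'ner' and 'upos' configs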
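
And a minimal usage sketch for the three configurations, assuming the script is loaded from the classla/hr500k repository on the Hub ('ner' is the default per DEFAULT_CONFIG_NAME):

from datasets import load_dataset

ner = load_dataset('classla/hr500k', 'ner')    # IOB NER tags encoded as class labels
upos = load_dataset('classla/hr500k', 'upos')  # UPOS tags encoded as class labels; same archive as 'ner'
ud = load_dataset('classla/hr500k', 'ud')      # adds the 'uds' dependency-relation class labels

print(ner['train'][0]['tokens'])
print(ud['train'][0]['uds'])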