SuzanaB committed
Commit 9f1906a
1 Parent(s): d1c9690
Files changed (2)
  1. data.zip +0 -0
  2. reldi_sr.py +158 -0
data.zip ADDED
Binary file (792 kB).
 
reldi_sr.py ADDED
@@ -0,0 +1,158 @@
+ # coding=utf-8
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the 'License');
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an 'AS IS' BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+
+ import os
+
+ import datasets
+
+
+ _CITATION = ''
+ _DESCRIPTION = """The dataset contains 5462 training samples, 711 validation samples and 725 test samples.
+ Each sample represents a sentence and includes the following features: sentence ID ('sent_id'),
+ a list of tokens ('tokens'), a list of lemmas ('lemmas'), a list of UPOS tags ('upos_tags'),
+ a list of MULTEXT-East tags ('xpos_tags'), a list of morphological features ('feats'),
+ and a list of IOB tags ('iob_tags'), which are encoded as class labels.
+ """
+ _HOMEPAGE = ''
+ _LICENSE = ''
+
+ _URL = 'https://huggingface.co/datasets/classla/reldi_sr/raw/main/data.zip'
+ _TRAINING_FILE = 'train_ner.conllu'
+ _DEV_FILE = 'dev_ner.conllu'
+ _TEST_FILE = 'test_ner.conllu'
+
+
+ class ReldiSr(datasets.GeneratorBasedBuilder):
+     VERSION = datasets.Version('1.0.0')
+
+     BUILDER_CONFIGS = [
+         datasets.BuilderConfig(
+             name='reldi_sr',
+             version=VERSION,
+             description=''
+         )
+     ]
+
+     def _info(self):
+         features = datasets.Features(
+             {
+                 'sent_id': datasets.Value('string'),
+                 'tokens': datasets.Sequence(datasets.Value('string')),
+                 'lemmas': datasets.Sequence(datasets.Value('string')),
+                 'upos_tags': datasets.Sequence(datasets.Value('string')),
+                 'xpos_tags': datasets.Sequence(datasets.Value('string')),
+                 'feats': datasets.Sequence(datasets.Value('string')),
+                 'iob_tags': datasets.Sequence(
+                     datasets.features.ClassLabel(
+                         names=[
+                             'I-org',
+                             'B-misc',
+                             'B-per',
+                             'B-deriv-per',
+                             'B-org',
+                             'B-loc',
+                             'I-misc',
+                             'I-loc',
+                             'I-per',
+                             'O',
+                             'I-*',
+                             'B-*'
+                         ]
+                     )
+                 )
+             }
+         )
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             supervised_keys=None,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         """Returns SplitGenerators."""
+         data_dir = dl_manager.download_and_extract(_URL)
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN, gen_kwargs={
+                     'filepath': os.path.join(data_dir, _TRAINING_FILE),
+                     'split': 'train'}
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION, gen_kwargs={
+                     'filepath': os.path.join(data_dir, _DEV_FILE),
+                     'split': 'dev'}
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST, gen_kwargs={
+                     'filepath': os.path.join(data_dir, _TEST_FILE),
+                     'split': 'test'}
+             ),
+         ]
+
+     def _generate_examples(self, filepath, split):
+         with open(filepath, encoding='utf-8') as f:
+             sent_id = ''
+             tokens = []
+             lemmas = []
+             upos_tags = []
+             xpos_tags = []
+             feats = []
+             iob_tags = []
+             data_id = 0
+             for line in f:
+                 if line and line != '\n':
+                     if line.startswith('# sent_id'):
+                         # A new sentence begins: yield the previous one first.
+                         if tokens:
+                             yield data_id, {
+                                 'sent_id': sent_id,
+                                 'tokens': tokens,
+                                 'lemmas': lemmas,
+                                 'upos_tags': upos_tags,
+                                 'xpos_tags': xpos_tags,
+                                 'feats': feats,
+                                 'iob_tags': iob_tags
+                             }
+                             tokens = []
+                             lemmas = []
+                             upos_tags = []
+                             xpos_tags = []
+                             feats = []
+                             iob_tags = []
+                             data_id += 1
+                         sent_id = line.split(' = ')[1].strip()
+                     elif line.startswith('#'):
+                         # Skip any other comment lines (e.g. '# text = ...').
+                         continue
+                     else:
+                         # CoNLL-U token line: ID, FORM, LEMMA, UPOS, XPOS, FEATS,
+                         # HEAD, DEPREL, DEPS, MISC; the NER tag is kept in the
+                         # tenth (MISC) column.
+                         splits = line.split('\t')
+                         tokens.append(splits[1].strip())
+                         lemmas.append(splits[2].strip())
+                         upos_tags.append(splits[3].strip())
+                         xpos_tags.append(splits[4].strip())
+                         feats.append(splits[5].strip())
+                         iob_tags.append(splits[9].strip())
+
+             # Yield the last sentence in the file.
+             if tokens:
+                 yield data_id, {
+                     'sent_id': sent_id,
+                     'tokens': tokens,
+                     'lemmas': lemmas,
+                     'upos_tags': upos_tags,
+                     'xpos_tags': xpos_tags,
+                     'feats': feats,
+                     'iob_tags': iob_tags
+                 }
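
Once this script is on the Hub, the dataset can be loaded with the datasets library. A minimal usage sketch, assuming the repository id classla/reldi_sr; recent versions of datasets require trust_remote_code=True to execute loading scripts like this one:

from datasets import load_dataset

# Download and build all three splits via the script above.
dataset = load_dataset('classla/reldi_sr', trust_remote_code=True)

# Inspect one training sentence.
sample = dataset['train'][0]
print(sample['sent_id'], sample['tokens'])

# 'iob_tags' are stored as ClassLabel integers; map them back to strings.
label_names = dataset['train'].features['iob_tags'].feature.names
print([label_names[i] for i in sample['iob_tags']])

Storing the NER tags as a ClassLabel feature (rather than plain strings) fixes the label-to-id mapping inside the dataset itself, which is what token-classification pipelines expect.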