SuzanaB committed
Commit
fc712a4
1 Parent(s): 0559063

Add data and code

Files changed (2):
  1. data.zip +0 -0
  2. janes_tag.py +154 -0
data.zip ADDED
Binary file (856 kB).
 
janes_tag.py ADDED
@@ -0,0 +1,154 @@
+ # coding=utf-8
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the 'License');
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an 'AS IS' BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+
+ import os
+
+ import datasets
+
+
+ _CITATION = ''
+ _DESCRIPTION = """
+ """
+ _HOMEPAGE = ''
+ _LICENSE = ''
+
+ _URL = 'https://huggingface.co/datasets/classla/janes_tag/raw/main/data.zip'
+ _TRAINING_FILE = 'train_all.conllup'
+ _DEV_FILE = 'dev_all.conllup'
+ _TEST_FILE = 'test_all.conllup'
+ _DATA_DIR = 'data'
+
+
+ class JanesTag(datasets.GeneratorBasedBuilder):
+     VERSION = datasets.Version('1.0.0')
+
+     BUILDER_CONFIGS = [
+         datasets.BuilderConfig(
+             name='janes_tag',
+             version=VERSION,
+             description=''
+         )
+     ]
+
+     def _info(self):
+         features = datasets.Features(
+             {
+                 'sent_id': datasets.Value('string'),
+                 'tokens': datasets.Sequence(datasets.Value('string')),
+                 'norms': datasets.Sequence(datasets.Value('string')),
+                 'lemmas': datasets.Sequence(datasets.Value('string')),
+                 'xpos_tags': datasets.Sequence(datasets.Value('string')),
+                 'feats': datasets.Sequence(datasets.Value('string')),
+                 # Some labels are space-joined UPOS pairs; these appear to
+                 # cover merged/contracted tokens in the source data.
+                 'upos_tags': datasets.Sequence(
+                     datasets.features.ClassLabel(
+                         names=[
+                             'SCONJ VERB', 'NOUN', 'NOUN NOUN', 'CCONJ SCONJ', 'ADV X', 'ADJ', 'NOUN NUM', 'ADP VERB',
+                             'CCONJ', 'SCONJ AUX', 'VERB', 'PRON PRON', 'CCONJ PART', 'ADV ADJ', 'PRON AUX', 'AUX AUX',
+                             'VERB ADP', 'DET ADJ', 'ADJ NOUN', 'PART PART', 'ADV AUX', 'NOUN ADV', 'PART CCONJ',
+                             'DET NOUN', 'CCONJ CCONJ', 'ADV', 'NUM', 'AUX NUM', 'ADV DET', 'ADV ADV', 'PRON VERB',
+                             'ADP PRON', 'DET AUX', 'VERB ADV', 'PROPN PROPN', 'NOUN PROPN', 'ADJ ADP', 'PART AUX',
+                             'PROPN NOUN', 'PROPN ADV', 'ADP NOUN', 'NUM ADV', 'NOUN ADJ', 'SCONJ', 'PART NOUN',
+                             'ADV NUM', 'VERB PRON', 'PART ADJ', 'AUX', 'ADP NUM', 'PRON', 'ADP ADJ', 'INTJ', 'ADV VERB',
+                             'NOUN SYM', 'PART', 'ADV PART', 'DET VERB', 'SCONJ PART', 'ADV SCONJ', 'NOUN CCONJ',
+                             'NUM DET', 'ADP X', 'INTJ X', 'NOUN VERB', 'PUNCT', 'ADP', 'ADV CCONJ', 'NOUN DET',
+                             'X NOUN', 'DET', 'PROPN X', 'SYM', 'PROPN NUM', 'PART VERB', 'SYM INTJ', 'ADP ADV',
+                             'X PROPN', 'X X', 'PROPN', 'ADP DET', 'X', 'AUX ADV', 'NUM NOUN'
+                         ]
+                     )
+                 )
+             }
+         )
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             supervised_keys=None,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         """Returns SplitGenerators."""
+         data_dir = os.path.join(dl_manager.download_and_extract(_URL), _DATA_DIR)
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN, gen_kwargs={
+                     'filepath': os.path.join(data_dir, _TRAINING_FILE),
+                     'split': 'train'}
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION, gen_kwargs={
+                     'filepath': os.path.join(data_dir, _DEV_FILE),
+                     'split': 'dev'}
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST, gen_kwargs={
+                     'filepath': os.path.join(data_dir, _TEST_FILE),
+                     'split': 'test'}
+             ),
+         ]
+
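+     # Note: the expected .conllup layout is an assumption reconstructed from
+     # the column indices used in _generate_examples below. Each sentence
+     # appears to begin with a '# sent_id = ...' comment line, followed by
+     # one tab-separated line per token:
+     #   ID <TAB> FORM <TAB> NORM <TAB> LEMMA <TAB> UPOS <TAB> XPOS <TAB> FEATS ...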
+     def _generate_examples(self, filepath, split):
+         # 'split' is passed via gen_kwargs but not used here.
+         with open(filepath, encoding='utf-8') as f:
+             sent_id = ''
+             tokens = []
+             norms = []
+             lemmas = []
+             xpos_tags = []
+             feats = []
+             upos_tags = []
+             data_id = 0
+             for line in f:
+                 if line and line != '\n' and not line.startswith('# global.columns'):
+                     if line.startswith('# sent_id'):
+                         # A new sentence begins: emit the one collected so far.
+                         if tokens:
+                             yield data_id, {
+                                 'sent_id': sent_id,
+                                 'tokens': tokens,
+                                 'norms': norms,
+                                 'lemmas': lemmas,
+                                 'xpos_tags': xpos_tags,
+                                 'feats': feats,
+                                 'upos_tags': upos_tags
+                             }
+                             tokens = []
+                             norms = []
+                             lemmas = []
+                             xpos_tags = []
+                             feats = []
+                             upos_tags = []
+                             data_id += 1
+                         sent_id = line.split(' = ')[1].strip()
+                     else:
+                         splits = line.split('\t')
+                         tokens.append(splits[1].strip())
+                         norms.append(splits[2].strip())
+                         lemmas.append(splits[3].strip())
+                         upos_tags.append(splits[4].strip())
+                         xpos_tags.append(splits[5].strip())
+                         feats.append(splits[6].strip())
+
+             # Emit the final sentence in the file.
+             yield data_id, {
+                 'sent_id': sent_id,
+                 'tokens': tokens,
+                 'norms': norms,
+                 'lemmas': lemmas,
+                 'xpos_tags': xpos_tags,
+                 'feats': feats,
+                 'upos_tags': upos_tags
+             }
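
Once published, a script like this is consumed through the standard datasets API rather than run directly. A minimal usage sketch, assuming the repo id classla/janes_tag implied by _URL above:

    from datasets import load_dataset

    # Recent versions of the datasets library may also require
    # trust_remote_code=True to execute a community loading script.
    dataset = load_dataset('classla/janes_tag')

    # 'upos_tags' come back as ClassLabel ids; map them to their string names.
    example = dataset['train'][0]
    upos = dataset['train'].features['upos_tags'].feature
    print(example['tokens'])
    print([upos.int2str(i) for i in example['upos_tags']])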