NazaGara committed on
Commit
bc269db
1 Parent(s): 5f413ac

Upload wikiner-es.py

Browse files
Files changed (1) hide show
  1. wikiner-es.py +206 -0
wikiner-es.py ADDED
@@ -0,0 +1,206 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2020 HuggingFace Datasets Authors.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ import logging
17
+
18
+ import datasets
19
+
20
+
21
# BibTeX citation for the dataset (placeholder fields not yet filled in).
_CITATION = """\
@inproceedings{,
    title = "",
    author = "Garagiola, Nazareno",
    year = "2022",
    url = ""
}
"""

# Short human-readable description surfaced in the DatasetInfo.
_DESCRIPTION = """Dataset used to train a NER model"""
# Base URL of the raw data files hosted on GitHub.
_URL = "https://raw.githubusercontent.com/NazaGara/betoNER/main/data/wikiner/"
# File name of the training split (CoNLL-style, space-separated columns).
_TRAINING_FILE = "train.conllu"
33
+
34
+
35
class WikinerConfig(datasets.BuilderConfig):
    """BuilderConfig for the Wikiner dataset.

    A thin pass-through configuration: every option is forwarded
    unchanged to ``datasets.BuilderConfig``.
    """

    def __init__(self, **kwargs):
        """Initialize the config.

        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        # Zero-argument super() — the legacy Python-2-style
        # super(WikinerConfig, self) form is unnecessary on Python 3.
        super().__init__(**kwargs)
44
+
45
+
46
class Wikiner(datasets.GeneratorBasedBuilder):
    """Wikiner Spanish NER dataset builder.

    Parses a CoNLL-style file in which each non-blank line carries
    ``token pos_tag ner_tag`` separated by single spaces, and blank
    lines or ``-DOCSTART-`` markers delimit sentences.
    """

    # Closed POS tag inventory, hoisted out of _info() so the label set
    # is inspectable without constructing a DatasetInfo.
    _POS_TAG_NAMES = [
        "ACRNM",
        "ADJ",
        "ADV",
        "ALFS",
        "ART",
        "BACKSLASH",
        "CARD",
        "CC",
        "CCAD",
        "CCNEG",
        "CM",
        "CODE",
        "COLON",
        "CQUE",
        "CSUBF",
        "CSUBI",
        "CSUBX",
        "DM",
        "DOTS",
        "FS",
        "INT",
        "LP",
        "NC",
        "NEG",
        "NMEA",
        "NMON",
        "NP",
        "ORD",
        "PAL",
        "PDEL",
        "PE",
        "PERCT",
        "PPC",
        "PPO",
        "PPX",
        "PREP",
        "QT",
        "QU",
        "REL",
        "RP",
        "SE",
        "SEMICOLON",
        "SLASH",
        "SYM",
        "UMMX",
        "VCLIfin",
        "VCLIger",
        "VCLIinf",
        "VEadj",
        "VEfin",
        "VEger",
        "VEinf",
        "VHadj",
        "VHfin",
        "VHger",
        "VHinf",
        "VLadj",
        "VLfin",
        "VLger",
        "VLinf",
        "VMadj",
        "VMfin",
        "VMger",
        "VMinf",
        "VSadj",
        "VSfin",
        "VSger",
        "VSinf",
    ]

    # IOB2-style NER tag inventory.
    _NER_TAG_NAMES = [
        "O",
        "B-PER",
        "I-PER",
        "B-ORG",
        "I-ORG",
        "B-LOC",
        "I-LOC",
        "B-MISC",
        "I-MISC",
    ]

    BUILDER_CONFIGS = [
        WikinerConfig(
            name="wikiner",
            version=datasets.Version("1.1.0"),
            description="wikiner dataset",
        ),
    ]

    def _info(self):
        """Return the DatasetInfo describing features, homepage and citation."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    "pos_tags": datasets.Sequence(
                        datasets.features.ClassLabel(names=self._POS_TAG_NAMES)
                    ),
                    "ner_tags": datasets.Sequence(
                        datasets.features.ClassLabel(names=self._NER_TAG_NAMES)
                    ),
                }
            ),
            supervised_keys=None,
            homepage=_URL,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the training file and return its SplitGenerator.

        Only a TRAIN split is published for this dataset.
        """
        urls_to_download = {
            "train": f"{_URL}{_TRAINING_FILE}",
        }
        downloaded_files = dl_manager.download_and_extract(urls_to_download)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": downloaded_files["train"]},
            ),
        ]

    def _generate_examples(self, filepath):
        """Yield ``(key, example)`` pairs parsed from *filepath*.

        Args:
            filepath: local path of the downloaded CoNLL-style file.

        Yields:
            ``(guid, dict)`` where the dict has keys ``"id"``,
            ``"tokens"``, ``"pos_tags"`` and ``"ner_tags"``.
        """
        logging.info("⏳ Generating examples from = %s", filepath)
        with open(filepath, encoding="utf-8") as f:
            guid = 0
            tokens, pos_tags, ner_tags = [], [], []

            def _record():
                # Build the example dict from the sentence accumulated so far
                # (previously duplicated inline at both emission sites).
                return {
                    "id": str(guid),
                    "tokens": tokens,
                    "pos_tags": pos_tags,
                    "ner_tags": ner_tags,
                }

            for line in f:
                # Sentence boundary: a -DOCSTART- marker or a blank line.
                # NOTE(review): iterating a file object never yields "", so
                # only "\n" occurs in practice; the "" test is kept for exact
                # parity with the original code.
                if line.startswith("-DOCSTART-") or line in ("", "\n"):
                    if tokens:
                        yield guid, _record()
                        guid += 1
                        tokens, pos_tags, ner_tags = [], [], []
                else:
                    # Columns are separated by single spaces; only the last
                    # (NER) column carries the trailing newline to strip.
                    splits = line.split(" ")
                    tokens.append(splits[0])
                    pos_tags.append(splits[1])
                    ner_tags.append(splits[2].rstrip())

            # Emit the trailing sentence when the file does not end blank.
            if tokens:
                yield guid, _record()