Modalities: Text
Languages: Spanish
Libraries: Datasets
Blanca committed
Commit: eb715b8
Parent(s): 7f0b529

Upload CoNLL-NERC-es.py

Files changed (1):
1. CoNLL-NERC-es.py +224 -0
CoNLL-NERC-es.py ADDED
@@ -0,0 +1,224 @@
# coding=utf-8
# Copyright 2020 HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Lint as: python3
"""Introduction to the CoNLL-2002 Shared Task: Language-Independent Named Entity Recognition"""

import datasets


logger = datasets.logging.get_logger(__name__)


_CITATION = """\
@inproceedings{tjong-kim-sang-2002-introduction,
    title = "Introduction to the {C}o{NLL}-2002 Shared Task: Language-Independent Named Entity Recognition",
    author = "Tjong Kim Sang, Erik F.",
    booktitle = "{COLING}-02: The 6th Conference on Natural Language Learning 2002 ({C}o{NLL}-2002)",
    year = "2002",
    url = "https://www.aclweb.org/anthology/W02-2024",
}
"""

_DESCRIPTION = """\
Named entities are phrases that contain the names of persons, organizations, locations, times and quantities.

Example:
[PER Wolff] , currently a journalist in [LOC Argentina] , played with [PER Del Bosque] in the final years of the seventies in [ORG Real Madrid] .

The shared task of CoNLL-2002 concerns language-independent named entity recognition.
We will concentrate on four types of named entities: persons, locations, organizations and names of miscellaneous entities that do not belong to the previous three groups.
The participants of the shared task will be offered training and test data for at least two languages.
They will use the data for developing a named-entity recognition system that includes a machine learning component.
Information sources other than the training data may be used in this shared task.
We are especially interested in methods that can use additional unannotated data for improving their performance (for example co-training).

The train/validation/test sets are available in Spanish and Dutch.

For more details see https://www.clips.uantwerpen.be/conll2002/ner/ and https://www.aclweb.org/anthology/W02-2024/
"""

_URL = "https://www.cs.upc.edu/~nlp/tools/nerc/"
_TRAINING_FILE = "esp.train.gz"
_DEV_FILE = "esp.testa.gz"
_TEST_FILE = "esp.testb.gz"


class Conll2002Config(datasets.BuilderConfig):
    """BuilderConfig for Conll2002."""

    def __init__(self, **kwargs):
        """BuilderConfig for Conll2002.

        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        super(Conll2002Config, self).__init__(**kwargs)


class Conll2002(datasets.GeneratorBasedBuilder):
    """Conll2002 dataset."""

    BUILDER_CONFIGS = [
        Conll2002Config(name="es", version=datasets.Version("1.0.0"), description="Conll2002 Spanish dataset"),
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    "pos_tags": datasets.Sequence(
                        datasets.features.ClassLabel(
                            names=[
                                "AO",
                                "AQ",
                                "CC",
                                "CS",
                                "DA",
                                "DE",
                                "DD",
                                "DI",
                                "DN",
                                "DP",
                                "DT",
                                "Faa",
                                "Fat",
                                "Fc",
                                "Fd",
                                "Fe",
                                "Fg",
                                "Fh",
                                "Fia",
                                "Fit",
                                "Fp",
                                "Fpa",
                                "Fpt",
                                "Fs",
                                "Ft",
                                "Fx",
                                "Fz",
                                "I",
                                "NC",
                                "NP",
                                "P0",
                                "PD",
                                "PI",
                                "PN",
                                "PP",
                                "PR",
                                "PT",
                                "PX",
                                "RG",
                                "RN",
                                "SP",
                                "VAI",
                                "VAM",
                                "VAN",
                                "VAP",
                                "VAS",
                                "VMG",
                                "VMI",
                                "VMM",
                                "VMN",
                                "VMP",
                                "VMS",
                                "VSG",
                                "VSI",
                                "VSM",
                                "VSN",
                                "VSP",
                                "VSS",
                                "Y",
                                "Z",
                            ]
                        )
                        if self.config.name == "es"
                        # Dutch tagset kept from the original bilingual conll2002 script;
                        # only the "es" config is exposed by this dataset.
                        else datasets.features.ClassLabel(
                            names=["Adj", "Adv", "Art", "Conj", "Int", "Misc", "N", "Num", "Prep", "Pron", "Punc", "V"]
                        )
                    ),
                    "ner_tags": datasets.Sequence(
                        datasets.features.ClassLabel(
                            names=[
                                "O",
                                "B-PER",
                                "I-PER",
                                "B-ORG",
                                "I-ORG",
                                "B-LOC",
                                "I-LOC",
                                "B-MISC",
                                "I-MISC",
                            ]
                        )
                    ),
                }
            ),
            supervised_keys=None,
            homepage="https://www.aclweb.org/anthology/W02-2024/",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        urls_to_download = {
            "train": f"{_URL}{_TRAINING_FILE}",
            "dev": f"{_URL}{_DEV_FILE}",
            "test": f"{_URL}{_TEST_FILE}",
        }
        downloaded_files = dl_manager.download_and_extract(urls_to_download)

        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"]}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}),
        ]

    def _generate_examples(self, filepath):
        logger.info("⏳ Generating examples from = %s", filepath)
        # The CoNLL-2002 files are Latin-1 encoded.
        with open(filepath, encoding="latin-1") as f:
            guid = 0
            tokens = []
            pos_tags = []
            ner_tags = []
            for line in f:
                # Blank lines and -DOCSTART- lines mark sentence/document boundaries.
                if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                    if tokens:
                        yield guid, {
                            "id": str(guid),
                            "tokens": tokens,
                            "pos_tags": pos_tags,
                            "ner_tags": ner_tags,
                        }
                        guid += 1
                        tokens = []
                        pos_tags = []
                        ner_tags = []
                else:
                    # conll2002 tokens are space separated: TOKEN POS-TAG NER-TAG
                    splits = line.split(" ")
                    tokens.append(splits[0])
                    pos_tags.append(splits[1])
                    ner_tags.append(splits[2].rstrip())
            # last example
            yield guid, {
                "id": str(guid),
                "tokens": tokens,
                "pos_tags": pos_tags,
                "ner_tags": ner_tags,
            }
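
A minimal usage sketch (not part of this commit), assuming a local copy of the script: recent versions of the `datasets` library require opting in to running dataset scripts, so the `trust_remote_code` flag may or may not be needed depending on your version.

import datasets

# Load the Spanish ("es") config defined by this script from a local file
# (the path is an assumption for illustration).
dataset = datasets.load_dataset("./CoNLL-NERC-es.py", "es", trust_remote_code=True)

# Each example is one sentence: parallel lists of tokens, POS tags and NER tags.
example = dataset["train"][0]
print(example["tokens"])

# ClassLabel features store integer ids; map them back to tag strings.
ner_names = dataset["train"].features["ner_tags"].feature.names
print([ner_names[i] for i in example["ner_tags"]])

For reference, _generate_examples expects the standard three-column CoNLL-2002 layout: one token per line as "TOKEN POS-TAG NER-TAG" (space separated), with blank lines between sentences and optional -DOCSTART- lines between documents.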