firas-meddeb98 committed on
Commit bd9ad44
1 Parent(s): 2ab3398

Create dataset_nlp.py

Files changed (1):
  1. dataset_nlp.py +241 -0
dataset_nlp.py ADDED
# coding=utf-8
# Copyright 2020 HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Lint as: python3
"""Introduction to the CoNLL-2003 Shared Task: Language-Independent Named Entity Recognition"""

import os

import datasets


logger = datasets.logging.get_logger(__name__)


_CITATION = """\
@inproceedings{tjong-kim-sang-de-meulder-2003-introduction,
    title = "Introduction to the {C}o{NLL}-2003 Shared Task: Language-Independent Named Entity Recognition",
    author = "Tjong Kim Sang, Erik F. and
      De Meulder, Fien",
    booktitle = "Proceedings of the Seventh Conference on Natural Language Learning at {HLT}-{NAACL} 2003",
    year = "2003",
    url = "https://www.aclweb.org/anthology/W03-0419",
    pages = "142--147",
}
"""

_DESCRIPTION = """\
The shared task of CoNLL-2003 concerns language-independent named entity recognition. We will concentrate on
four types of named entities: persons, locations, organizations and names of miscellaneous entities that do
not belong to the previous three groups.
The CoNLL-2003 shared task data files contain four columns separated by a single space. Each word has been put on
a separate line and there is an empty line after each sentence. The first item on each line is a word, the second
a part-of-speech (POS) tag, the third a syntactic chunk tag and the fourth the named entity tag. The chunk tags
and the named entity tags have the format I-TYPE, which means that the word is inside a phrase of type TYPE. Only
if two phrases of the same type immediately follow each other will the first word of the second phrase have tag
B-TYPE, to show that it starts a new phrase. A word with tag O is not part of a phrase. Note the dataset uses the
IOB2 tagging scheme, whereas the original dataset uses IOB1.
For more details see https://www.clips.uantwerpen.be/conll2003/ner/ and https://www.aclweb.org/anthology/W03-0419
"""

_URL = "https://data.deepai.org/conll2003.zip"
_TRAINING_FILE = "train.txt"
_DEV_FILE = "valid.txt"
_TEST_FILE = "test.txt"


class Conll2003Config(datasets.BuilderConfig):
    """BuilderConfig for Conll2003"""

    def __init__(self, **kwargs):
        """BuilderConfig for Conll2003.

        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        super(Conll2003Config, self).__init__(**kwargs)


class Conll2003(datasets.GeneratorBasedBuilder):
    """Conll2003 dataset."""

    BUILDER_CONFIGS = [
        Conll2003Config(name="conll2003", version=datasets.Version("1.0.0"), description="Conll2003 dataset"),
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    "pos_tags": datasets.Sequence(
                        datasets.features.ClassLabel(
                            names=[
                                '"',
                                "''",
                                "#",
                                "$",
                                "(",
                                ")",
                                ",",
                                ".",
                                ":",
                                "``",
                                "CC",
                                "CD",
                                "DT",
                                "EX",
                                "FW",
                                "IN",
                                "JJ",
                                "JJR",
                                "JJS",
                                "LS",
                                "MD",
                                "NN",
                                "NNP",
                                "NNPS",
                                "NNS",
                                "NN|SYM",
                                "PDT",
                                "POS",
                                "PRP",
                                "PRP$",
                                "RB",
                                "RBR",
                                "RBS",
                                "RP",
                                "SYM",
                                "TO",
                                "UH",
                                "VB",
                                "VBD",
                                "VBG",
                                "VBN",
                                "VBP",
                                "VBZ",
                                "WDT",
                                "WP",
                                "WP$",
                                "WRB",
                            ]
                        )
                    ),
                    "chunk_tags": datasets.Sequence(
                        datasets.features.ClassLabel(
                            names=[
                                "O",
                                "B-ADJP",
                                "I-ADJP",
                                "B-ADVP",
                                "I-ADVP",
                                "B-CONJP",
                                "I-CONJP",
                                "B-INTJ",
                                "I-INTJ",
                                "B-LST",
                                "I-LST",
                                "B-NP",
                                "I-NP",
                                "B-PP",
                                "I-PP",
                                "B-PRT",
                                "I-PRT",
                                "B-SBAR",
                                "I-SBAR",
                                "B-UCP",
                                "I-UCP",
                                "B-VP",
                                "I-VP",
                            ]
                        )
                    ),
                    "ner_tags": datasets.Sequence(
                        datasets.features.ClassLabel(
                            names=[
                                "O",
                                "B-PER",
                                "I-PER",
                                "B-ORG",
                                "I-ORG",
                                "B-LOC",
                                "I-LOC",
                                "B-MISC",
                                "I-MISC",
                            ]
                        )
                    ),
                }
            ),
            supervised_keys=None,
            homepage="https://www.aclweb.org/anthology/W03-0419/",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        downloaded_file = dl_manager.download_and_extract(_URL)
        data_files = {
            "train": os.path.join(downloaded_file, _TRAINING_FILE),
            "dev": os.path.join(downloaded_file, _DEV_FILE),
            "test": os.path.join(downloaded_file, _TEST_FILE),
        }

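        # The keys of each gen_kwargs dict are forwarded by the datasets
        # library as keyword arguments to _generate_examples, so "filepath"
        # here must match that method's parameter name.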
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": data_files["train"]}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": data_files["dev"]}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": data_files["test"]}),
        ]

    def _generate_examples(self, filepath):
        logger.info("⏳ Generating examples from = %s", filepath)
        with open(filepath, encoding="utf-8") as f:
            guid = 0
            tokens = []
            pos_tags = []
            chunk_tags = []
            ner_tags = []
            for line in f:
                if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                    if tokens:
                        yield guid, {
                            "id": str(guid),
                            "tokens": tokens,
                            "pos_tags": pos_tags,
                            "chunk_tags": chunk_tags,
                            "ner_tags": ner_tags,
                        }
                        guid += 1
                        tokens = []
                        pos_tags = []
                        chunk_tags = []
                        ner_tags = []
                else:
                    # conll2003 tokens are space separated
                    splits = line.split(" ")
                    tokens.append(splits[0])
                    pos_tags.append(splits[1])
                    chunk_tags.append(splits[2])
                    ner_tags.append(splits[3].rstrip())
            # last example
            if tokens:
                yield guid, {
                    "id": str(guid),
                    "tokens": tokens,
                    "pos_tags": pos_tags,
                    "chunk_tags": chunk_tags,
                    "ner_tags": ner_tags,
                }
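
A quick way to sanity-check the script is to load it directly with the datasets library. This is a minimal sketch, assuming the file is saved locally as dataset_nlp.py (recent datasets releases may additionally require trust_remote_code=True for script-based loading):

from datasets import load_dataset

# Build all three splits from the local loading script.
ds = load_dataset("dataset_nlp.py")

# Label columns are stored as ClassLabel ids; map them back to names.
example = ds["train"][0]
ner_names = ds["train"].features["ner_tags"].feature.names
print(example["tokens"])
print([ner_names[i] for i in example["ner_tags"]])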