asahi417 committed on
Commit
ca22fc0
1 Parent(s): 068e8ed
README.md ADDED
File without changes
conll2003.py ADDED
@@ -0,0 +1,240 @@
+ """ NER dataset compiled by the T-NER library: https://github.com/asahi417/tner/tree/master/tner """
+ import os
+
+ import datasets
+
+
+ logger = datasets.logging.get_logger(__name__)
+
+
+ _CITATION = """\
+ @inproceedings{tjong-kim-sang-de-meulder-2003-introduction,
+     title = "Introduction to the {C}o{NLL}-2003 Shared Task: Language-Independent Named Entity Recognition",
+     author = "Tjong Kim Sang, Erik F. and
+       De Meulder, Fien",
+     booktitle = "Proceedings of the Seventh Conference on Natural Language Learning at {HLT}-{NAACL} 2003",
+     year = "2003",
+     url = "https://www.aclweb.org/anthology/W03-0419",
+     pages = "142--147",
+ }
+ """
+
+ _DESCRIPTION = """\
+ The shared task of CoNLL-2003 concerns language-independent named entity recognition. We will concentrate on
+ four types of named entities: persons, locations, organizations and names of miscellaneous entities that do
+ not belong to the previous three groups.
+
+ The CoNLL-2003 shared task data files contain four columns separated by a single space. Each word has been put on
+ a separate line and there is an empty line after each sentence. The first item on each line is a word, the second
+ a part-of-speech (POS) tag, the third a syntactic chunk tag and the fourth the named entity tag. The chunk tags
+ and the named entity tags have the format I-TYPE, which means that the word is inside a phrase of type TYPE. Only
+ if two phrases of the same type immediately follow each other will the first word of the second phrase have the tag
+ B-TYPE, to show that it starts a new phrase. A word with tag O is not part of a phrase. Note that this dataset uses
+ the IOB2 tagging scheme, whereas the original dataset uses IOB1.
+
+ For more details see https://www.clips.uantwerpen.be/conll2003/ner/ and https://www.aclweb.org/anthology/W03-0419
+ """
+
+ _URL = "https://data.deepai.org/conll2003.zip"
+ _TRAINING_FILE = "train.txt"
+ _DEV_FILE = "valid.txt"
+ _TEST_FILE = "test.txt"
+
+
+ class Conll2003Config(datasets.BuilderConfig):
+     """BuilderConfig for Conll2003"""
+
+     def __init__(self, **kwargs):
+         """BuilderConfig for Conll2003.
+
+         Args:
+             **kwargs: keyword arguments forwarded to super.
+         """
+         super(Conll2003Config, self).__init__(**kwargs)
+
+
+ class Conll2003(datasets.GeneratorBasedBuilder):
+     """Conll2003 dataset."""
+
+     BUILDER_CONFIGS = [
+         Conll2003Config(name="conll2003", version=datasets.Version("1.0.0"), description="Conll2003 dataset"),
+     ]
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(
+                 {
+                     "id": datasets.Value("string"),
+                     "tokens": datasets.Sequence(datasets.Value("string")),
+                     "pos_tags": datasets.Sequence(
+                         datasets.features.ClassLabel(
+                             names=[
+                                 '"',
+                                 "''",
+                                 "#",
+                                 "$",
+                                 "(",
+                                 ")",
+                                 ",",
+                                 ".",
+                                 ":",
+                                 "``",
+                                 "CC",
+                                 "CD",
+                                 "DT",
+                                 "EX",
+                                 "FW",
+                                 "IN",
+                                 "JJ",
+                                 "JJR",
+                                 "JJS",
+                                 "LS",
+                                 "MD",
+                                 "NN",
+                                 "NNP",
+                                 "NNPS",
+                                 "NNS",
+                                 "NN|SYM",
+                                 "PDT",
+                                 "POS",
+                                 "PRP",
+                                 "PRP$",
+                                 "RB",
+                                 "RBR",
+                                 "RBS",
+                                 "RP",
+                                 "SYM",
+                                 "TO",
+                                 "UH",
+                                 "VB",
+                                 "VBD",
+                                 "VBG",
+                                 "VBN",
+                                 "VBP",
+                                 "VBZ",
+                                 "WDT",
+                                 "WP",
+                                 "WP$",
+                                 "WRB",
+                             ]
+                         )
+                     ),
+                     "chunk_tags": datasets.Sequence(
+                         datasets.features.ClassLabel(
+                             names=[
+                                 "O",
+                                 "B-ADJP",
+                                 "I-ADJP",
+                                 "B-ADVP",
+                                 "I-ADVP",
+                                 "B-CONJP",
+                                 "I-CONJP",
+                                 "B-INTJ",
+                                 "I-INTJ",
+                                 "B-LST",
+                                 "I-LST",
+                                 "B-NP",
+                                 "I-NP",
+                                 "B-PP",
+                                 "I-PP",
+                                 "B-PRT",
+                                 "I-PRT",
+                                 "B-SBAR",
+                                 "I-SBAR",
+                                 "B-UCP",
+                                 "I-UCP",
+                                 "B-VP",
+                                 "I-VP",
+                             ]
+                         )
+                     ),
+                     "ner_tags": datasets.Sequence(
+                         datasets.features.ClassLabel(
+                             names=[
+                                 "O",
+                                 "B-PER",
+                                 "I-PER",
+                                 "B-ORG",
+                                 "I-ORG",
+                                 "B-LOC",
+                                 "I-LOC",
+                                 "B-MISC",
+                                 "I-MISC",
+                             ]
+                         )
+                     ),
+                 }
+             ),
+             supervised_keys=None,
+             homepage="https://www.aclweb.org/anthology/W03-0419/",
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         """Returns SplitGenerators."""
+         downloaded_file = dl_manager.download_and_extract(_URL)
+         data_files = {
+             "train": os.path.join(downloaded_file, _TRAINING_FILE),
+             "dev": os.path.join(downloaded_file, _DEV_FILE),
+             "test": os.path.join(downloaded_file, _TEST_FILE),
+         }
+
+         return [
+             datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": data_files["train"]}),
+             datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": data_files["dev"]}),
+             datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": data_files["test"]}),
+         ]
+
+     def _generate_examples(self, filepath):
+         logger.info("⏳ Generating examples from = %s", filepath)
+         with open(filepath, encoding="utf-8") as f:
+             guid = 0
+             tokens = []
+             pos_tags = []
+             chunk_tags = []
+             ner_tags = []
+             for line in f:
+                 if line.startswith("-DOCSTART-") or line == "" or line == "\n":
+                     if tokens:
+                         yield guid, {
+                             "id": str(guid),
+                             "tokens": tokens,
+                             "pos_tags": pos_tags,
+                             "chunk_tags": chunk_tags,
+                             "ner_tags": ner_tags,
+                         }
+                         guid += 1
+                         tokens = []
+                         pos_tags = []
+                         chunk_tags = []
+                         ner_tags = []
+                 else:
+                     # conll2003 tokens are space separated
+                     splits = line.split(" ")
+                     tokens.append(splits[0])
+                     pos_tags.append(splits[1])
+                     chunk_tags.append(splits[2])
+                     ner_tags.append(splits[3].rstrip())
+             # last example
+             if tokens:
+                 yield guid, {
+                     "id": str(guid),
+                     "tokens": tokens,
+                     "pos_tags": pos_tags,
+                     "chunk_tags": chunk_tags,
+                     "ner_tags": ner_tags,
+                 }
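
The generator above emits one example per sentence: a blank line or a `-DOCSTART-` document marker closes the current sentence, and each non-blank line contributes one token together with its POS, chunk, and NER columns. A minimal self-contained sketch of that parsing logic, run on an illustrative CoNLL-formatted snippet (the sample text and the `parse_conll` helper are for illustration only, not part of this commit):

```python
# Illustrative snippet in CoNLL-2003 format: "word POS chunk NER", one token
# per line, sentences separated by a blank line (IOB2 NER tags).
SAMPLE = """\
EU NNP B-NP B-ORG
rejects VBZ B-VP O
German JJ B-NP B-MISC
call NN I-NP O
. . O O

Peter NNP B-NP B-PER
Blackburn NNP I-NP I-PER
"""


def parse_conll(text):
    tokens, ner_tags = [], []
    for line in text.splitlines():
        if line.startswith("-DOCSTART-") or not line.strip():
            if tokens:  # a blank line (or document marker) closes the sentence
                yield {"tokens": tokens, "ner_tags": ner_tags}
                tokens, ner_tags = [], []
        else:
            splits = line.split(" ")  # columns: word, POS, chunk, NER
            tokens.append(splits[0])
            ner_tags.append(splits[3])
    if tokens:  # flush the final sentence
        yield {"tokens": tokens, "ner_tags": ner_tags}


for example in parse_conll(SAMPLE):
    print(example)
# {'tokens': ['EU', 'rejects', 'German', 'call', '.'], 'ner_tags': ['B-ORG', 'O', 'B-MISC', 'O', 'O']}
# {'tokens': ['Peter', 'Blackburn'], 'ner_tags': ['B-PER', 'I-PER']}
```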
dataset/conll2003.data.test.json ADDED
The diff for this file is too large to render. See raw diff
 
dataset/conll2003.data.train.json ADDED
The diff for this file is too large to render. See raw diff
 
dataset/conll2003.data.valid.json ADDED
The diff for this file is too large to render. See raw diff
 
dataset/conll2003.label.json ADDED
@@ -0,0 +1 @@
+ {"O": 0, "B-ORG": 1, "B-MISC": 2, "B-PER": 3, "I-PER": 4, "B-LOC": 5, "I-ORG": 6, "I-MISC": 7, "I-LOC": 8}