system HF staff committed on
Commit
334d6b2
1 Parent(s): cbb49c7

Update files from the datasets library (from 1.1.3)

Browse files

Release notes: https://github.com/huggingface/datasets/releases/tag/1.1.3

Files changed (2) hide show
  1. conll2003.py +125 -19
  2. dataset_infos.json +1 -1
conll2003.py CHANGED
@@ -80,10 +80,104 @@ class Conll2003(datasets.GeneratorBasedBuilder):
80
  features=datasets.Features(
81
  {
82
  "id": datasets.Value("string"),
83
- "words": datasets.Sequence(datasets.Value("string")),
84
- "pos": datasets.Sequence(datasets.Value("string")),
85
- "chunk": datasets.Sequence(datasets.Value("string")),
86
- "ner": datasets.Sequence(datasets.Value("string")),
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
87
  }
88
  ),
89
  supervised_keys=None,
@@ -110,25 +204,37 @@ class Conll2003(datasets.GeneratorBasedBuilder):
110
  logging.info("⏳ Generating examples from = %s", filepath)
111
  with open(filepath, encoding="utf-8") as f:
112
  guid = 0
113
- words = []
114
- pos = []
115
- chunk = []
116
- ner = []
117
  for line in f:
118
  if line.startswith("-DOCSTART-") or line == "" or line == "\n":
119
- if words:
120
- yield guid, {"id": str(guid), "words": words, "pos": pos, "chunk": chunk, "ner": ner}
 
 
 
 
 
 
121
  guid += 1
122
- words = []
123
- pos = []
124
- chunk = []
125
- ner = []
126
  else:
127
  # conll2003 tokens are space separated
128
  splits = line.split(" ")
129
- words.append(splits[0])
130
- pos.append(splits[1])
131
- chunk.append(splits[2])
132
- ner.append(splits[3].rstrip())
133
  # last example
134
- yield guid, {"id": str(guid), "words": words, "pos": pos, "chunk": chunk, "ner": ner}
 
 
 
 
 
 
 
80
  features=datasets.Features(
81
  {
82
  "id": datasets.Value("string"),
83
+ "tokens": datasets.Sequence(datasets.Value("string")),
84
+ "pos_tags": datasets.Sequence(
85
+ datasets.features.ClassLabel(
86
+ names=[
87
+ '"',
88
+ "''",
89
+ "#",
90
+ "$",
91
+ "(",
92
+ ")",
93
+ ",",
94
+ ".",
95
+ ":",
96
+ "``",
97
+ "CC",
98
+ "CD",
99
+ "DT",
100
+ "EX",
101
+ "FW",
102
+ "IN",
103
+ "JJ",
104
+ "JJR",
105
+ "JJS",
106
+ "LS",
107
+ "MD",
108
+ "NN",
109
+ "NNP",
110
+ "NNPS",
111
+ "NNS",
112
+ "NN|SYM",
113
+ "PDT",
114
+ "POS",
115
+ "PRP",
116
+ "PRP$",
117
+ "RB",
118
+ "RBR",
119
+ "RBS",
120
+ "RP",
121
+ "SYM",
122
+ "TO",
123
+ "UH",
124
+ "VB",
125
+ "VBD",
126
+ "VBG",
127
+ "VBN",
128
+ "VBP",
129
+ "VBZ",
130
+ "WDT",
131
+ "WP",
132
+ "WP$",
133
+ "WRB",
134
+ ]
135
+ )
136
+ ),
137
+ "chunk_tags": datasets.Sequence(
138
+ datasets.features.ClassLabel(
139
+ names=[
140
+ "O",
141
+ "B-ADJP",
142
+ "I-ADJP",
143
+ "B-ADVP",
144
+ "I-ADVP",
145
+ "B-CONJP",
146
+ "I-CONJP",
147
+ "B-INTJ",
148
+ "I-INTJ",
149
+ "B-LST",
150
+ "I-LST",
151
+ "B-NP",
152
+ "I-NP",
153
+ "B-PP",
154
+ "I-PP",
155
+ "B-PRT",
156
+ "I-PRT",
157
+ "B-SBAR",
158
+ "I-SBAR",
159
+ "B-UCP",
160
+ "I-UCP",
161
+ "B-VP",
162
+ "I-VP",
163
+ ]
164
+ )
165
+ ),
166
+ "ner_tags": datasets.Sequence(
167
+ datasets.features.ClassLabel(
168
+ names=[
169
+ "O",
170
+ "B-PER",
171
+ "I-PER",
172
+ "B-ORG",
173
+ "I-ORG",
174
+ "B-LOC",
175
+ "I-LOC",
176
+ "B-MISC",
177
+ "I-MISC",
178
+ ]
179
+ )
180
+ ),
181
  }
182
  ),
183
  supervised_keys=None,
 
204
  logging.info("⏳ Generating examples from = %s", filepath)
205
  with open(filepath, encoding="utf-8") as f:
206
  guid = 0
207
+ tokens = []
208
+ pos_tags = []
209
+ chunk_tags = []
210
+ ner_tags = []
211
  for line in f:
212
  if line.startswith("-DOCSTART-") or line == "" or line == "\n":
213
+ if tokens:
214
+ yield guid, {
215
+ "id": str(guid),
216
+ "tokens": tokens,
217
+ "pos_tags": pos_tags,
218
+ "chunk_tags": chunk_tags,
219
+ "ner_tags": ner_tags,
220
+ }
221
  guid += 1
222
+ tokens = []
223
+ pos_tags = []
224
+ chunk_tags = []
225
+ ner_tags = []
226
  else:
227
  # conll2003 tokens are space separated
228
  splits = line.split(" ")
229
+ tokens.append(splits[0])
230
+ pos_tags.append(splits[1])
231
+ chunk_tags.append(splits[2])
232
+ ner_tags.append(splits[3].rstrip())
233
  # last example
234
+ yield guid, {
235
+ "id": str(guid),
236
+ "tokens": tokens,
237
+ "pos_tags": pos_tags,
238
+ "chunk_tags": chunk_tags,
239
+ "ner_tags": ner_tags,
240
+ }
dataset_infos.json CHANGED
@@ -1 +1 @@
1
- {"conll2003": {"description": "The shared task of CoNLL-2003 concerns language-independent named entity recognition. We will concentrate on\nfour types of named entities: persons, locations, organizations and names of miscellaneous entities that do\nnot belong to the previous three groups.\n\nThe CoNLL-2003 shared task data files contain four columns separated by a single space. Each word has been put on\na separate line and there is an empty line after each sentence. The first item on each line is a word, the second\na part-of-speech (POS) tag, the third a syntactic chunk tag and the fourth the named entity tag. The chunk tags\nand the named entity tags have the format I-TYPE which means that the word is inside a phrase of type TYPE. Only\nif two phrases of the same type immediately follow each other, the first word of the second phrase will have tag\nB-TYPE to show that it starts a new phrase. A word with tag O is not part of a phrase. Note the dataset uses IOB2\ntagging scheme, whereas the original dataset uses IOB1.\n\nFor more details see https://www.clips.uantwerpen.be/conll2003/ner/ and https://www.aclweb.org/anthology/W03-0419\n", "citation": "@inproceedings{tjong-kim-sang-de-meulder-2003-introduction,\n title = \"Introduction to the {C}o{NLL}-2003 Shared Task: Language-Independent Named Entity Recognition\",\n author = \"Tjong Kim Sang, Erik F. 
and\n De Meulder, Fien\",\n booktitle = \"Proceedings of the Seventh Conference on Natural Language Learning at {HLT}-{NAACL} 2003\",\n year = \"2003\",\n url = \"https://www.aclweb.org/anthology/W03-0419\",\n pages = \"142--147\",\n}\n", "homepage": "https://www.aclweb.org/anthology/W03-0419/", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "words": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "pos": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "chunk": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ner": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "conll2003", "config_name": "conll2003", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 6032006, "num_examples": 14041, "dataset_name": "conll2003"}, "validation": {"name": "validation", "num_bytes": 1512711, "num_examples": 3250, "dataset_name": "conll2003"}, "test": {"name": "test", "num_bytes": 1378578, "num_examples": 3453, "dataset_name": "conll2003"}}, "download_checksums": {"https://github.com/davidsbatista/NER-datasets/raw/master/CONLL2003/train.txt": {"num_bytes": 3283418, "checksum": "c99b26852dabf57ca9d30a0e892b84544cc8962003151e14a71077c55dc66db5"}, "https://github.com/davidsbatista/NER-datasets/raw/master/CONLL2003/valid.txt": {"num_bytes": 827441, "checksum": "f1f6469322876887de1d04acd43c59b02f59d5b02acf42c027132fa1bf349cb2"}, "https://github.com/davidsbatista/NER-datasets/raw/master/CONLL2003/test.txt": {"num_bytes": 748093, "checksum": "82e0c72d262f86ad3e78b15c5d980bbf87cb205aa4bf6d2d97643f463f8d7ff7"}}, "download_size": 4858952, 
"post_processing_size": null, "dataset_size": 8923295, "size_in_bytes": 13782247}}
 
1
+ {"conll2003": {"description": "The shared task of CoNLL-2003 concerns language-independent named entity recognition. We will concentrate on\nfour types of named entities: persons, locations, organizations and names of miscellaneous entities that do\nnot belong to the previous three groups.\n\nThe CoNLL-2003 shared task data files contain four columns separated by a single space. Each word has been put on\na separate line and there is an empty line after each sentence. The first item on each line is a word, the second\na part-of-speech (POS) tag, the third a syntactic chunk tag and the fourth the named entity tag. The chunk tags\nand the named entity tags have the format I-TYPE which means that the word is inside a phrase of type TYPE. Only\nif two phrases of the same type immediately follow each other, the first word of the second phrase will have tag\nB-TYPE to show that it starts a new phrase. A word with tag O is not part of a phrase. Note the dataset uses IOB2\ntagging scheme, whereas the original dataset uses IOB1.\n\nFor more details see https://www.clips.uantwerpen.be/conll2003/ner/ and https://www.aclweb.org/anthology/W03-0419\n", "citation": "@inproceedings{tjong-kim-sang-de-meulder-2003-introduction,\n title = \"Introduction to the {C}o{NLL}-2003 Shared Task: Language-Independent Named Entity Recognition\",\n author = \"Tjong Kim Sang, Erik F. 
and\n De Meulder, Fien\",\n booktitle = \"Proceedings of the Seventh Conference on Natural Language Learning at {HLT}-{NAACL} 2003\",\n year = \"2003\",\n url = \"https://www.aclweb.org/anthology/W03-0419\",\n pages = \"142--147\",\n}\n", "homepage": "https://www.aclweb.org/anthology/W03-0419/", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "pos_tags": {"feature": {"num_classes": 47, "names": ["\"", "''", "#", "$", "(", ")", ",", ".", ":", "``", "CC", "CD", "DT", "EX", "FW", "IN", "JJ", "JJR", "JJS", "LS", "MD", "NN", "NNP", "NNPS", "NNS", "NN|SYM", "PDT", "POS", "PRP", "PRP$", "RB", "RBR", "RBS", "RP", "SYM", "TO", "UH", "VB", "VBD", "VBG", "VBN", "VBP", "VBZ", "WDT", "WP", "WP$", "WRB"], "names_file": null, "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "chunk_tags": {"feature": {"num_classes": 23, "names": ["O", "B-ADJP", "I-ADJP", "B-ADVP", "I-ADVP", "B-CONJP", "I-CONJP", "B-INTJ", "I-INTJ", "B-LST", "I-LST", "B-NP", "I-NP", "B-PP", "I-PP", "B-PRT", "I-PRT", "B-SBAR", "I-SBAR", "B-UCP", "I-UCP", "B-VP", "I-VP"], "names_file": null, "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 9, "names": ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC", "B-MISC", "I-MISC"], "names_file": null, "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "conll2003", "config_name": "conll2003", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 6931393, "num_examples": 14041, "dataset_name": "conll2003"}, "validation": {"name": "validation", "num_bytes": 1739247, "num_examples": 3250, "dataset_name": "conll2003"}, 
"test": {"name": "test", "num_bytes": 1582078, "num_examples": 3453, "dataset_name": "conll2003"}}, "download_checksums": {"https://github.com/davidsbatista/NER-datasets/raw/master/CONLL2003/train.txt": {"num_bytes": 3283418, "checksum": "c99b26852dabf57ca9d30a0e892b84544cc8962003151e14a71077c55dc66db5"}, "https://github.com/davidsbatista/NER-datasets/raw/master/CONLL2003/valid.txt": {"num_bytes": 827441, "checksum": "f1f6469322876887de1d04acd43c59b02f59d5b02acf42c027132fa1bf349cb2"}, "https://github.com/davidsbatista/NER-datasets/raw/master/CONLL2003/test.txt": {"num_bytes": 748093, "checksum": "82e0c72d262f86ad3e78b15c5d980bbf87cb205aa4bf6d2d97643f463f8d7ff7"}}, "download_size": 4858952, "post_processing_size": null, "dataset_size": 10252718, "size_in_bytes": 15111670}}