Tasks: Token Classification
Modalities: Text
Formats: parquet
Sub-tasks: named-entity-recognition
Languages: English
Size: 10K - 100K
Commit 1301e71 · Parent(s): 6a19078

add classic conll

Files changed:
- README.md +30 -0
- data/metadata.json +101 -0
- data/test.parquet +3 -0
- data/train.parquet +3 -0
- data/validation.parquet +3 -0
- preprocess.py +94 -0
README.md
ADDED
@@ -0,0 +1,30 @@
---
pretty_name: CoNLL-03
task_categories:
- token-classification
task_ids:
- named-entity-recognition
language:
- en
configs:
- config_name: default
  data_files:
    train: data/train.parquet
    validation: data/validation.parquet
    test: data/test.parquet
---

# CoNLL-03

A classic benchmark dataset for English Named Entity Recognition (NER) with entity types PER, ORG, LOC, and MISC.

- **Size:** small, clean, ready to use
- **Source:** loaded with `datasets` v3.6 (metadata + files)
- **Format:** stored in Parquet
- **Compatibility:** 🧩 works with `datasets >= 4.0` (script loaders deprecated)

## Reference

Sang, E. F., & De Meulder, F. (2003).
*Introduction to the CoNLL-2003 Shared Task: Language-Independent Named Entity Recognition.*
[ACL Anthology](https://aclanthology.org/W03-0419/)
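Since the splits are plain Parquet files, the dataset loads without a loading script. A minimal loading sketch, assuming the files sit locally under `data/` (swap in the Hub repo id to load remotely):

```python
from datasets import load_dataset

# No loading script involved, so this also works with datasets >= 4.0.
ds = load_dataset(
    "parquet",
    data_files={
        "train": "data/train.parquet",
        "validation": "data/validation.parquet",
        "test": "data/test.parquet",
    },
)
print(ds["train"][0]["tokens"])
print(ds["train"][0]["ner_tags"])  # tags are stored as strings, e.g. "B-PER"
```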
data/metadata.json
ADDED
@@ -0,0 +1,101 @@
{
  "num_rows": {
    "train": 14041,
    "validation": 3250,
    "test": 3453
  },
  "features": {
    "id": "Value(dtype='string', id=None)",
    "tokens": "Sequence(feature=Value(dtype='string', id=None), length=-1, id=None)",
    "pos_tags": "Sequence(feature=ClassLabel(names=['\"', \"''\", '#', '$', '(', ')', ',', '.', ':', '``', 'CC', 'CD', 'DT', 'EX', 'FW', 'IN', 'JJ', 'JJR', 'JJS', 'LS', 'MD', 'NN', 'NNP', 'NNPS', 'NNS', 'NN|SYM', 'PDT', 'POS', 'PRP', 'PRP$', 'RB', 'RBR', 'RBS', 'RP', 'SYM', 'TO', 'UH', 'VB', 'VBD', 'VBG', 'VBN', 'VBP', 'VBZ', 'WDT', 'WP', 'WP$', 'WRB'], id=None), length=-1, id=None)",
    "chunk_tags": "Sequence(feature=ClassLabel(names=['O', 'B-ADJP', 'I-ADJP', 'B-ADVP', 'I-ADVP', 'B-CONJP', 'I-CONJP', 'B-INTJ', 'I-INTJ', 'B-LST', 'I-LST', 'B-NP', 'I-NP', 'B-PP', 'I-PP', 'B-PRT', 'I-PRT', 'B-SBAR', 'I-SBAR', 'B-UCP', 'I-UCP', 'B-VP', 'I-VP'], id=None), length=-1, id=None)",
    "ner_tags": "Sequence(feature=ClassLabel(names=['O', 'B-PER', 'I-PER', 'B-ORG', 'I-ORG', 'B-LOC', 'I-LOC', 'B-MISC', 'I-MISC'], id=None), length=-1, id=None)"
  },
  "label_maps": {
    "ner_tags": ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC", "B-MISC", "I-MISC"],
    "chunk_tags": ["O", "B-ADJP", "I-ADJP", "B-ADVP", "I-ADVP", "B-CONJP", "I-CONJP", "B-INTJ", "I-INTJ", "B-LST", "I-LST", "B-NP", "I-NP", "B-PP", "I-PP", "B-PRT", "I-PRT", "B-SBAR", "I-SBAR", "B-UCP", "I-UCP", "B-VP", "I-VP"],
    "pos_tags": ["\"", "''", "#", "$", "(", ")", ",", ".", ":", "``", "CC", "CD", "DT", "EX", "FW", "IN", "JJ", "JJR", "JJS", "LS", "MD", "NN", "NNP", "NNPS", "NNS", "NN|SYM", "PDT", "POS", "PRP", "PRP$", "RB", "RBR", "RBS", "RP", "SYM", "TO", "UH", "VB", "VBD", "VBG", "VBN", "VBP", "VBZ", "WDT", "WP", "WP$", "WRB"]
  }
}
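Because the Parquet files store tags as strings, the `label_maps` block doubles as the tag-to-id mapping. A minimal sketch of recovering integer class ids from `data/metadata.json`:

```python
import json
from pathlib import Path

meta = json.loads(Path("data/metadata.json").read_text(encoding="utf-8"))

# Index position in each label list is the integer class id.
ner_labels = meta["label_maps"]["ner_tags"]
tag2id = {tag: i for i, tag in enumerate(ner_labels)}

print(tag2id["B-PER"])  # 1
print(ner_labels[5])    # B-LOC
```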
data/test.parquet
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:63bde81721007707a28b01f68499b0b26dec8d238601a98c59cdaa70b231d911
size 262574
data/train.parquet
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:4a8eb767b8d2bf4daed9c931770749337e414b928676ba419fefd08f5d823ed3
size 1144427
data/validation.parquet
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:cade413b61722ec41c2377604910a41a91c385c8894abf2034dbc39b8e9ac385
size 292226
preprocess.py
ADDED
@@ -0,0 +1,94 @@
import argparse
import json
import shutil
from pathlib import Path
from typing import TypedDict

from datasets import DatasetDict, load_dataset

OUT_DIR = Path(__file__).parent / "data"
METADATA_PATH = OUT_DIR / "metadata.json"


class ConllExample(TypedDict):
    tokens: list[str]
    ner_tags: list[str]
    chunk_tags: list[str]
    pos_tags: list[str]


class LabelMaps(TypedDict):
    ner_tags: list[str]
    chunk_tags: list[str]
    pos_tags: list[str]


def ids_to_strings(example: dict, label_maps: LabelMaps) -> ConllExample:
    return {
        "tokens": example["tokens"],
        "ner_tags": [label_maps["ner_tags"][i] for i in example["ner_tags"]],
        "chunk_tags": [label_maps["chunk_tags"][i] for i in example["chunk_tags"]],
        "pos_tags": [label_maps["pos_tags"][i] for i in example["pos_tags"]],
    }


def extract_label_maps(data: DatasetDict) -> LabelMaps:
    feats = data["train"].features
    return {
        "ner_tags": feats["ner_tags"].feature.names,
        "chunk_tags": feats["chunk_tags"].feature.names,
        "pos_tags": feats["pos_tags"].feature.names,
    }


def extract_metadata(data: DatasetDict, label_maps: LabelMaps) -> dict:
    num_rows = {split_name: int(split.num_rows) for split_name, split in data.items()}
    features = {name: repr(feature) for name, feature in data["train"].features.items()}
    return {"num_rows": num_rows, "features": features, "label_maps": label_maps}


def main() -> None:
    """Load CoNLL-03 with datasets v3, save as Parquet and add metadata.

    Run: python preprocess.py --out-dir data
    """
    ap = argparse.ArgumentParser()
    ap.add_argument("--out-dir", type=Path, help="Output directory for Parquet files")
    ap.add_argument("--metadata-path", type=Path, help="Path for metadata.json")
    args = ap.parse_args()

    out_dir = args.out_dir or OUT_DIR
    metadata_path = args.metadata_path or METADATA_PATH

    out_dir.mkdir(parents=True, exist_ok=True)
    cache_path = Path(__file__).parent / "tmp"

    # using datasets v3.6
    data = load_dataset("conll2003", cache_dir=str(cache_path))

    split_map = {"train": "train", "validation": "validation", "test": "test"}
    if "validation" not in data and "valid" in data:
        split_map["validation"] = "valid"

    label_maps = extract_label_maps(data)
    meta = extract_metadata(data, label_maps)

    for split, split_name in split_map.items():
        if split_name not in data:
            continue

        out_path = out_dir / f"{split}.parquet"
        ds_str = data[split_name].map(ids_to_strings, fn_kwargs={"label_maps": label_maps})
        if "id" in ds_str.column_names:
            ds_str = ds_str.remove_columns("id")

        ds_str.to_parquet(str(out_path))

    metadata_path.write_text(json.dumps(meta, indent=2), encoding="utf-8")

    if cache_path.exists():
        shutil.rmtree(cache_path)


if __name__ == "__main__":
    main()
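After running the script, a quick consistency check (a sketch, assuming the default output paths) compares the saved splits against `metadata.json`:

```python
import json
from pathlib import Path

from datasets import load_dataset

meta = json.loads(Path("data/metadata.json").read_text(encoding="utf-8"))
ds = load_dataset(
    "parquet",
    data_files={s: f"data/{s}.parquet" for s in ("train", "validation", "test")},
)

# Row counts in the Parquet files should match the recorded metadata.
for split, expected in meta["num_rows"].items():
    assert ds[split].num_rows == expected, f"{split}: {ds[split].num_rows} != {expected}"
print("row counts match metadata.json")
```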