# coding=utf-8
"""MultiCoNER: A Large-scale Multilingual dataset for Complex Named Entity Recognition"""
import datasets

logger = datasets.logging.get_logger(__name__)


_CITATION = """\
@misc{malmasi2022multiconer,
title={MultiCoNER: A Large-scale Multilingual dataset for Complex Named Entity Recognition},
author={Shervin Malmasi and Anjie Fang and Besnik Fetahu and Sudipta Kar and Oleg Rokhlenko},
year={2022},
eprint={2208.14536},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
_DESCRIPTION = """\
We present MultiCoNER, a large multilingual dataset for Named Entity Recognition that covers 3 domains (Wiki \
sentences, questions, and search queries) across 11 languages, as well as multilingual and code-mixing subsets. \
This dataset is designed to represent contemporary challenges in NER, including low-context scenarios (short \
and uncased text), syntactically complex entities like movie titles, and long-tail entity distributions. The \
26M token dataset is compiled from public resources using techniques such as heuristic-based sentence sampling, \
template extraction and slotting, and machine translation. We applied two NER models on our dataset: a baseline \
XLM-RoBERTa model, and a state-of-the-art GEMNET model that leverages gazetteers. The baseline achieves moderate \
performance (macro-F1=54%), highlighting the difficulty of our data. GEMNET, which uses gazetteers, improves \
significantly (average improvement of macro-F1=+30%). MultiCoNER poses challenges even for large pre-trained \
language models, and we believe that it can help further research in building robust NER systems. MultiCoNER \
is publicly available at https://registry.opendata.aws/multiconer/ and we hope that this resource will help \
advance research in various aspects of NER.
"""
subset_to_dir = {
"bn": "BN-Bangla",
"de": "DE-German",
"en": "EN-English",
"es": "ES-Spanish",
"fa": "FA-Farsi",
"hi": "HI-Hindi",
"ko": "KO-Korean",
"nl": "NL-Dutch",
"ru": "RU-Russian",
"tr": "TR-Turkish",
"zh": "ZH-Chinese",
"multi": "MULTI_Multilingual",
"mix": "MIX_Code_mixed",
}


class MultiCoNERConfig(datasets.BuilderConfig):
"""BuilderConfig for MultiCoNER"""
def __init__(self, **kwargs):
"""BuilderConfig for MultiCoNER.
Args:
**kwargs: keyword arguments forwarded to super.
"""
        super().__init__(**kwargs)


class MultiCoNER(datasets.GeneratorBasedBuilder):
"""MultiCoNER dataset."""
BUILDER_CONFIGS = [
MultiCoNERConfig(
name="bn",
version=datasets.Version("1.0.0"),
description="MultiCoNER Bangla dataset",
),
MultiCoNERConfig(
name="de",
version=datasets.Version("1.0.0"),
description="MultiCoNER German dataset",
),
MultiCoNERConfig(
name="en",
version=datasets.Version("1.0.0"),
description="MultiCoNER English dataset",
),
MultiCoNERConfig(
name="es",
version=datasets.Version("1.0.0"),
description="MultiCoNER Spanish dataset",
),
MultiCoNERConfig(
name="fa",
version=datasets.Version("1.0.0"),
description="MultiCoNER Farsi dataset",
),
MultiCoNERConfig(
name="hi",
version=datasets.Version("1.0.0"),
description="MultiCoNER Hindi dataset",
),
MultiCoNERConfig(
name="ko",
version=datasets.Version("1.0.0"),
description="MultiCoNER Korean dataset",
),
MultiCoNERConfig(
name="nl",
version=datasets.Version("1.0.0"),
description="MultiCoNER Dutch dataset",
),
MultiCoNERConfig(
name="ru",
version=datasets.Version("1.0.0"),
description="MultiCoNER Russian dataset",
),
MultiCoNERConfig(
name="tr",
version=datasets.Version("1.0.0"),
description="MultiCoNER Turkish dataset",
),
MultiCoNERConfig(
name="zh",
version=datasets.Version("1.0.0"),
description="MultiCoNER Chinese dataset",
),
MultiCoNERConfig(
name="multi",
version=datasets.Version("1.0.0"),
description="MultiCoNER Multilingual dataset",
),
MultiCoNERConfig(
name="mix",
version=datasets.Version("1.0.0"),
description="MultiCoNER Mixed dataset",
),
    ]

    def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features(
{
"id": datasets.Value("int32"),
"tokens": datasets.Sequence(datasets.Value("string")),
"ner_tags": datasets.Sequence(
datasets.features.ClassLabel(
names=[
"O",
"B-PER",
"I-PER",
"B-LOC",
"I-LOC",
"B-CORP",
"I-CORP",
"B-GRP",
"I-GRP",
"B-PROD",
"I-PROD",
"B-CW",
"I-CW",
]
)
),
}
),
supervised_keys=None,
citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
urls_to_download = {
"train": f"{subset_to_dir[self.config.name]}/{self.config.name}_train.conll",
"dev": f"{subset_to_dir[self.config.name]}/{self.config.name}_dev.conll",
"test": f"{subset_to_dir[self.config.name]}/{self.config.name}_test.conll",
}
downloaded_files = dl_manager.download_and_extract(urls_to_download)
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={"filepath": downloaded_files["train"]},
),
datasets.SplitGenerator(
name=datasets.Split.VALIDATION,
gen_kwargs={"filepath": downloaded_files["dev"]},
),
datasets.SplitGenerator(
name=datasets.Split.TEST,
gen_kwargs={"filepath": downloaded_files["test"]},
),
        ]

    def _generate_examples(self, filepath):
logger.info("⏳ Generating examples from = %s", filepath)
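        # Each .conll file is expected to look roughly like this (an
        # illustrative sample, not a real record from the data):
        #
        #   # id 5d21ae2e-...  domain=en
        #   steve _ _ B-PER
        #   jobs _ _ I-PER
        #   founded _ _ O
        #   apple _ _ B-CORP
        #   <blank line>
        #
        # "# id" lines start a new example, token lines use " _ _ " to
        # separate the token from its tag, and a blank line ends a sentence.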
with open(filepath, "r", encoding="utf8") as f:
guid = -1
tokens = []
ner_tags = []
for line in f:
if line.strip().startswith("# id"):
guid += 1
tokens = []
ner_tags = []
elif " _ _ " in line:
# Separator is " _ _ "
splits = line.split(" _ _ ")
tokens.append(splits[0].strip())
ner_tags.append(splits[1].strip())
elif len(line.strip()) == 0:
if len(tokens) >= 1 and len(tokens) == len(ner_tags):
yield guid, {
"id": guid,
"tokens": tokens,
"ner_tags": ner_tags,
}
tokens = []
ner_tags = []
else:
continue
if len(tokens) >= 1 and len(tokens) == len(ner_tags):
yield guid, {
"id": guid,
"tokens": tokens,
"ner_tags": ner_tags,
}
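

# Minimal usage sketch (an assumption about deployment: this script and the
# per-language data folders live together in one dataset repository):
#
#   from datasets import load_dataset
#   ds = load_dataset("path/to/MultiCoNER", "en")
#   example = ds["train"][0]
#   print(example["tokens"], example["ner_tags"])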