Languages: Estonian
Multilinguality: monolingual
Size Categories: 10K<n<100K
Language Creators: found
Annotations Creators: expert-generated
Source Datasets: original
noisyner / noisyner.py
import datasets
logger = datasets.logging.get_logger(__name__)
# Citation for the NoisyNER paper (AAAI'21) and the original Estonian NER corpus it builds on
_CITATION = """\
@inproceedings{hedderich2021analysing,
title={Analysing the Noise Model Error for Realistic Noisy Label Data},
author={Hedderich, Michael A and Zhu, Dawei and Klakow, Dietrich},
booktitle={Proceedings of the AAAI Conference on Artificial Intelligence},
volume={35},
number={9},
pages={7675--7684},
year={2021}
}
@inproceedings{tkachenko-etal-2013-named,
title = "Named Entity Recognition in {E}stonian",
author = "Tkachenko, Alexander and Petmanson, Timo and Laur, Sven",
booktitle = "Proceedings of the 4th Biennial International Workshop on {B}alto-{S}lavic Natural Language Processing",
year = "2013",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W13-2412",
}
"""
# Description of the dataset, shown on the dataset page
_DESCRIPTION = """\
NoisyNER is a dataset for the evaluation of methods to handle noisy labels when training machine learning models.
It is from the NLP/Information Extraction domain and was created through a realistic distant supervision technique.
Some highlights and interesting aspects of the data are:
- Seven sets of labels with differing noise patterns to evaluate different noise levels on the same instances
- Full parallel clean labels available to compute upper performance bounds or study scenarios where a small amount of
gold-standard data can be leveraged
- Skewed label distribution (typical for Named Entity Recognition tasks)
- For some label sets: noise level higher than the true label probability
- Sequential dependencies between the labels
For more details on the dataset and its creation process, please refer to our publication
https://ojs.aaai.org/index.php/AAAI/article/view/16938 (published at AAAI'21).
"""
_HOMEPAGE = "https://github.com/uds-lsv/NoisyNER"
_LICENSE = "The original dataset is licensed under CC-BY-NC. We provide our noisy labels under CC-BY 4.0."
_URL = "https://huggingface.co/datasets/phuctrg/noisyner/raw/main/data"
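# A minimal usage sketch (an assumption, not part of this loading script): loading one noisy label
# set and the parallel clean labels with the `datasets` library. The repository id below is taken
# from _URL above and may differ from the actual Hub id.
#
#     from datasets import load_dataset
#
#     noisy = load_dataset("phuctrg/noisyner", "NoisyNER_labelset1")
#     clean = load_dataset("phuctrg/noisyner", "estner_clean")
#     print(noisy["train"][0]["tokens"], noisy["train"][0]["ner_tags"])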
class NoisyNER(datasets.GeneratorBasedBuilder):
"""
NoisyNER is a dataset for the evaluation of methods to handle noisy labels when training machine learning models.
"""
VERSION = datasets.Version("1.0.0")
BUILDER_CONFIGS = [
datasets.BuilderConfig(
name="estner_clean", version=VERSION, description="EstNER dataset with clean labels"
),
datasets.BuilderConfig(
name="NoisyNER_labelset1", version=VERSION,
description="NoisyNER dataset label set 1 "
"with automatic annotation via distant supervision based ANEA tool with no heuristics"
),
datasets.BuilderConfig(
name="NoisyNER_labelset2", version=VERSION,
description="NoisyNER dataset label set 2 "
"with automatic annotation via distant supervision based ANEA tool and "
"applying Estonian lemmatization to normalize the words"
),
datasets.BuilderConfig(
name="NoisyNER_labelset3", version=VERSION,
description="NoisyNER dataset label set 3 "
"with automatic annotation via distant supervision based ANEA tool and "
"splitting person entity names in the list, i.e. both first and last names can be matched "
"separately. Person names must have a minimum length of 4. Also, lemmatization"
),
datasets.BuilderConfig(
name="NoisyNER_labelset4", version=VERSION,
description="NoisyNER dataset label set 4 "
"with automatic annotation via distant supervision based ANEA tool and if entity names from "
"two different lists match the same word, location entities are preferred. "
"Also, lemmatization."
),
datasets.BuilderConfig(
name="NoisyNER_labelset5", version=VERSION,
description="NoisyNER dataset label set 5 "
"with automatic annotation via distant supervision based ANEA tool and "
"Locations preferred, lemmatization, splitting names with minimum length 4."
),
datasets.BuilderConfig(
name="NoisyNER_labelset6", version=VERSION,
description="NoisyNER dataset label set 6 "
"with automatic annotation via distant supervision based ANEA tool and "
"removing the entity names 'kohta', 'teine', 'naine' and 'mees' from the list of person names "
"(high false positive rate). Also, all of label set 5."
),
datasets.BuilderConfig(
name="NoisyNER_labelset7", version=VERSION,
description="NoisyNER dataset label set 7 "
"with automatic annotation via distant supervision based ANEA tool and using alternative, "
"alias names for organizations. Using additionally the identifiers Q82794, Q3957, Q7930989, "
"Q5119 and Q11881845 for locations and Q1572070 and Q7278 for organizations. "
"Also, all of label set 6."
),
]
DEFAULT_CONFIG_NAME = "estner_clean"
def _info(self):
features = datasets.Features(
{
"id": datasets.Value("string"),
"tokens": datasets.Sequence(datasets.Value("string")),
"lemmas": datasets.Sequence(datasets.Value("string")),
"grammar": datasets.Sequence(datasets.Value("string")),
"ner_tags": datasets.Sequence(
datasets.features.ClassLabel(
names=[
"O",
"B-PER",
"I-PER",
"B-ORG",
"I-ORG",
"B-LOC",
"I-LOC"
]
)
),
}
)
return datasets.DatasetInfo(
# This is the description that will appear on the datasets page.
description=_DESCRIPTION,
            # This defines the different columns of the dataset and their types
            features=features,  # All configurations share the same feature schema defined above
# If there's a common (input, target) tuple from the features, uncomment supervised_keys line below and
# specify them. They'll be used if as_supervised=True in builder.as_dataset.
# supervised_keys=("sentence", "label"),
# Homepage of the dataset for documentation
homepage=_HOMEPAGE,
# License for the dataset if available
license=_LICENSE,
# Citation for the dataset
citation=_CITATION,
)
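    # A small usage sketch (an assumption, not part of this script): mapping the integer `ner_tags`
    # produced by this loader back to their string names via the ClassLabel feature defined above.
    #
    #     ds = load_dataset("phuctrg/noisyner", "estner_clean", split="test")
    #     tag_names = ds.features["ner_tags"].feature.names  # ["O", "B-PER", ..., "I-LOC"]
    #     first_sentence_tags = [tag_names[t] for t in ds[0]["ner_tags"]]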
def _split_generators(self, dl_manager):
_URLS = {
str(datasets.Split.TRAIN): f'{_URL}/{self.config.name}_train.tsv',
str(datasets.Split.VALIDATION): f'{_URL}/{self.config.name}_dev.tsv',
str(datasets.Split.TEST): f'{_URL}/{self.config.name}_test.tsv',
}
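        # For example, for the "estner_clean" configuration this resolves to
        # f"{_URL}/estner_clean_train.tsv", f"{_URL}/estner_clean_dev.tsv" and f"{_URL}/estner_clean_test.tsv".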
downloaded_files = dl_manager.download_and_extract(_URLS)
return [datasets.SplitGenerator(name=i, gen_kwargs={"filepath": downloaded_files[str(i)]})
for i in [datasets.Split.TRAIN, datasets.Split.VALIDATION, datasets.Split.TEST]]
# method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
def _generate_examples(self, filepath):
logger.info("⏳ Generating examples from = %s", filepath)
# The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
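        # Each data file is tab-separated with one token per line and four columns:
        #     <token>\t<lemma>\t<grammar info>\t<NER tag>
        # Sentences are separated by lines containing only "--" (or by blank lines), handled below.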
with open(filepath, encoding="utf-8") as f:
guid = 0
tokens = []
lemmas = []
grammar_infos = []
ner_tags = []
for line in f:
if line in ["--", "", "\n", "--\n"]:
if tokens:
yield guid, {
"id": str(guid),
"tokens": tokens,
"lemmas": lemmas,
"grammar": grammar_infos,
"ner_tags": ner_tags,
}
guid += 1
tokens = []
lemmas = []
grammar_infos = []
ner_tags = []
else:
splits = line.split("\t")
tokens.append(splits[0])
lemmas.append(splits[1])
grammar_infos.append(splits[2])
ner_tags.append(splits[3].rstrip())
# last example
if tokens:
yield guid, {
"id": str(guid),
"tokens": tokens,
"lemmas": lemmas,
"grammar": grammar_infos,
"ner_tags": ner_tags,
}