Datasets:

Languages:
Hebrew
Multilinguality:
monolingual
Size Categories:
10K<n<100K
Language Creators:
found
Annotations Creators:
crowdsourced
ArXiv:
Tags:
License:
bmc / bmc.py
imvladikon's picture
Update bmc.py
a465c37
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import datasets
logger = datasets.logging.get_logger(__name__)
# BibTeX citations for the corpus: the original BMC Hebrew NER master's
# thesis (Ben-Mordecai, 2005) and the NEMO^2 paper (Bareket & Tsarfaty,
# 2020) that reused/republished it.
_CITATION = """\
@mastersthesis{naama,
title={Hebrew Named Entity Recognition},
author={Ben-Mordecai, Naama},
advisor={Elhadad, Michael},
year={2005},
url="https://www.cs.bgu.ac.il/~elhadad/nlpproj/naama/",
institution={Department of Computer Science, Ben-Gurion University},
school={Department of Computer Science, Ben-Gurion University},
},
@misc{bareket2020neural,
title={Neural Modeling for Named Entities and Morphology (NEMO^2)},
author={Dan Bareket and Reut Tsarfaty},
year={2020},
eprint={2007.15620},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
# Dataset-card description; intentionally left empty by the author.
_DESCRIPTION = """\
"""
# The three predefined cross-validation splits shipped under data/;
# each name becomes one BuilderConfig of the BMC builder below.
SPLITS = ["split1", "split2", "split3"]
class BMCConfig(datasets.BuilderConfig):
    """Configuration for the BMC dataset builder.

    This is a thin subclass of :class:`datasets.BuilderConfig`; it adds no
    fields of its own and simply forwards every keyword argument to the
    parent constructor.
    """

    def __init__(self, **kwargs):
        """Create a BMC config, passing all keyword arguments to the parent."""
        super().__init__(**kwargs)
class BMC(datasets.GeneratorBasedBuilder):
    """BMC: a Hebrew named-entity-recognition corpus in BMES tag format.

    Each builder config ("split1"/"split2"/"split3") selects one of the
    three predefined cross-validation splits stored under ``data/``.
    """

    BUILDER_CONFIGS = [
        BMCConfig(name=split, version=datasets.Version("1.0.0"), description="BMC dataset")
        for split in SPLITS
    ]

    def _info(self):
        """Return dataset metadata: features schema, homepage, and citation."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    # Tag kept twice: raw_tags as plain strings, ner_tags as
                    # integer-encoded ClassLabel values over the fixed label set.
                    "raw_tags": datasets.Sequence(datasets.Value("string")),
                    "ner_tags": datasets.Sequence(
                        datasets.features.ClassLabel(
                            names=[
                                'B-DATE',
                                'I-DATE',
                                'S-DATE',
                                'E-DATE',
                                'B-LOC',
                                'E-LOC',
                                'S-LOC',
                                'I-LOC',
                                'E-MONEY',
                                'B-MONEY',
                                'S-MONEY',
                                'I-MONEY',
                                'O',
                                'S-ORG',
                                'E-ORG',
                                'I-ORG',
                                'B-ORG',
                                'B-PER',
                                'E-PER',
                                'I-PER',
                                'S-PER',
                                'B-PERCENT',
                                'S-PERCENT',
                                'E-PERCENT',
                                'I-PERCENT',
                                'E-TIME',
                                'I-TIME',
                                'B-TIME',
                                'S-TIME'
                            ]
                        )
                    ),
                }
            ),
            supervised_keys=None,
            homepage="https://www.cs.bgu.ac.il/~elhadad/nlpproj/naama/",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the three BMES files of the selected split and map them
        to the train/validation/test generators."""
        folder = f"data/{self.config.name}"
        data_files = {
            "train": dl_manager.download(os.path.join(folder, "bmc_split.train.bmes")),
            "validation": dl_manager.download(os.path.join(folder, "bmc_split.dev.bmes")),
            "test": dl_manager.download(os.path.join(folder, "bmc_split.test.bmes")),
        }
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": data_files["train"]}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": data_files["validation"]}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": data_files["test"]}),
        ]

    def _generate_examples(self, filepath, sep=" "):
        """Yield ``(guid, example)`` pairs from a token-per-line BMES file.

        Blank lines and ``-DOCSTART-`` markers delimit sentences. Every other
        line is ``<token><sep><tag>``; the tag is recorded both as a raw
        string (``raw_tags``) and as a ClassLabel value (``ner_tags``).

        Args:
            filepath: path to the downloaded ``.bmes`` file.
            sep: column separator between token and tag (default: space).
        """
        logger.info("⏳ Generating examples from = %s", filepath)
        with open(filepath, encoding="utf-8") as f:
            guid = 0
            tokens = []
            ner_tags = []
            raw_tags = []
            for line in f:
                if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                    if tokens:
                        yield guid, {
                            "id": str(guid),
                            "tokens": tokens,
                            "raw_tags": raw_tags,
                            "ner_tags": ner_tags,
                        }
                        guid += 1
                        tokens = []
                        raw_tags = []
                        ner_tags = []
                else:
                    splits = line.split(sep)
                    tokens.append(splits[0])
                    tag = splits[1].rstrip()
                    raw_tags.append(tag)
                    ner_tags.append(tag)
            # Flush the last sentence only if one is pending. The original
            # yielded unconditionally here, which emitted a spurious empty
            # example whenever the file ended with a blank line (the usual
            # case for BMES/CoNLL-style files).
            if tokens:
                yield guid, {
                    "id": str(guid),
                    "tokens": tokens,
                    "raw_tags": raw_tags,
                    "ner_tags": ner_tags,
                }