# coding=utf-8
# Copyright 2023 HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BIOfid NER Dataset"""
import json
import datasets
_CITATION = """\
@inproceedings{ahmed-etal-2019-biofid,
title = "{BIO}fid Dataset: Publishing a {G}erman Gold Standard for Named Entity Recognition in Historical Biodiversity Literature",
author = "Ahmed, Sajawel and
Stoeckel, Manuel and
Driller, Christine and
Pachzelt, Adrian and
Mehler, Alexander",
booktitle = "Proceedings of the 23rd Conference on Computational Natural Language Learning (CoNLL)",
month = nov,
year = "2019",
address = "Hong Kong, China",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/K19-1081",
doi = "10.18653/v1/K19-1081",
pages = "871--880",
abstract = "The Specialized Information Service Biodiversity Research (BIOfid) has been launched to mobilize valuable biological data from printed literature hidden in German libraries for over the past 250 years. In this project, we annotate German texts converted by OCR from historical scientific literature on the biodiversity of plants, birds, moths and butterflies. Our work enables the automatic extraction of biological information previously buried in the mass of papers and volumes. For this purpose, we generated training data for the tasks of Named Entity Recognition (NER) and Taxa Recognition (TR) in biological documents. We use this data to train a number of leading machine learning tools and create a gold standard for TR in biodiversity literature. More specifically, we perform a practical analysis of our newly generated BIOfid dataset through various downstream-task evaluations and establish a new state of the art for TR with 80.23{\%} F-score. In this sense, our paper lays the foundations for future work in the field of information extraction in biology texts.",
}
"""
_LICENSE = """\
By exercising the Licensed Rights (defined below), You accept and agree to be bound by the terms and conditions of this Creative Commons Attribution 4.0 International Public License ("Public License"). To the extent this Public License may be interpreted as a contract, You are granted the Licensed Rights in consideration of Your acceptance of these terms and conditions, and the Licensor grants You such rights in consideration of benefits the Licensor receives from making the Licensed Material available under these terms and conditions.
"""
_DESCRIPTION = """\
# Introduction
The Specialized Information Service Biodiversity Research (BIOfid) has been launched to mobilize valuable biological data
from printed literature hidden in German libraries for over the past 250 years. In this project, we annotate German texts
converted by OCR from historical scientific literature on the biodiversity of plants, birds, moths and butterflies.
Our work enables the automatic extraction of biological information previously buried in the mass of papers and volumes.
For this purpose, we generated training data for the tasks of Named Entity Recognition (NER) and Taxa Recognition (TR) in
biological documents. We use this data to train a number of leading machine learning tools and create a gold standard for
TR in biodiversity literature. More specifically, we perform a practical analysis of our newly generated BIOfid dataset
through various downstream-task evaluations and establish a new state of the art for TR with 80.23% F-score.
In this sense, our paper lays the foundations for future work in the field of information extraction in biology texts.
# Dataset
## Corpus
The BIOfid Corpus is a collection of historical scientific books on central European biodiversity.
It was assembled by a group of German domain experts, who compiled a pool of relevant print-only journals and
publications for historical biodiversity science. However, mainly due to licensing issues, not all publications
could be included in the corpus.
The available publications were scanned by an external service and paginated with the Visual Library software.
Every high-resolution page (400 dpi) was then digitized with ABBYY FineReader 8.0 (2005) to ABBYY-XML,
which includes structural information such as paragraphs, bold/italic text, images, and table blocks.
## Named Entities (NEs)
NEs are real-world objects in a given natural language text which denote a unique individual with a proper name
(e.g. Frankfurt, Africa, Linnaeus, BHL). This stands in contrast to the class of common names, which refer to a
kind of entity (e.g. city, continent, person, corporation) rather than a uniquely identifiable object.
The standard task of NER focuses on the former class of proper names. However, it is often not easy to
differentiate between the two classes. Hence, to support the annotators in making the right decision, we created
guidelines that lay out the rules for annotation. We gradually developed this document in collaboration with
the annotators until finalizing it as the guidelines for annotating the BIOfid corpus.
As we essentially extend the standard task of NER to the scope of biodiversity, our guidelines build upon those
used for producing the GermEval dataset (Benikova et al., 2014): we take the original German guidelines and
extend them with the adjustments for the context of biodiversity described in the next paragraphs.
In contrast to Benikova et al. (2014), we do not consider derivative or partial NEs as a separate category.
As the recent work of Ahmed and Mehler (2018) has shown, discarding such subtle details is even beneficial, whereas
fine-grained feature engineering for deep neural networks usually deteriorates the final performance.
## Data Format
We use the 4-column CoNLL format, which lists each word of a sentence on its own line along with its lemma, POS tag
and gold label, separating sentences by an empty line. As the tagging scheme, we opt for BIO (IOB2).
The entity types ORGANIZATION, OTHER, TIME, PERSON, LOCATION and TAXON are marked by our team of annotators for
each sentence from the BIOfid corpus.
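For illustration, a constructed example sentence (not an actual corpus sentence) looks as follows in this format,
using the abbreviated tags of the released data (e.g. TAX for TAXON, LOC for LOCATION):
```
Die        die        ART    O
Goldammer  Goldammer  NN     B-TAX
brütet     brüten     VVFIN  O
in         in         APPR   O
Hessen     Hessen     NE     B-LOC
.          .          $.     O
```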
We split the BIOfid dataset into train, dev and test files using the common 80:10:10 ratio after
randomizing the order of its sentences.
"""
_VERSION = "1.0.0"
_HOMEPAGE_URL = "https://github.com/texttechnologylab/BIOfid/tree/master/BIOfid-Dataset-NER"
class BIOfidConfig(datasets.BuilderConfig):
"""BuilderConfig for BIOfid NER Dataset."""
def __init__(self, **kwargs):
"""BuilderConfig for BIOfid NER Dataset.
Args:
**kwargs: keyword arguments forwarded to super.
"""
super(BIOfidConfig, self).__init__(**kwargs)
class BIOfid(datasets.GeneratorBasedBuilder):
"""GermEval 2014 NER Shared Task."""
BUILDER_CONFIGS = [
BIOfidConfig(
name="biofids", version=datasets.Version("1.0.0"), description="BIOfid NER Dataset"
),
]
def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features(
{
"tokens": datasets.Sequence(datasets.Value("string")),
"ner_tags": datasets.Sequence(
datasets.features.ClassLabel(
names=[
"O",
"B-LOC",
"I-LOC",
"B-ORG",
"I-ORG",
"B-OTHER",
"I-OTHER",
"B-PER",
"I-PER",
"B-TAX",
"I-TAX",
"B-TME",
"I-TME",
]
)
),
"ner_t5_output": datasets.Value("string"),
"ner_own_output": datasets.Value("string"),
}
),
supervised_keys=None,
license=_LICENSE,
homepage=_HOMEPAGE_URL,
citation=_CITATION,
)
def _split_generators(self, dl_manager):
        # The train/dev/test splits are downloaded and parsed by flair's
        # built-in corpus loader, so `flair` must be installed.
        from flair.datasets import NER_GERMAN_BIOFID
corpus = NER_GERMAN_BIOFID()
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"sentences": corpus.train}),
datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"sentences": corpus.dev}),
datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"sentences": corpus.test}),
]
    def _generate_examples(self, sentences):
        for counter, sentence in enumerate(sentences):
            original_spans = sentence.get_spans("ner")
            original_tokens = []
            original_tags = []
            t5_spans = []
            own_spans = []
            # Reconstruct BIO (IOB2) tags from flair's span annotations: the
            # first token of a span gets a "B-" tag, all following tokens "I-".
            for token in sentence.tokens:
                original_tag = "O"
                for span in original_spans:
                    if token in span:
                        original_tag = "B-" + span.tag if token == span[0] else "I-" + span.tag
                        break  # NER spans do not overlap, so the first match suffices
                original_tokens.append(token.text)
                original_tags.append(original_tag)
            # Serialize the gold spans into two linearized string formats.
            for span in original_spans:
                span_text = " ".join(token.text for token in span.tokens)
                t5_spans.append(f"{span.tag} : {span_text}")
                own_spans.append(f"{span.tag} = {span_text}")
ner_t5_output = " || ".join(t5_spans)
ner_own_output = " || ".join(own_spans)
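            # Hypothetical example of the two output encodings:
            #   ner_t5_output:  "TAX : Goldammer || LOC : Hessen"
            #   ner_own_output: "TAX = Goldammer || LOC = Hessen"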
yield counter, {
"tokens": original_tokens,
"ner_tags": original_tags,
"ner_t5_output": ner_t5_output,
"ner_own_output": ner_own_output,
}
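# Minimal usage sketch (an illustration, not part of the loading script proper):
# assumes `flair` is installed and a `datasets` version that still supports
# loading local dataset scripts.
if __name__ == "__main__":
    dataset = datasets.load_dataset(__file__, name="biofids")
    print(dataset)
    print(dataset["train"][0])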