Languages: English
Multilinguality: monolingual
Size Categories: 100K<n<1M
Language Creators: found
Annotations Creators: expert-generated
Source Datasets: original
License:
named_timexes.py
# coding=utf-8
# Copyright 2022 Leon Derczynski
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Named Temporal Expressions corpus (English)"""
import os
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = """\
@inproceedings{brucato-etal-2013-recognising,
    title = "Recognising and Interpreting Named Temporal Expressions",
    author = "Brucato, Matteo  and
      Derczynski, Leon  and
      Llorens, Hector  and
      Bontcheva, Kalina  and
      Jensen, Christian S.",
    booktitle = "Proceedings of the International Conference Recent Advances in Natural Language Processing {RANLP} 2013",
    month = sep,
    year = "2013",
    address = "Hissar, Bulgaria",
    publisher = "INCOMA Ltd. Shoumen, BULGARIA",
    url = "https://aclanthology.org/R13-1015",
    pages = "113--121",
}
"""
_DESCRIPTION = """\
This is a dataset annotated for _named temporal expression_ chunks.

The commonest temporal expressions typically contain date and time words, like April or hours.
Research into recognising and interpreting these typical expressions is mature in many languages.
However, there is a class of expressions that are less typical, very varied, and difficult to
automatically interpret. These indicate dates and times, but are harder to detect because they
often do not contain time words and are not used frequently enough to appear in conventional
temporally-annotated corpora – for example *Michaelmas* or *Vasant Panchami*.

For more details see [https://aclanthology.org/R13-1015.pdf](https://aclanthology.org/R13-1015.pdf)
"""
_URL = "http://www.derczynski.com/resources/named_timex.tar.bz2"
_TRAIN_FILE = "ntimex-train.conll"
_TEST_FILE = "ntimex-eval.conll"
class NamedTimexesConfig(datasets.BuilderConfig):
    """BuilderConfig for NamedTimexes"""

    def __init__(self, **kwargs):
        """BuilderConfig for NamedTimexes.

        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        super(NamedTimexesConfig, self).__init__(**kwargs)


class NamedTimexes(datasets.GeneratorBasedBuilder):
    """NamedTimexes dataset."""

    BUILDER_CONFIGS = [
        NamedTimexesConfig(name="named-timexes", version=datasets.Version("1.0.0"), description="Named Temporal Expressions dataset"),
    ]
    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    "ntimex_tags": datasets.Sequence(
                        datasets.features.ClassLabel(
                            names=[
                                "O",
                                "T",
                            ]
                        )
                    ),
                }
            ),
            supervised_keys=None,
            homepage="https://aclanthology.org/R13-1015.pdf",
            citation=_CITATION,
        )
    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        downloaded_file = dl_manager.download_and_extract(_URL)
        data_files = {
            "train": os.path.join(downloaded_file, _TRAIN_FILE),
            "test": os.path.join(downloaded_file, _TEST_FILE),
        }

        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": data_files["train"]}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": data_files["test"]}),
        ]
    def _generate_examples(self, filepath):
        guid = 0
        with open(filepath, encoding="utf-8") as f:
            logger.info("⏳ Generating examples from = %s", filepath)
            tokens = []
            ntimex_tags = []
            for line in f:
                if line.startswith("-DOCSTART-") or line.strip() == "" or line == "\n":
                    if tokens:
                        yield guid, {
                            "id": str(guid),
                            "tokens": tokens,
                            "ntimex_tags": ntimex_tags,
                        }
                        guid += 1
                        tokens = []
                        ntimex_tags = []
                else:
                    # entries are tab separated: token, then named-timex tag
                    fields = line.split("\t")
                    tokens.append(fields[0])
                    ntimex_tags.append(fields[1].rstrip())
            # last example (guard against an empty example when the file ends with a blank line)
            if tokens:
                yield guid, {
                    "id": str(guid),
                    "tokens": tokens,
                    "ntimex_tags": ntimex_tags,
                }
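Below is a minimal usage sketch, not part of the loading script above. It assumes the script is saved locally as named_timexes.py and a datasets version that still supports script-based loaders; the dataset's repository id on the Hugging Face Hub may differ.

# usage sketch: load the splits and print the first training example with its tag names
from datasets import load_dataset

ds = load_dataset("named_timexes.py")  # local script path; builds the "train" and "test" splits

# "ntimex_tags" is a Sequence of ClassLabel ids; recover the "O"/"T" names for display
tag_names = ds["train"].features["ntimex_tags"].feature.names

example = ds["train"][0]
for token, tag_id in zip(example["tokens"], example["ntimex_tags"]):
    print(f"{token}\t{tag_names[tag_id]}")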