"""
Dataset from https://github.com/allenai/sequential_sentence_classification

Dataset maintainer: @soldni
"""

import json
from typing import Iterable, Sequence, Tuple

import datasets
from datasets.builder import BuilderConfig, GeneratorBasedBuilder
from datasets.info import DatasetInfo
from datasets.splits import Split, SplitGenerator
from datasets.utils.logging import get_logger

LOGGER = get_logger(__name__)

_NAME = "CSAbstruct" |
|
_CITATION = """\ |
|
@inproceedings{Cohan2019EMNLP, |
|
title={Pretrained Language Models for Sequential Sentence Classification}, |
|
author={Arman Cohan, Iz Beltagy, Daniel King, Bhavana Dalvi, Dan Weld}, |
|
year={2019}, |
|
booktitle={EMNLP}, |
|
} |
|
""" |
|
_LICENSE = "Apache License 2.0" |
_DESCRIPTION = """\
As a step toward better document-level understanding, we explore \
classification of a sequence of sentences into their corresponding \
categories, a task that requires understanding sentences in context \
of the document. Recent successful models for this task have used \
hierarchical models to contextualize sentence representations, and \
Conditional Random Fields (CRFs) to incorporate dependencies between \
subsequent labels. In this work, we show that pretrained language \
models, BERT (Devlin et al., 2018) in particular, can be used for \
this task to capture contextual dependencies without the need for \
hierarchical encoding nor a CRF. Specifically, we construct a joint \
sentence representation that allows BERT Transformer layers to \
directly utilize contextual information from all words in all \
sentences. Our approach achieves state-of-the-art results on four \
datasets, including a new dataset of structured scientific abstracts.
"""
_HOMEPAGE = "https://github.com/allenai/sequential_sentence_classification"
_VERSION = "1.0.0"

_URL = (
    "https://raw.githubusercontent.com/allenai/"
    "sequential_sentence_classification/master/"
)

_SPLITS = {
    Split.TRAIN: _URL + "data/CSAbstruct/train.jsonl",
    Split.VALIDATION: _URL + "data/CSAbstruct/dev.jsonl",
    Split.TEST: _URL + "data/CSAbstruct/test.jsonl",
}
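# Each line of these JSONL files is expected to decode to a record carrying
# "sentences", "labels", and "confs" keys matching the features schema
# declared in `CSAbstruct._info` below, e.g. (illustrative values only):
#   {"sentences": ["We propose ...", "..."],
#    "labels": ["objective", "method"],
#    "confs": [1.0, 0.66]}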


class CSAbstruct(GeneratorBasedBuilder):
    """Builder for the CSAbstruct sequential sentence classification dataset."""

    BUILDER_CONFIGS = [
        BuilderConfig(
            name=_NAME,
            version=datasets.Version(_VERSION),
            description=_DESCRIPTION,
        )
    ]

    def _info(self) -> DatasetInfo:
        class_labels = ["background", "method", "objective", "other", "result"]

        features = datasets.Features(
            {
                "abstract_id": datasets.Value("string"),
                "sentences": [datasets.Value("string")],
                "labels": [datasets.ClassLabel(names=class_labels)],
                # "float" is not a valid `datasets` dtype; "float32" is.
                "confs": [datasets.Value("float32")],
            }
        )

        return DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(
        self, dl_manager: datasets.DownloadManager
    ) -> Sequence[SplitGenerator]:
        # `download` accepts the _SPLITS dict and returns a dict with the
        # same keys, mapping each split to the local path of its file.
        downloaded_files = dl_manager.download(_SPLITS)

        return [
            SplitGenerator(
                name=split_name,
                gen_kwargs={
                    "split_name": split_name,
                    "filepath": downloaded_files[split_name],
                },
            )
            for split_name in _SPLITS
        ]

    def _generate_examples(
        self, split_name: str, filepath: str
    ) -> Iterable[Tuple[str, dict]]:
        """Yield (example_id, example) pairs parsed from one JSONL file."""

        LOGGER.info(f"generating examples from documents in {filepath}...")

        with open(filepath, mode="r", encoding="utf-8") as f:
            data = [json.loads(ln) for ln in f]

        for i, row in enumerate(data):
            # The raw records carry no identifier, so synthesize a stable,
            # split-scoped one (e.g. "train_0007").
            row["abstract_id"] = f"{split_name}_{i:04d}"
            yield row["abstract_id"], row
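

if __name__ == "__main__":
    # Minimal smoke test, not part of the loader proper: build the train
    # split from this script and print one example. This sketch assumes a
    # `datasets` version that still supports loading from a local script
    # (removed in datasets 3.0) and network access to GitHub.
    dataset = datasets.load_dataset(__file__, split="train")
    print(dataset[0]["abstract_id"])
    print(dataset[0]["sentences"][0])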