"""
Dataset from https://github.com/allenai/sequential_sentence_classification

Dataset maintainer: @soldni
"""


import json
from typing import Iterable, Sequence, Tuple

import datasets
from datasets.builder import BuilderConfig, GeneratorBasedBuilder
from datasets.info import DatasetInfo
from datasets.splits import Split, SplitGenerator
from datasets.utils.logging import get_logger

LOGGER = get_logger(__name__)


_NAME = "CSAbstruct"
_CITATION = """\
@inproceedings{Cohan2019EMNLP,
  title={Pretrained Language Models for Sequential Sentence Classification},
  author={Arman Cohan and Iz Beltagy and Daniel King and Bhavana Dalvi and Dan Weld},
  year={2019},
  booktitle={EMNLP},
}
"""
_LICENSE = "Apache License 2.0"
_DESCRIPTION = """\
As a step toward better document-level understanding, we explore \
classification of a sequence of sentences into their corresponding \
categories, a task that requires understanding sentences in context \
of the document. Recent successful models for this task have used \
hierarchical models to contextualize sentence representations, and \
Conditional Random Fields (CRFs) to incorporate dependencies between \
subsequent labels. In this work, we show that pretrained language \
models, BERT (Devlin et al., 2018) in particular, can be used for \
this task to capture contextual dependencies without the need for \
hierarchical encoding nor a CRF. Specifically, we construct a joint \
sentence representation that allows BERT Transformer layers to \
directly utilize contextual information from all words in all \
sentences. Our approach achieves state-of-the-art results on four \
datasets, including a new dataset of structured scientific abstracts.
"""
_HOMEPAGE = "https://github.com/allenai/sequential_sentence_classification"
_VERSION = "1.0.0"
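
# Note (illustration only; not used by this builder): the "joint sentence
# representation" mentioned in _DESCRIPTION packs every sentence of an abstract
# into a single input sequence, e.g. "[CLS] sent_1 [SEP] sent_2 [SEP] ... [SEP]",
# so that BERT's self-attention contextualizes each sentence with all the others.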

_URL = (
    "https://raw.githubusercontent.com/allenai/"
    "sequential_sentence_classification/master/"
)

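# Each split name maps to a raw JSONL file in the upstream GitHub repository.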
_SPLITS = {
    Split.TRAIN: _URL + "data/CSAbstruct/train.jsonl",
    Split.VALIDATION: _URL + "data/CSAbstruct/dev.jsonl",
    Split.TEST: _URL + "data/CSAbstruct/test.jsonl",
}


class CSAbstruct(GeneratorBasedBuilder):
    """CSAbstruct"""

    BUILDER_CONFIGS = [
        BuilderConfig(
            name=_NAME,
            version=datasets.Version(_VERSION),
            description=_DESCRIPTION,
        )
    ]

    def _info(self) -> DatasetInfo:
        class_labels = ["background", "method", "objective", "other", "result"]

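        # Each example is one abstract: a list of sentences plus parallel lists
        # of per-sentence labels and label-confidence scores ("confs").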
        features = datasets.Features(
            {
                "abstract_id": datasets.Value("string"),
                "sentences": [datasets.Value("string")],
                "labels": [datasets.ClassLabel(names=class_labels)],
                "confs": [datasets.Value("float")],
            }
        )

        return DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(
        self, dl_manager: datasets.DownloadManager
    ) -> Sequence[SplitGenerator]:
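        # `download` mirrors the _SPLITS mapping: it returns a dict from split
        # name to the local cache path of the corresponding JSONL file.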
        archive = dl_manager.download(_SPLITS)

        return [
            SplitGenerator(
                name=split_name,  # type: ignore
                gen_kwargs={
                    "split_name": split_name,
                    "filepath": archive[split_name],  # type: ignore
                },
            )
            for split_name in _SPLITS
        ]

    def _generate_examples(
        self, split_name: str, filepath: str
    ) -> Iterable[Tuple[str, dict]]:
        """This function returns the examples in the raw (text) form."""

        LOGGER.info(f"generating examples from documents in {filepath}...")

        with open(filepath, mode="r", encoding="utf-8") as f:
            data = [json.loads(ln) for ln in f]

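        # Each JSONL row already carries "sentences", "labels", and "confs";
        # synthesize a split-scoped "abstract_id" to use as the example key.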
        for i, row in enumerate(data):
            row["abstract_id"] = f"{split_name}_{i:04d}"
            yield row["abstract_id"], row
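

# ---------------------------------------------------------------------------
# Usage sketch (added for illustration; not part of the original builder).
# It shows how the splits defined above can be loaded through the standard
# `datasets` API. The local script path "csabstruct.py" is an assumption --
# point it at wherever this file is saved, or at the dataset's Hub repository
# id; recent `datasets` releases may also require trust_remote_code=True for
# script-based datasets.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    ds = datasets.load_dataset("csabstruct.py")
    print(ds)                              # DatasetDict: train / validation / test
    print(ds["train"].features["labels"])  # per-sentence ClassLabel feature
    example = ds["train"][0]
    print(example["sentences"][0], example["labels"][0], example["confs"][0])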