soldni committed eb52810 (1 parent: a93c43d)

first upload

Files changed (1):
  1. csabstruct.py +121 -0
csabstruct.py ADDED
@@ -0,0 +1,121 @@
+ """
+ Dataset from https://github.com/allenai/sequential_sentence_classification
+
+ Dataset maintainer: @soldni
+ """
+
+
+ import json
+ from typing import Iterable, Sequence, Tuple
+
+ import datasets
+ from datasets.builder import BuilderConfig, GeneratorBasedBuilder
+ from datasets.info import DatasetInfo
+ from datasets.splits import Split, SplitGenerator
+ from datasets.utils.logging import get_logger
+
+ LOGGER = get_logger(__name__)
+
+
+ _NAME = "CSAbstruct"
+ _CITATION = """\
+ @inproceedings{Cohan2019EMNLP,
+   title={Pretrained Language Models for Sequential Sentence Classification},
+   author={Arman Cohan and Iz Beltagy and Daniel King and Bhavana Dalvi and Dan Weld},
+   year={2019},
+   booktitle={EMNLP},
+ }
+ """
+ _LICENSE = "Apache License 2.0"
+ _DESCRIPTION = """\
+ As a step toward better document-level understanding, we explore \
+ classification of a sequence of sentences into their corresponding \
+ categories, a task that requires understanding sentences in context \
+ of the document. Recent successful models for this task have used \
+ hierarchical models to contextualize sentence representations, and \
+ Conditional Random Fields (CRFs) to incorporate dependencies between \
+ subsequent labels. In this work, we show that pretrained language \
+ models, BERT (Devlin et al., 2018) in particular, can be used for \
+ this task to capture contextual dependencies without the need for \
+ hierarchical encoding nor a CRF. Specifically, we construct a joint \
+ sentence representation that allows BERT Transformer layers to \
+ directly utilize contextual information from all words in all \
+ sentences. Our approach achieves state-of-the-art results on four \
+ datasets, including a new dataset of structured scientific abstracts.
+ """
+ _HOMEPAGE = "https://github.com/allenai/sequential_sentence_classification"
+ _VERSION = "1.0.0"
+
+ _URL = (
+     "https://raw.githubusercontent.com/allenai/"
+     "sequential_sentence_classification/master/"
+ )
+
+ # Each split is a JSONL file hosted in the upstream GitHub repository.
+ _SPLITS = {
+     Split.TRAIN: _URL + "data/CSAbstruct/train.jsonl",
+     Split.VALIDATION: _URL + "data/CSAbstruct/dev.jsonl",
+     Split.TEST: _URL + "data/CSAbstruct/test.jsonl",
+ }
+
+
+ class CSAbstruct(GeneratorBasedBuilder):
+     """CSAbstruct"""
+
+     BUILDER_CONFIGS = [
+         BuilderConfig(
+             name=_NAME,
+             version=datasets.Version(_VERSION),
+             description=_DESCRIPTION,
+         )
+     ]
+
+     def _info(self) -> DatasetInfo:
+         # Rhetorical role assigned to each sentence of an abstract.
+         class_labels = ["background", "method", "objective", "other", "result"]
+
+         features = datasets.Features(
+             {
+                 "abstract_id": datasets.Value("string"),
+                 "sentences": [datasets.Value("string")],
+                 "labels": [datasets.ClassLabel(names=class_labels)],
+                 "confs": [datasets.Value("float")],
+             }
+         )
+
+         return DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             supervised_keys=None,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(
+         self, dl_manager: datasets.DownloadManager
+     ) -> Sequence[SplitGenerator]:
+         # download() on a dict returns a dict mapping each key to a local path.
+         archive = dl_manager.download(_SPLITS)
+
+         return [
+             SplitGenerator(
+                 name=split_name,  # type: ignore
+                 gen_kwargs={
+                     "split_name": split_name,
+                     "filepath": archive[split_name],  # type: ignore
+                 },
+             )
+             for split_name in _SPLITS
+         ]
+
+     def _generate_examples(
+         self, split_name: str, filepath: str
+     ) -> Iterable[Tuple[str, dict]]:
+         """This function returns the examples in the raw (text) form."""
+
+         LOGGER.info(f"generating examples from documents in {filepath}...")
+
+         with open(filepath, mode="r", encoding="utf-8") as f:
+             data = [json.loads(ln) for ln in f]
+
+         for i, row in enumerate(data):
+             # The JSONL rows carry no id, so assign a stable, split-scoped one.
+             row["abstract_id"] = f"{split_name}_{i:04d}"
+             yield row["abstract_id"], row
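For reference, each line of the three JSONL files above is one JSON object whose keys mirror the feature schema declared in _info(); abstract_id is not stored in the files but is synthesized by _generate_examples(). The record below is an illustrative placeholder, not real data:

{"sentences": ["We propose a new model ...", "Experiments show ..."],
 "labels": ["objective", "result"],
 "confs": [1.0, 0.66]}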
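A minimal usage sketch, assuming the script is saved locally as csabstruct.py and a datasets version that still supports script-based loaders (loading scripts were removed in datasets 3.0):

from datasets import load_dataset

# Build all three splits; the JSONL files are fetched from GitHub on first use.
dataset = load_dataset("csabstruct.py")

example = dataset["train"][0]
print(example["abstract_id"], example["sentences"][0])

# "labels" is declared as [ClassLabel(...)], so values come back as integer ids.
# Grab the ClassLabel feature to map them to names, handling both the list and
# Sequence representations that different versions may use.
labels_feature = dataset["train"].features["labels"]
class_label = (
    labels_feature[0] if isinstance(labels_feature, list) else labels_feature.feature
)
print([class_label.int2str(i) for i in example["labels"]])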