import json
import os

import datasets


_DESCRIPTION = """\
PubMed dataset for summarization.
From the paper: "A Discourse-Aware Attention Model for Abstractive Summarization of Long Documents" by Cohan et al.
See: https://aclanthology.org/N18-2097.pdf
See: https://github.com/armancohan/long-summarization
"""
_CITATION = """\
@inproceedings{cohan-etal-2018-discourse,
    title = "A Discourse-Aware Attention Model for Abstractive Summarization of Long Documents",
    author = "Cohan, Arman  and
      Dernoncourt, Franck  and
      Kim, Doo Soon  and
      Bui, Trung  and
      Kim, Seokhwan  and
      Chang, Walter  and
      Goharian, Nazli",
    booktitle = "Proceedings of the 2018 Conference of the North {A}merican Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 2 (Short Papers)",
    month = jun,
    year = "2018",
    address = "New Orleans, Louisiana",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/N18-2097",
    doi = "10.18653/v1/N18-2097",
    pages = "615--621",
    abstract = "Neural abstractive summarization models have led to promising results in summarizing relatively short documents. We propose the first model for abstractive summarization of single, longer-form documents (e.g., research papers). Our approach consists of a new hierarchical encoder that models the discourse structure of a document, and an attentive discourse-aware decoder to generate the summary. Empirical results on two large-scale datasets of scientific papers show that our model significantly outperforms state-of-the-art models.",
}
"""
_ABSTRACT = "abstract"
_ARTICLE = "article"

class PubMedSummarizationConfig(datasets.BuilderConfig):
    """BuilderConfig for PubMedSummarization."""

    def __init__(self, **kwargs):
        """BuilderConfig for PubMedSummarization.
        Args:
          **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(**kwargs)


class PubMedSummarizationDataset(datasets.GeneratorBasedBuilder):
    """PubMedSummarization Dataset."""
    
    _TRAIN_FILE = "train.zip"
    _VAL_FILE = "val.zip"
    _TEST_FILE = "test.zip"

    BUILDER_CONFIGS = [
        PubMedSummarizationConfig(
            name="section",
            version=datasets.Version("1.0.0"),
            description="PubMed dataset for summarization, concat sections",
        ),
        PubMedSummarizationConfig(
            name="document",
            version=datasets.Version("1.0.0"),
            description="PubMed dataset for summarization, document",
        ),
    ]

    # The default must name one of the configs defined above.
    DEFAULT_CONFIG_NAME = "section"

    def _info(self):
        # Should return a datasets.DatasetInfo object
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    _ARTICLE: datasets.Value("string"),
                    _ABSTRACT: datasets.Value("string"),
                    #"id": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage="https://github.com/armancohan/long-summarization",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        train_path = os.path.join(dl_manager.download_and_extract(self._TRAIN_FILE), "train.txt")
        val_path = os.path.join(dl_manager.download_and_extract(self._VAL_FILE), "val.txt")
        test_path = os.path.join(dl_manager.download_and_extract(self._TEST_FILE), "test.txt")

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN, gen_kwargs={"filepath": train_path}
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION, gen_kwargs={"filepath": val_path}
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST, gen_kwargs={"filepath": test_path}
            ),
        ]
    
    def _generate_examples(self, filepath):
        """Generate PubMedSummarization examples."""
        with open(filepath, encoding="utf-8") as f:
            for id_, row in enumerate(f):
                data = json.loads(row)

                """
                'article_id': str,
                'abstract_text': List[str],
                'article_text': List[str],
                'section_names': List[str],
                'sections': List[List[str]]
                """
                # Per the config descriptions above: "document" uses the flat
                # article text, "section" concatenates the section sentences.
                if self.config.name == "document":
                    article = data["article_text"]
                else:
                    article = [item.strip() for sublist in data["sections"] for item in sublist]
                abstract = data["abstract_text"]
                yield id_, {"article": ' '.join(article), "abstract": ' '.join(abstract)}