system HF staff committed on
Commit
c421f65
1 Parent(s): d448d3b

import from S3

scientific_papers.py ADDED
@@ -0,0 +1,139 @@
+ # coding=utf-8
+ # Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ # Lint as: python3
+ """Scientific Papers Dataset."""
+
+ from __future__ import absolute_import, division, print_function
+
+ import json
+ import os
+
+ import datasets
+
+
+ _CITATION = """
+ @article{Cohan_2018,
+    title={A Discourse-Aware Attention Model for Abstractive Summarization of
+           Long Documents},
+    url={http://dx.doi.org/10.18653/v1/n18-2097},
+    DOI={10.18653/v1/n18-2097},
+    journal={Proceedings of the 2018 Conference of the North American Chapter of
+           the Association for Computational Linguistics: Human Language
+           Technologies, Volume 2 (Short Papers)},
+    publisher={Association for Computational Linguistics},
+    author={Cohan, Arman and Dernoncourt, Franck and Kim, Doo Soon and Bui, Trung and Kim, Seokhwan and Chang, Walter and Goharian, Nazli},
+    year={2018}
+ }
+ """
+
+ _DESCRIPTION = """
+ The scientific papers dataset contains two sets of long and structured documents.
+ The datasets are obtained from the ArXiv and PubMed OpenAccess repositories.
+
+ Both "arxiv" and "pubmed" have three features:
+   - article: the body of the document, paragraphs separated by "\\n".
+   - abstract: the abstract of the document, paragraphs separated by "\\n".
+   - section_names: titles of sections, separated by "\\n".
+
+ """
+
+ _DOCUMENT = "article"
+ _SUMMARY = "abstract"
+
+ _URLS = {
+     "arxiv": "https://s3.amazonaws.com/datasets.huggingface.co/nlp/datasets/scientific_papers/dummy/arxiv/1.1.1/dummy_data.zip",
+     "pubmed": "https://s3.amazonaws.com/datasets.huggingface.co/nlp/datasets/scientific_papers/dummy/pubmed/1.1.1/dummy_data.zip",
+ }
+
+
+ class ScientificPapersConfig(datasets.BuilderConfig):
+     """BuilderConfig for Scientific Papers."""
+
+     def __init__(self, filename=None, **kwargs):
+         """BuilderConfig for ScientificPapers.
+
+         Args:
+             filename: filename of different configs for the dataset.
+             **kwargs: keyword arguments forwarded to super.
+         """
+         # 1.1.0: removed the sentence breakers <S> and </S> from the summary.
+         super(ScientificPapersConfig, self).__init__(version=datasets.Version("1.1.1"), **kwargs)
+         self.filename = filename
+
+
+ class ScientificPapers(datasets.GeneratorBasedBuilder):
+     """Scientific Papers."""
+
+     BUILDER_CONFIGS = [
+         ScientificPapersConfig(name="pubmed", description="Documents from PubMed repository."),
+         ScientificPapersConfig(name="arxiv", description="Documents from ArXiv repository."),
+     ]
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(
+                 {
+                     _DOCUMENT: datasets.Value("string"),
+                     _SUMMARY: datasets.Value("string"),
+                     "section_names": datasets.Value("string"),
+                 }
+             ),
+             supervised_keys=None,
+             homepage="https://github.com/armancohan/long-summarization",
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         """Returns SplitGenerators."""
+         dl_paths = dl_manager.download_and_extract(_URLS)
+         path = os.path.join(dl_paths[self.config.name], self.config.name + "-dataset")
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={"path": os.path.join(path, "train.txt")},
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 gen_kwargs={"path": os.path.join(path, "val.txt")},
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={"path": os.path.join(path, "test.txt")},
+             ),
+         ]
+
+     def _generate_examples(self, path=None):
+         """Yields examples."""
+         with open(path, encoding="utf-8") as f:
+             for line in f:
+                 # Possible keys are:
+                 #   "article_id": str
+                 #   "article_text": list[str], article (list of paragraphs).
+                 #   "abstract_text": list[str], abstract (list of paragraphs).
+                 #   "section_names": list[str], list of section names.
+                 #   "sections": list[list[str]], list of sections (list of paragraphs).
+                 d = json.loads(line)
+                 summary = "\n".join(d["abstract_text"])
+                 # In the original paper, <S> and </S> are not used in the vocabulary
+                 # during training or during decoding.
+                 # https://github.com/armancohan/long-summarization/blob/master/data.py#L27
+                 summary = summary.replace("<S>", "").replace("</S>", "")
+                 yield d["article_id"], {
+                     _DOCUMENT: "\n".join(d["article_text"]),
+                     _SUMMARY: summary,
+                     "section_names": "\n".join(d["section_names"]),
+                 }
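
For context, a minimal usage sketch (not part of the commit): a loading script like the one above is normally consumed through the standard `datasets.load_dataset` API, with the config names "arxiv" and "pubmed" coming from `BUILDER_CONFIGS` and the feature names "article", "abstract", and "section_names" coming from `_info()`. This assumes the `datasets` library is installed and the data referenced by the script is reachable.

    from datasets import load_dataset

    # "arxiv" and "pubmed" are the two BUILDER_CONFIGS declared in the script above.
    arxiv_train = load_dataset("scientific_papers", "arxiv", split="train")

    example = arxiv_train[0]
    print(example["article"][:200])               # _DOCUMENT: body, paragraphs joined with "\n"
    print(example["abstract"][:200])              # _SUMMARY: abstract with <S>/</S> markers stripped
    print(example["section_names"].split("\n"))   # one section title per entry
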
scientific_papers_dummy.py ADDED
@@ -0,0 +1,139 @@
+ # coding=utf-8
+ # Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ # Lint as: python3
+ """Scientific Papers Dataset."""
+
+ from __future__ import absolute_import, division, print_function
+
+ import json
+ import os
+
+ import datasets
+
+
+ _CITATION = """
+ @article{Cohan_2018,
+    title={A Discourse-Aware Attention Model for Abstractive Summarization of
+           Long Documents},
+    url={http://dx.doi.org/10.18653/v1/n18-2097},
+    DOI={10.18653/v1/n18-2097},
+    journal={Proceedings of the 2018 Conference of the North American Chapter of
+           the Association for Computational Linguistics: Human Language
+           Technologies, Volume 2 (Short Papers)},
+    publisher={Association for Computational Linguistics},
+    author={Cohan, Arman and Dernoncourt, Franck and Kim, Doo Soon and Bui, Trung and Kim, Seokhwan and Chang, Walter and Goharian, Nazli},
+    year={2018}
+ }
+ """
+
+ _DESCRIPTION = """
+ The scientific papers dataset contains two sets of long and structured documents.
+ The datasets are obtained from the ArXiv and PubMed OpenAccess repositories.
+
+ Both "arxiv" and "pubmed" have three features:
+   - article: the body of the document, paragraphs separated by "\\n".
+   - abstract: the abstract of the document, paragraphs separated by "\\n".
+   - section_names: titles of sections, separated by "\\n".
+
+ """
+
+ _DOCUMENT = "article"
+ _SUMMARY = "abstract"
+
+ _URLS = {
+     "arxiv": "https://s3.amazonaws.com/datasets.huggingface.co/scientific_papers/1.1.1/dummy_data/arxiv_data.zip",
+     "pubmed": "https://s3.amazonaws.com/datasets.huggingface.co/scientific_papers/1.1.1/dummy_data/pubmed_data.zip",
+ }
+
+
+ class ScientificPapersConfig(datasets.BuilderConfig):
+     """BuilderConfig for Scientific Papers."""
+
+     def __init__(self, filename=None, **kwargs):
+         """BuilderConfig for ScientificPapers.
+
+         Args:
+             filename: filename of different configs for the dataset.
+             **kwargs: keyword arguments forwarded to super.
+         """
+         # 1.1.0: removed the sentence breakers <S> and </S> from the summary.
+         super(ScientificPapersConfig, self).__init__(version=datasets.Version("1.1.1"), **kwargs)
+         self.filename = filename
+
+
+ class ScientificPapers(datasets.GeneratorBasedBuilder):
+     """Scientific Papers."""
+
+     BUILDER_CONFIGS = [
+         ScientificPapersConfig(name="pubmed", description="Documents from PubMed repository."),
+         ScientificPapersConfig(name="arxiv", description="Documents from ArXiv repository."),
+     ]
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(
+                 {
+                     _DOCUMENT: datasets.Value("string"),
+                     _SUMMARY: datasets.Value("string"),
+                     "section_names": datasets.Value("string"),
+                 }
+             ),
+             supervised_keys=None,
+             homepage="https://github.com/armancohan/long-summarization",
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         """Returns SplitGenerators."""
+         dl_paths = dl_manager.download_and_extract(_URLS)
+         path = os.path.join(dl_paths[self.config.name], "dummy_data", self.config.name + "-dataset.zip", self.config.name + "-dataset")
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={"path": os.path.join(path, "train.txt")},
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 gen_kwargs={"path": os.path.join(path, "val.txt")},
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={"path": os.path.join(path, "test.txt")},
+             ),
+         ]
+
+     def _generate_examples(self, path=None):
+         """Yields examples."""
+         with open(path, encoding="utf-8") as f:
+             for line in f:
+                 # Possible keys are:
+                 #   "article_id": str
+                 #   "article_text": list[str], article (list of paragraphs).
+                 #   "abstract_text": list[str], abstract (list of paragraphs).
+                 #   "section_names": list[str], list of section names.
+                 #   "sections": list[list[str]], list of sections (list of paragraphs).
+                 d = json.loads(line)
+                 summary = "\n".join(d["abstract_text"])
+                 # In the original paper, <S> and </S> are not used in the vocabulary
+                 # during training or during decoding.
+                 # https://github.com/armancohan/long-summarization/blob/master/data.py#L27
+                 summary = summary.replace("<S>", "").replace("</S>", "")
+                 yield d["article_id"], {
+                     _DOCUMENT: "\n".join(d["article_text"]),
+                     _SUMMARY: summary,
+                     "section_names": "\n".join(d["section_names"]),
+                 }
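
For illustration only (again not part of the commit), the JSON-lines record format that `_generate_examples` expects can be sketched as below. The keys mirror the comment block inside the generator; the field values are invented placeholders.

    import json

    # Invented placeholder record with the documented keys.
    record = {
        "article_id": "example-0001",
        "article_text": ["first paragraph of the body .", "second paragraph of the body ."],
        "abstract_text": ["<S> first abstract sentence . </S>", "<S> second abstract sentence . </S>"],
        "section_names": ["introduction", "conclusion"],
        "sections": [["first paragraph of the body ."], ["second paragraph of the body ."]],
    }

    # One record per line, exactly as _generate_examples reads them from train.txt / val.txt / test.txt.
    line = json.dumps(record)
    d = json.loads(line)
    summary = "\n".join(d["abstract_text"]).replace("<S>", "").replace("</S>", "")
    print(d["article_id"])
    print(summary)
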
scientific_papers_dummy.py.lock ADDED
File without changes