Commit 9e31b69 committed by nbroad
Parent: 45e7efa

config and copied code from cnn/dailymail

Files changed (1)
citesum.py +129 -0
citesum.py ADDED
@@ -0,0 +1,129 @@
# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Lint as: python3
"""CiteSum dataset."""

import json
import os

import datasets


logger = datasets.logging.get_logger(__name__)


_HOMEPAGE = "https://github.com/morningmoni/CiteSum"

_DESCRIPTION = """\
CiteSum: Citation Text-guided Scientific Extreme Summarization and Low-resource Domain Adaptation.
CiteSum contains TLDR summaries for scientific papers, derived from their citation texts without human annotation.
CiteSum is around 30 times larger than the previous human-curated dataset SciTLDR.
"""

_CITATION = """\
@misc{https://doi.org/10.48550/arxiv.2205.06207,
  doi = {10.48550/ARXIV.2205.06207},
  url = {https://arxiv.org/abs/2205.06207},
  author = {Mao, Yuning and Zhong, Ming and Han, Jiawei},
  keywords = {Computation and Language (cs.CL), FOS: Computer and information sciences},
  title = {CiteSum: Citation Text-guided Scientific Extreme Summarization and Low-resource Domain Adaptation},
  publisher = {arXiv},
  year = {2022},
  copyright = {Creative Commons Attribution 4.0 International}
}
"""

# NOTE: the data is hosted on Google Drive; dl_manager may need the
# direct-download form of this link (uc?export=download&id=<file id>).
_DOWNLOAD_URL = "https://drive.google.com/file/d/1ndHCREXGSPnDUNllladh9qCtayqbXAfJ"


class CiteSumConfig(datasets.BuilderConfig):
    """BuilderConfig for CiteSum."""

    def __init__(self, **kwargs):
        """BuilderConfig for CiteSum.

        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(**kwargs)


class CiteSum(datasets.GeneratorBasedBuilder):
    """CiteSum summarization dataset."""

    BUILDER_CONFIGS = [CiteSumConfig(name="citesum", description="Plain text")]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "src": datasets.Value("string"),
                    "tgt": datasets.Value("string"),
                    "paper_id": datasets.Value("string"),
                    "title": datasets.Value("string"),
                    "discipline": {
                        "venue": datasets.Value("string"),
                        "journal": datasets.Value("string"),
                        "mag_field_of_study": datasets.features.Sequence(
                            datasets.Value("string")
                        ),
                    },
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )
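
    # For reference, a single record is expected to match the features above;
    # the values below are illustrative placeholders, not real data:
    #
    #   {
    #       "src": "In [1], the authors propose a method for ...",
    #       "tgt": "A one-sentence TLDR of the cited paper.",
    #       "paper_id": "0123456789",
    #       "title": "An Example Paper Title",
    #       "discipline": {
    #           "venue": "ACL",
    #           "journal": "None",
    #           "mag_field_of_study": ["Computer Science"],
    #       },
    #   }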

    def _split_generators(self, dl_manager):
        # Assumes the Google Drive link resolves to an archive containing one
        # JSON-lines file per split; the exact file names are an assumption.
        archive_dir = dl_manager.download_and_extract(_DOWNLOAD_URL)
        split_files = {
            datasets.Split.TRAIN: "train.json",
            datasets.Split.VALIDATION: "val.json",
            datasets.Split.TEST: "test.json",
        }
        return [
            datasets.SplitGenerator(
                name=split,
                gen_kwargs={"filepath": os.path.join(archive_dir, filename)},
            )
            for split, filename in split_files.items()
        ]

    def _generate_examples(self, filepath):
        # Each line of a split file is one JSON object carrying the fields
        # declared in _info(); optional fields fall back to empty values.
        with open(filepath, encoding="utf-8") as f:
            for idx, line in enumerate(f):
                row = json.loads(line)
                discipline = row.get("discipline") or {}
                yield idx, {
                    "src": row["src"],
                    "tgt": row["tgt"],
                    "paper_id": row.get("paper_id", ""),
                    "title": row.get("title", ""),
                    "discipline": {
                        "venue": discipline.get("venue") or "",
                        "journal": discipline.get("journal") or "",
                        "mag_field_of_study": discipline.get("mag_field_of_study") or [],
                    },
                }
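
Once the script loads, the dataset can be pulled through the standard datasets API. A minimal usage sketch (the repo id "nbroad/citesum" is assumed from the commit author, not stated in the script):

    from datasets import load_dataset

    # Hypothetical repo id, inferred from the committer; adjust if different.
    dataset = load_dataset("nbroad/citesum")

    example = dataset["train"][0]
    print(example["src"])  # citation text used as the source document
    print(example["tgt"])  # TLDR-style target summary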