ccdv committed on
Commit
1704831
1 Parent(s): 8b56e37

first commit

Files changed (5)
  1. README.md +85 -0
  2. WCEP-10.py +158 -0
  3. test.zip +3 -0
  4. train.zip +3 -0
  5. val.zip +3 -0
README.md ADDED
@@ -0,0 +1,85 @@
+ ---
+ languages:
+ - en
+ multilinguality:
+ - monolingual
+ size_categories:
+ - 1K<n<10K
+ task_categories:
+ - conditional-text-generation
+ task_ids:
+ - summarization
+ ---
+
+ # WCEP10 dataset for summarization
+
+ Summarization dataset copied from [PRIMERA](https://github.com/allenai/PRIMER).
+ This dataset is compatible with the [`run_summarization.py`](https://github.com/huggingface/transformers/tree/master/examples/pytorch/summarization) script from Transformers if you add this line to the `summarization_name_mapping` variable:
+ ```python
+ "ccdv/WCEP-10": ("document", "summary")
+ ```
+
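+ For reference, the two column names in this mapping are the fields exposed by `load_dataset` (a minimal sketch, assuming a `datasets` version that can run dataset scripts from the Hub; `roberta` is the default config described below):
+
+ ```python
+ from datasets import load_dataset
+
+ # Load the default "roberta" config of this dataset.
+ dataset = load_dataset("ccdv/WCEP-10", "roberta")
+
+ sample = dataset["train"][0]
+ print(sample["document"][:200])  # source documents joined with "</s>"
+ print(sample["summary"])         # reference summary of the news cluster
+ ```
+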
+ # Configs
+ 4 possible configs (a short sketch contrasting them follows the list):
+ - `roberta` will concatenate documents with "</s>" (default)
+ - `newline` will concatenate documents with "\n"
+ - `bert` will concatenate documents with "[SEP]"
+ - `list` will return the list of documents instead of a string
+
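+ For example, the separator-based configs return `document` as a single string, while `list` keeps the source articles separate (a minimal sketch, same loading assumptions as above):
+
+ ```python
+ from datasets import load_dataset
+
+ # "roberta": the documents of a cluster are joined into one string with "</s>".
+ joined = load_dataset("ccdv/WCEP-10", "roberta", split="train")
+ print(type(joined[0]["document"]))   # str
+
+ # "list": the documents are kept as a list of strings, one per source article.
+ as_list = load_dataset("ccdv/WCEP-10", "list", split="train")
+ print(type(as_list[0]["document"]))  # list
+ ```
+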
+ ### Data Fields
+
+ - `id`: paper id
+ - `document`: a string/list containing the body of a set of documents
+ - `summary`: a string containing the summary of the document set
+
+ ### Data Splits
+
+ This dataset has 3 splits: _train_, _validation_, and _test_ (a quick size check is sketched below the table). \
+ Token counts are whitespace-based.
+
+ | Dataset Split | Number of Instances |
+ | ------------- | ------------------- |
+ | Train         | 8158                |
+ | Validation    | 1020                |
+ | Test          | 1022                |
+
+
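+ The counts above can be checked directly (a minimal sketch, same loading assumptions as the snippets above):
+
+ ```python
+ from datasets import load_dataset
+
+ dataset = load_dataset("ccdv/WCEP-10", "roberta")
+ print({split: dataset[split].num_rows for split in dataset})
+ # expected: {'train': 8158, 'validation': 1020, 'test': 1022}
+ ```
+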
+ # Cite original articles
+ ```
+ @article{DBLP:journals/corr/abs-2005-10070,
+   author    = {Demian Gholipour Ghalandari and
+                Chris Hokamp and
+                Nghia The Pham and
+                John Glover and
+                Georgiana Ifrim},
+   title     = {A Large-Scale Multi-Document Summarization Dataset from the Wikipedia
+                Current Events Portal},
+   journal   = {CoRR},
+   volume    = {abs/2005.10070},
+   year      = {2020},
+   url       = {https://arxiv.org/abs/2005.10070},
+   eprinttype = {arXiv},
+   eprint    = {2005.10070},
+   timestamp = {Fri, 22 May 2020 16:21:28 +0200},
+   biburl    = {https://dblp.org/rec/journals/corr/abs-2005-10070.bib},
+   bibsource = {dblp computer science bibliography, https://dblp.org}
+ }
+
+ @article{DBLP:journals/corr/abs-2110-08499,
+   author    = {Wen Xiao and
+                Iz Beltagy and
+                Giuseppe Carenini and
+                Arman Cohan},
+   title     = {{PRIMER:} Pyramid-based Masked Sentence Pre-training for Multi-document
+                Summarization},
+   journal   = {CoRR},
+   volume    = {abs/2110.08499},
+   year      = {2021},
+   url       = {https://arxiv.org/abs/2110.08499},
+   eprinttype = {arXiv},
+   eprint    = {2110.08499},
+   timestamp = {Fri, 22 Oct 2021 13:33:09 +0200},
+   biburl    = {https://dblp.org/rec/journals/corr/abs-2110-08499.bib},
+   bibsource = {dblp computer science bibliography, https://dblp.org}
+ }
+ ```
WCEP-10.py ADDED
@@ -0,0 +1,158 @@
+ import json
+ import os
+
+ import datasets
+
+
+ _DESCRIPTION = """
+ WCEP10 dataset for summarization.
+ From paper: "A Large-Scale Multi-Document Summarization Dataset from the Wikipedia
+ Current Events Portal" by D. Gholipour et al.
+ From paper: "PRIMER: Pyramid-based Masked Sentence Pre-training for Multi-document
+ Summarization" by W. Xiao et al.
+
+ """
+ _CITATION = """\
+ @article{DBLP:journals/corr/abs-2005-10070,
+   author    = {Demian Gholipour Ghalandari and
+                Chris Hokamp and
+                Nghia The Pham and
+                John Glover and
+                Georgiana Ifrim},
+   title     = {A Large-Scale Multi-Document Summarization Dataset from the Wikipedia
+                Current Events Portal},
+   journal   = {CoRR},
+   volume    = {abs/2005.10070},
+   year      = {2020},
+   url       = {https://arxiv.org/abs/2005.10070},
+   eprinttype = {arXiv},
+   eprint    = {2005.10070},
+   timestamp = {Fri, 22 May 2020 16:21:28 +0200},
+   biburl    = {https://dblp.org/rec/journals/corr/abs-2005-10070.bib},
+   bibsource = {dblp computer science bibliography, https://dblp.org}
+ }
+
+
+ @article{DBLP:journals/corr/abs-2110-08499,
+   author    = {Wen Xiao and
+                Iz Beltagy and
+                Giuseppe Carenini and
+                Arman Cohan},
+   title     = {{PRIMER:} Pyramid-based Masked Sentence Pre-training for Multi-document
+                Summarization},
+   journal   = {CoRR},
+   volume    = {abs/2110.08499},
+   year      = {2021},
+   url       = {https://arxiv.org/abs/2110.08499},
+   eprinttype = {arXiv},
+   eprint    = {2110.08499},
+   timestamp = {Fri, 22 Oct 2021 13:33:09 +0200},
+   biburl    = {https://dblp.org/rec/journals/corr/abs-2110-08499.bib},
+   bibsource = {dblp computer science bibliography, https://dblp.org}
+ }
+ """
+ _ABSTRACT = "summary"
+ _ARTICLE = "document"
+
+ class WCEP10SummarizationConfig(datasets.BuilderConfig):
+     """BuilderConfig for WCEP10Summarization."""
+
+     def __init__(self, **kwargs):
+         """BuilderConfig for WCEP10Summarization.
+         Args:
+             **kwargs: keyword arguments forwarded to super.
+         """
+         super(WCEP10SummarizationConfig, self).__init__(**kwargs)
+
+
+ class WCEP10SummarizationDataset(datasets.GeneratorBasedBuilder):
+     """WCEP10Summarization Dataset."""
+
+     _TRAIN_FILE = "train.zip"
+     _VAL_FILE = "val.zip"
+     _TEST_FILE = "test.zip"
+
+     BUILDER_CONFIGS = [
+         WCEP10SummarizationConfig(
+             name="newline",
+             version=datasets.Version("1.0.0"),
+             description="WCEP10 dataset for summarization, documents concatenated with a newline",
+         ),
+         WCEP10SummarizationConfig(
+             name="roberta",
+             version=datasets.Version("1.0.0"),
+             description="WCEP10 dataset for summarization, documents concatenated with '</s>'",
+         ),
+         WCEP10SummarizationConfig(
+             name="bert",
+             version=datasets.Version("1.0.0"),
+             description="WCEP10 dataset for summarization, documents concatenated with '[SEP]'",
+         ),
+         WCEP10SummarizationConfig(
+             name="list",
+             version=datasets.Version("1.0.0"),
+             description="WCEP10 dataset for summarization, documents kept as a list of strings",
+         ),
+     ]
+
+     DEFAULT_CONFIG_NAME = "roberta"
+
+     def _info(self):
+         # Should return a datasets.DatasetInfo object
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(
+                 {
+                     _ARTICLE: datasets.Sequence(datasets.Value("string")) if self.config.name == "list" else datasets.Value("string"),
+                     _ABSTRACT: datasets.Value("string"),
+                     # "id": datasets.Value("string"),
+                 }
+             ),
+             supervised_keys=None,
+             homepage="https://github.com/allenai/PRIMER",
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+
+         train_path = os.path.join(dl_manager.download_and_extract(self._TRAIN_FILE), "train.txt")
+         val_path = os.path.join(dl_manager.download_and_extract(self._VAL_FILE), "val.txt")
+         test_path = os.path.join(dl_manager.download_and_extract(self._TEST_FILE), "test.txt")
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN, gen_kwargs={"filepath": train_path}
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION, gen_kwargs={"filepath": val_path}
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST, gen_kwargs={"filepath": test_path}
+             ),
+         ]
+
+     def _generate_examples(self, filepath):
+         """Generate WCEP10Summarization examples."""
+         if self.config.name == "newline":
+             join_ = "\n"
+         elif self.config.name == "roberta":
+             join_ = "</s>"
+         elif self.config.name == "bert":
+             join_ = "[SEP]"
+
+         with open(filepath, encoding="utf-8") as f:
+             for id_, row in enumerate(f):
+                 data = json.loads(row)
+
+                 # Each row is a JSON object:
+                 #     'summary': str,
+                 #     'document': List[str],
+                 document = data["document"]
+                 if self.config.name != "list":
+                     document = join_.join(document)
+                 summary = data["summary"]
+                 yield id_, {"document": document, "summary": summary}
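+
+
+ # Expected input format (an assumption inferred from _generate_examples above, not a
+ # statement about the upstream PRIMER release): each line of train.txt, val.txt and
+ # test.txt is a JSON object of the form
+ #     {"document": ["first source article ...", "..."], "summary": "reference summary"}
+ # i.e. a list of source documents plus one reference summary per news cluster.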
test.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8c9f4e660b21b5d581eaf05c7f75ee2a8597ec9e4ac030d43b07442bf0993682
+ size 6850444
train.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:24e1d9989a38da9707471ec5c47d89ecd192eb34b5f678ecdc7ce2dae9411087
+ size 55467667
val.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dd23d8bdcb486abb41f7a14269ae472b38dcaccc108837a29c8dd9fd1cb90aec
+ size 7149706