yuyang commited on
Commit
232b771
1 Parent(s): a1628c2

add data loading script and readme

Browse files
Files changed (2) hide show
  1. README.md +11 -3
  2. distil_cnndm.py +139 -0
README.md CHANGED
@@ -1,3 +1,11 @@
1
- ---
2
- license: mit
3
- ---
 
 
 
 
 
 
 
 
 
1
+ # Distilled CNN/DailyMail Dataset
2
+
3
+ This folder contains the distilled data and the dataset-loading script that builds a dataset on top of it.
4
+
5
+ - `cnn_bart_pl` is downloaded from [Saved Pseudo-Labels](https://github.com/huggingface/transformers/blob/main/examples/research_projects/seq2seq-distillation/precomputed_pseudo_labels.md). It is generated by facebook/bart-large-cnn and corresponds to version "1.0.0". It contains train/validation/test splits.
6
+ - `pegasus_cnn_cnn_pls` is also downloaded from [Saved Pseudo-Labels](https://github.com/huggingface/transformers/blob/main/examples/research_projects/seq2seq-distillation/precomputed_pseudo_labels.md). It is generated by sshleifer/pegasus-cnn-ft-v2, and it corresponds to version "2.0.0". It only includes the train split.
7
+
8
+
9
+ ## Updates
10
+ - 03/16/2023
11
+ 1. Remove "(CNN)" in the beginning of articles.
distil_cnndm.py ADDED
@@ -0,0 +1,139 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# coding=utf-8
# References:
# (1) https://huggingface.co/datasets/cnn_dailymail/blob/main/cnn_dailymail.py
# (2) https://huggingface.co/docs/datasets/dataset_script

"""Distilled CNN/DailyMail Summarization dataset."""

import os

import datasets
# NOTE(review): nltk.sent_tokenize (used below in _generate_examples) needs the
# "punkt" tokenizer data to be available locally — confirm it is downloaded.
import nltk


# Human-readable description surfaced through datasets.DatasetInfo in _info().
_DESCRIPTION = """\
Distilled CNN/DailyMail non-anonymized summarization dataset.
There are two features:
- article: text of news article, used as the document to be summarized
- highlights: joined text of highlights with <s> and </s> around each
highlight, which is the target summary

The distilled data is obtained by running facebook/bart-large-cnn on the
training dataset. The files used here is directly downloaded from
https://github.com/huggingface/transformers/blob/main/examples/research_projects/seq2seq-distillation/precomputed_pseudo_labels.md.
"""

_CITATION = ""

# Download URLs keyed by pseudo-label source; the key is looked up in
# _split_generators depending on the requested config version.
_DL_URLS = {
    "cnn_bart_pl": "https://cdn-datasets.huggingface.co/pseudo/cnn_dm/cnn_bart_pl.tgz",
    "cnn_pegasus_pl": "https://cdn-datasets.huggingface.co/pseudo/cnn_dm/pegasus_cnn_cnn_pls.tgz",
}

# as mentioned in https://github.com/huggingface/transformers/blob/main/examples/research_projects/seq2seq-distillation/precomputed_pseudo_labels.md#available-pseudo-labels,
# about 5K are missing, and the training should be 282173.
# These counts apply to the BART pseudo-labels (version "1.0.0"); the PEGASUS
# split uses a different hard-coded train count in _split_generators.
_NUM_EXAMPLES = {"train": 282173, "val": 13368, "test": 11490}

# maps from datasets.Split to the one used in the downloaded data.
_SPLIT_MAP = {"train": "train", "test": "test", "validation": "val"}

# One BuilderConfig is created per version in DistillCNNDM.BUILDER_CONFIGS.
_SUPPORTED_VERSIONS = [
    # Using the pseudo labels generated by BART.
    datasets.Version("1.0.0", "Using cased version and the one generated by BART."),
    # Using the pseudo labels generated by Pegasus.
    datasets.Version("2.0.0", "Using cased version and the one generated by PEGASUS."),
]

# NOTE(review): not referenced anywhere in this file — presumably intended as
# the builder's DEFAULT_CONFIG; verify whether it should be wired up.
_DEFAULT_VERSION = datasets.Version("2.0.0", "Using cased version.")
class DistillCNNDMConfig(datasets.BuilderConfig):
    """BuilderConfig for the distilled CNN/DailyMail dataset builder."""

    def __init__(self, **kwargs):
        # No extra options yet — everything is forwarded to BuilderConfig.
        super().__init__(**kwargs)
class DistillCNNDM(datasets.GeneratorBasedBuilder):
    """Distilled CNN/DailyMail non-anonymized summarization dataset."""

    BUILDER_CONFIGS = [
        DistillCNNDMConfig(name=str(version), description="Plain text", version=version)
        for version in _SUPPORTED_VERSIONS
    ]

    def _info(self):
        """Return dataset metadata: two plain-string features, article and highlights."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "article": datasets.Value("string"),
                    "highlights": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager):
        """Download the pseudo-label archives and declare the available splits.

        Note that the validation data have prefix val instead of
        validation, so we use a split mapping.

        Version "1.0.0" (BART pseudo-labels) provides train/val/test;
        version "2.0.0" (PEGASUS pseudo-labels) provides only train.

        Raises:
            ValueError: if the config version is not one of the supported
                versions (defensive; BUILDER_CONFIGS restricts the choices).
        """
        dl_paths = dl_manager.download(_DL_URLS)
        if self.config.version == "1.0.0":
            return [
                datasets.SplitGenerator(
                    name=split,
                    gen_kwargs={
                        "src_path": os.path.join(
                            dl_paths["cnn_bart_pl"], f"{_SPLIT_MAP[split]}.source"
                        ),
                        "tgt_path": os.path.join(
                            dl_paths["cnn_bart_pl"], f"{_SPLIT_MAP[split]}.target"
                        ),
                        "num_examples": _NUM_EXAMPLES[_SPLIT_MAP[split]],
                    },
                )
                for split in [
                    datasets.Split.TRAIN,
                    datasets.Split.VALIDATION,
                    datasets.Split.TEST,
                ]
            ]
        elif self.config.version == "2.0.0":
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    gen_kwargs={
                        "src_path": os.path.join(
                            dl_paths["cnn_pegasus_pl"], "train.source"
                        ),
                        "tgt_path": os.path.join(
                            dl_paths["cnn_pegasus_pl"], "train.target"
                        ),
                        # PEGASUS pseudo-labels cover the full CNN/DM train set,
                        # unlike the BART ones (see _NUM_EXAMPLES comment).
                        "num_examples": 287112,
                    },
                )
            ]
        # Previously this fell through and returned None for an unknown
        # version; fail loudly instead so misconfiguration is visible.
        raise ValueError(f"Unsupported version: {self.config.version}")

    def _generate_examples(self, src_path, tgt_path, num_examples):
        """Yield (index, example) pairs read line-by-line from parallel files.

        The output article and highlights formats resemble those given
        by `load_dataset("cnn_dailymail", "3.0.0")`.

        Args:
            src_path: path to the `.source` file (one article per line).
            tgt_path: path to the `.target` file (one summary per line).
            num_examples: number of line pairs to read from the files.
        """
        # Pin the encoding: the pseudo-label files are text downloaded from the
        # hub, and relying on the platform default encoding is not portable.
        with open(src_path, encoding="utf-8") as src, open(
            tgt_path, encoding="utf-8"
        ) as tgt:
            for idx in range(num_examples):
                article = src.readline().strip()
                # Drop the leading "(CNN)" tag (see README update 03/16/2023).
                if article.startswith("(CNN)"):
                    article = article[len("(CNN)"):]
                highlights = tgt.readline().strip()
                # Match cnn_dailymail "3.0.0": one highlight sentence per line.
                highlights = "\n".join(nltk.sent_tokenize(highlights))
                yield idx, {
                    "article": article,
                    "highlights": highlights,
                }