ccdv committed on
Commit
c90474f
1 Parent(s): 49c80ad
Files changed (5)
  1. README.md +56 -0
  2. mediasum.py +3 -1
  3. test_data.zip +3 -0
  4. train_data.zip +3 -0
  5. val_data.zip +3 -0
README.md ADDED
@@ -0,0 +1,56 @@
+ ---
+ languages:
+ - en
+ multilinguality:
+ - monolingual
+ size_categories:
+ - 100K<n<1M
+ task_categories:
+ - conditional-text-generation
+ task_ids:
+ - summarization
+ ---
+
+ # MediaSum dataset for summarization
+
+ Summarization dataset copied from [MediaSum: A Large-scale Media Interview Dataset for Dialogue Summarization](https://github.com/zcgzcgzcg1/MediaSumR)
+
+ This dataset is compatible with the [`run_summarization.py`](https://github.com/huggingface/transformers/tree/master/examples/pytorch/summarization) script from Transformers if you add this line to the `summarization_name_mapping` variable:
+ ```python
+ "ccdv/mediasum": ("document", "summary")
+ ```
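+ For context, here is a sketch of what that edit looks like inside the script; the neighboring entries are illustrative examples, not an exact copy of the script's contents:
+ ```python
+ # run_summarization.py maps dataset names to (text column, summary column) pairs.
+ summarization_name_mapping = {
+     "cnn_dailymail": ("article", "highlights"),   # illustrative existing entry
+     "xsum": ("document", "summary"),              # illustrative existing entry
+     "ccdv/mediasum": ("document", "summary"),     # added for this dataset
+ }
+ ```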
+
+ # Configs
+ 4 possible configs (see the loading sketch after this list):
+ - `roberta` will concatenate documents with "</s>" (default)
+ - `newline` will concatenate documents with "\n"
+ - `bert` will concatenate documents with "[SEP]"
+ - `list` will return the list of documents instead of a single string
+
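+ A minimal loading sketch, assuming the Hugging Face `datasets` library; the config name goes in the second argument:
+ ```python
+ from datasets import load_dataset
+
+ # Default config: utterances are joined into a single string with "</s>".
+ dataset = load_dataset("ccdv/mediasum", "roberta")
+ print(dataset["train"][0]["document"][:100])  # str
+
+ # "list" config: utterances stay a list of strings.
+ dataset = load_dataset("ccdv/mediasum", "list")
+ print(len(dataset["train"][0]["document"]))   # number of utterances
+ ```
+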
+ ### Data Fields
+
+ - `id`: interview id
+ - `document`: a string (or a list of strings with the `list` config) containing the interview utterances
+ - `summary`: a string containing the summary of the interview
+
+ ### Data Splits
+
+ This dataset has 3 splits: _train_, _validation_, and _test_.
+
+ | Dataset Split | Number of Instances |
+ | ------------- | ------------------- |
+ | Train         | 443596              |
+ | Validation    | 10000               |
+ | Test          | 10000               |
+
+
+ # Cite original article
+ ```
+ @article{zhu2021mediasum,
+   title={MediaSum: A Large-scale Media Interview Dataset for Dialogue Summarization},
+   author={Zhu, Chenguang and Liu, Yang and Mei, Jie and Zeng, Michael},
+   journal={arXiv preprint arXiv:2103.06410},
+   year={2021}
+ }
+ ```
mediasum.py CHANGED
@@ -21,7 +21,7 @@ _CITATION = """\
  }
  """
  _ABSTRACT = "summary"
- _ARTICLE = "utt"
+ _ARTICLE = "document"
 
  class MediaSumSummarizationConfig(datasets.BuilderConfig):
      """BuilderConfig for MediaSumSummarization."""
@@ -117,7 +117,9 @@ class MediaSumSummarizationDataset(datasets.GeneratorBasedBuilder):
      'summary': str,
      'document': List[str],
      """
+
      document = data["utt"]
+
      if self.config.name != "list":
          document = join_.join(document)
      summary = data["summary"]
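The join behavior this hunk relies on can be sketched as follows. This is a simplified sketch: `build_document`, the `separators` table, and the sample record are reconstructed from the diff and the config list above, not copied from the full script.

```python
# Sketch of how the loader turns a record's utterances into the
# "document" field, based on the diff above and the config list.
separators = {"roberta": "</s>", "newline": "\n", "bert": "[SEP]"}

def build_document(data: dict, config_name: str):
    document = data["utt"]  # list of utterance strings
    if config_name != "list":
        # Non-"list" configs concatenate with the config's separator.
        document = separators[config_name].join(document)
    return document

record = {"utt": ["Hello and welcome.", "Thanks for having me."]}
print(build_document(record, "roberta"))  # "Hello and welcome.</s>Thanks for having me."
print(build_document(record, "list"))     # list left unchanged
```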
test_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:760b1efde09738c70fdb0449dbb9a4fa07f1d474cb790ddf9446650052df89d7
+ size 33913771
train_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5d29532f5f9d84b33a9007ebad450898540742edff55d5d058fa47e4c975a3b7
+ size 1442774847
val_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8f466af07260fd2fa72aff5b065ac355e9bf677931b3e84c99764e6ebf2c9215
+ size 34058060