Languages: English
Multilinguality: monolingual
Size Categories: 100K<n<1M
ArXiv: 2103.06410
Nicholas Broad committed on
Commit 0f2e4bd
1 Parent(s): e58c777

builder script

Files changed (1)
  1. mediasum.py +117 -0
mediasum.py ADDED
@@ -0,0 +1,117 @@
+ # coding=utf-8
+ # Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ # Lint as: python3
+ """MediaSum dataset"""
+
+ import os
+ import json
+
+ import datasets
+
+
+ logger = datasets.logging.get_logger(__name__)
+
+
+ _HOMEPAGE = "https://github.com/zcgzcgzcg1/MediaSum"
+
+ _DESCRIPTION = """\
+ This large-scale media interview dataset contains 463.6K transcripts with abstractive summaries,
+ collected from interview transcripts and overview / topic descriptions from NPR and CNN.
+ """
+
+ _CITATION = """\
+ @article{zhu2021mediasum,
+   title={MediaSum: A Large-scale Media Interview Dataset for Dialogue Summarization},
+   author={Zhu, Chenguang and Liu, Yang and Mei, Jie and Zeng, Michael},
+   journal={arXiv preprint arXiv:2103.06410},
+   year={2021}
+ }
+ """
+
+ _DOWNLOAD_URLS = {
+     "train": "https://huggingface.co/datasets/nbroad/mediasum/resolve/main/train.json",
+     "validation": "https://huggingface.co/datasets/nbroad/mediasum/resolve/main/validation.json",
+     "test": "https://huggingface.co/datasets/nbroad/mediasum/resolve/main/test.json",
+ }
+
+
+ class MediaSumConfig(datasets.BuilderConfig):
+     """BuilderConfig for MediaSum."""
+
+     def __init__(self, **kwargs):
+         """BuilderConfig for MediaSum.
+         Args:
+             **kwargs: keyword arguments forwarded to super.
+         """
+         super().__init__(**kwargs)
+
+
+ class MediaSum(datasets.GeneratorBasedBuilder):
+     """MediaSum summarization dataset."""
+
+     BUILDER_CONFIGS = [MediaSumConfig(name="mediasum", description="Plain text")]
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(
+                 {
+                     "id": datasets.Value("string"),
+                     "program": datasets.Value("string"),
+                     "date": datasets.Value("string"),
+                     "url": datasets.Value("string"),
+                     "title": datasets.Value("string"),
+                     "summary": datasets.Value("string"),
+                     "utt": datasets.features.Sequence(
+                         datasets.Value("string")
+                     ),
+                     "speaker": datasets.features.Sequence(
+                         datasets.Value("string")
+                     ),
+                 }
+             ),
+             supervised_keys=None,
+             homepage=_HOMEPAGE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         dl_path = dl_manager.download(_DOWNLOAD_URLS)
+
+         return [
+             datasets.SplitGenerator(
+                 name=split,
+                 gen_kwargs={
+                     "filepath": dl_path[split],
+                 },
+             )
+             for split in [
+                 datasets.Split.TRAIN,
+                 datasets.Split.VALIDATION,
+                 datasets.Split.TEST,
+             ]
+         ]
+
+     def _generate_examples(self, filepath):
+
+         with open(filepath, "r") as fp:
+             for idx, line in enumerate(fp):
+                 data = json.loads(line)
+
+                 # Some do not have titles
+                 if "title" not in data:
+                     data["title"] = ""
+                 yield idx, data
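
Note that _generate_examples parses each split file one line at a time with json.loads, so the hosted train/validation/test files are expected to be JSON Lines: one interview record per line, carrying the fields declared in _info(). A minimal usage sketch, assuming the script is resolved from the nbroad/mediasum Hub repository named in _DOWNLOAD_URLS:

from datasets import load_dataset

# Loads all three splits via the builder script above.
ds = load_dataset("nbroad/mediasum")

# Fields declared in _info(): id, program, date, url, title, summary,
# utt (list of utterances) and speaker (a parallel list of speaker names).
example = ds["train"][0]
print(example["summary"])
for speaker, utt in zip(example["speaker"][:3], example["utt"][:3]):
    print(f"{speaker}: {utt}")

Because _generate_examples backfills a missing "title" with an empty string, downstream code can rely on every record exposing all eight fields.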