# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Lint as: python3
"""MediaSum dataset."""

import json

import datasets


logger = datasets.logging.get_logger(__name__)

_HOMEPAGE = "https://github.com/zcgzcgzcg1/MediaSum"

_DESCRIPTION = """\
MediaSum is a large-scale media interview dataset containing 463.6K transcripts
with abstractive summaries. The transcripts are collected from NPR and CNN
interviews, and the summaries are drawn from the accompanying overview and
topic descriptions.
"""

_CITATION = """\
@article{zhu2021mediasum,
  title={MediaSum: A Large-scale Media Interview Dataset for Dialogue Summarization},
  author={Zhu, Chenguang and Liu, Yang and Mei, Jie and Zeng, Michael},
  journal={arXiv preprint arXiv:2103.06410},
  year={2021}
}
"""

_DOWNLOAD_URLS = {
    "train": "https://huggingface.co/datasets/nbroad/mediasum/resolve/main/train.json",
    "validation": "https://huggingface.co/datasets/nbroad/mediasum/resolve/main/validation.json",
    "test": "https://huggingface.co/datasets/nbroad/mediasum/resolve/main/test.json",
}


class MediaSumConfig(datasets.BuilderConfig):
    """BuilderConfig for MediaSum."""

    def __init__(self, **kwargs):
        """BuilderConfig for MediaSum.

        Args:
          **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(**kwargs)


class MediaSum(datasets.GeneratorBasedBuilder):
    """MediaSum summarization dataset."""

    BUILDER_CONFIGS = [MediaSumConfig(name="mediasum", description="Plain text")]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "program": datasets.Value("string"),
                    "date": datasets.Value("string"),
                    "url": datasets.Value("string"),
                    "title": datasets.Value("string"),
                    "summary": datasets.Value("string"),
                    # Parallel sequences: utt[i] is the utterance spoken by speaker[i].
                    "utt": datasets.features.Sequence(datasets.Value("string")),
                    "speaker": datasets.features.Sequence(datasets.Value("string")),
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        dl_path = dl_manager.download(_DOWNLOAD_URLS)
        return [
            datasets.SplitGenerator(
                name=split,
                gen_kwargs={"filepath": dl_path[split]},
            )
            for split in [
                datasets.Split.TRAIN,
                datasets.Split.VALIDATION,
                datasets.Split.TEST,
            ]
        ]

    def _generate_examples(self, filepath):
        # Each split file is in JSON Lines format: one transcript object per line.
        with open(filepath, "r", encoding="utf-8") as fp:
            for idx, line in enumerate(fp):
                data = json.loads(line)
                # Some transcripts do not have titles; fill in an empty string
                # so every example matches the declared feature schema.
                if "title" not in data:
                    data["title"] = ""
                yield idx, data
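
# A minimal usage sketch (not part of the loading script itself). It assumes
# the `datasets` library is installed and that this script is reachable as a
# local path or a Hub dataset id; "path/to/mediasum.py" is a hypothetical
# placeholder for wherever this file lives.
#
#     import datasets
#
#     ds = datasets.load_dataset("path/to/mediasum.py", "mediasum", split="train")
#     example = ds[0]
#     print(example["summary"])
#     # "utt" and "speaker" are parallel: utt[i] was spoken by speaker[i].
#     for speaker, utt in zip(example["speaker"], example["utt"]):
#         print(f"{speaker}: {utt}")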