"""MediaSum dataset""" |

import json

import datasets


logger = datasets.logging.get_logger(__name__)


_HOMEPAGE = "https://github.com/zcgzcgzcg1/MediaSum"

_DESCRIPTION = """\
This large-scale media interview dataset contains 463.6K transcripts with
abstractive summaries, collected from interview transcripts and overview/topic
descriptions from NPR and CNN.
"""

_CITATION = """\
@article{zhu2021mediasum,
  title={MediaSum: A Large-scale Media Interview Dataset for Dialogue Summarization},
  author={Zhu, Chenguang and Liu, Yang and Mei, Jie and Zeng, Michael},
  journal={arXiv preprint arXiv:2103.06410},
  year={2021}
}
"""

_DOWNLOAD_URLS = {
    "train": "https://huggingface.co/datasets/nbroad/mediasum/resolve/main/train.json",
    "validation": "https://huggingface.co/datasets/nbroad/mediasum/resolve/main/validation.json",
    "test": "https://huggingface.co/datasets/nbroad/mediasum/resolve/main/test.json",
}
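
# Despite the ".json" extension, each file is read below as JSON lines: one
# JSON object per line. An illustrative record (all values here are made up;
# only the keys come from the features declared in `_info`):
#
#     {"id": "...", "program": "...", "date": "...", "url": "...",
#      "title": "...", "summary": "...",
#      "utt": ["Hello.", "Hi."], "speaker": ["HOST", "GUEST"]}
#
# "title" is the one optional key; `_generate_examples` fills it in with an
# empty string when it is absent.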


class MediaSumConfig(datasets.BuilderConfig):
    """BuilderConfig for MediaSum."""

    def __init__(self, **kwargs):
        """BuilderConfig for MediaSum.

        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(**kwargs)


class MediaSum(datasets.GeneratorBasedBuilder):
    """MediaSum summarization dataset."""

    BUILDER_CONFIGS = [MediaSumConfig(name="mediasum", description="Plain text")]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "program": datasets.Value("string"),
                    "date": datasets.Value("string"),
                    "url": datasets.Value("string"),
                    "title": datasets.Value("string"),
                    "summary": datasets.Value("string"),
                    # Parallel, same-length lists: speaker[i] utters utt[i].
                    "utt": datasets.features.Sequence(datasets.Value("string")),
                    "speaker": datasets.features.Sequence(datasets.Value("string")),
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # Download the three pre-split files and route each one to
        # `_generate_examples`.
        dl_path = dl_manager.download(_DOWNLOAD_URLS)

        return [
            datasets.SplitGenerator(
                name=split,
                gen_kwargs={"filepath": dl_path[split]},
            )
            for split in [
                datasets.Split.TRAIN,
                datasets.Split.VALIDATION,
                datasets.Split.TEST,
            ]
        ]

    def _generate_examples(self, filepath):
        with open(filepath, "r", encoding="utf-8") as fp:
            for idx, line in enumerate(fp):
                data = json.loads(line)

                # Some records lack a "title" field; default it to an empty
                # string so every example matches the features in `_info`.
                if "title" not in data:
                    data["title"] = ""
                yield idx, data
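
# A preprocessing sketch (not part of the loader): summarization models are
# commonly fed the dialogue as a single string. `flatten_dialogue`, the
# "source" column, and the ": " separator are all illustrative choices, not
# anything this script defines.
#
#     def flatten_dialogue(example):
#         turns = [f"{s}: {u}" for s, u in zip(example["speaker"], example["utt"])]
#         example["source"] = " ".join(turns)
#         return example
#
#     ds = ds.map(flatten_dialogue)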