import json
import os

import datasets

_DESCRIPTION = """\
WCEP10 dataset for summarization.
From the paper: "A Large-Scale Multi-Document Summarization Dataset from the
Wikipedia Current Events Portal" by D. Gholipour Ghalandari et al.
From the paper: "PRIMER: Pyramid-based Masked Sentence Pre-training for
Multi-document Summarization" by W. Xiao et al.
"""

_CITATION = """\
@article{DBLP:journals/corr/abs-2005-10070,
  author     = {Demian Gholipour Ghalandari and
                Chris Hokamp and
                Nghia The Pham and
                John Glover and
                Georgiana Ifrim},
  title      = {A Large-Scale Multi-Document Summarization Dataset from the
                Wikipedia Current Events Portal},
  journal    = {CoRR},
  volume     = {abs/2005.10070},
  year       = {2020},
  url        = {https://arxiv.org/abs/2005.10070},
  eprinttype = {arXiv},
  eprint     = {2005.10070},
  timestamp  = {Fri, 22 May 2020 16:21:28 +0200},
  biburl     = {https://dblp.org/rec/journals/corr/abs-2005-10070.bib},
  bibsource  = {dblp computer science bibliography, https://dblp.org}
}
@article{DBLP:journals/corr/abs-2110-08499,
  author     = {Wen Xiao and
                Iz Beltagy and
                Giuseppe Carenini and
                Arman Cohan},
  title      = {{PRIMER:} Pyramid-based Masked Sentence Pre-training for
                Multi-document Summarization},
  journal    = {CoRR},
  volume     = {abs/2110.08499},
  year       = {2021},
  url        = {https://arxiv.org/abs/2110.08499},
  eprinttype = {arXiv},
  eprint     = {2110.08499},
  timestamp  = {Fri, 22 Oct 2021 13:33:09 +0200},
  biburl     = {https://dblp.org/rec/journals/corr/abs-2110-08499.bib},
  bibsource  = {dblp computer science bibliography, https://dblp.org}
}
"""

_ABSTRACT = "summary"
_ARTICLE = "document"


class WCEP10SummarizationConfig(datasets.BuilderConfig):
    """BuilderConfig for WCEP10Summarization."""

    def __init__(self, **kwargs):
        """BuilderConfig for WCEP10Summarization.

        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        super(WCEP10SummarizationConfig, self).__init__(**kwargs)


class WCEP10SummarizationDataset(datasets.GeneratorBasedBuilder):
    """WCEP10Summarization Dataset."""

    _TRAIN_FILE = "train.zip"
    _VAL_FILE = "val.zip"
    _TEST_FILE = "test.zip"

    BUILDER_CONFIGS = [
        WCEP10SummarizationConfig(
            name="newline",
            version=datasets.Version("1.0.0"),
            description="WCEP10 dataset for summarization, source documents joined with newlines",
        ),
        WCEP10SummarizationConfig(
            name="roberta",
            version=datasets.Version("1.0.0"),
            description="WCEP10 dataset for summarization, source documents concatenated with no separator",
        ),
        WCEP10SummarizationConfig(
            name="bert",
            version=datasets.Version("1.0.0"),
            description="WCEP10 dataset for summarization, source documents joined with [SEP]",
        ),
        WCEP10SummarizationConfig(
            name="list",
            version=datasets.Version("1.0.0"),
            description="WCEP10 dataset for summarization, source documents kept as a list of strings",
        ),
    ]

    DEFAULT_CONFIG_NAME = "roberta"

    def _info(self):
        # Returns a datasets.DatasetInfo object describing features,
        # homepage, and citation. The "list" config keeps the source
        # documents as a sequence of strings; all other configs expose a
        # single concatenated string.
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    _ARTICLE: datasets.Sequence(datasets.Value("string"))
                    if self.config.name == "list"
                    else datasets.Value("string"),
                    _ABSTRACT: datasets.Value("string"),
                    # "id": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage="https://github.com/allenai/PRIMER",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        train_path = os.path.join(dl_manager.download_and_extract(self._TRAIN_FILE), "train.txt")
        val_path = os.path.join(dl_manager.download_and_extract(self._VAL_FILE), "val.txt")
        test_path = os.path.join(dl_manager.download_and_extract(self._TEST_FILE), "test.txt")

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN, gen_kwargs={"filepath": train_path}
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION, gen_kwargs={"filepath": val_path}
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST, gen_kwargs={"filepath": test_path}
            ),
        ]

    def _generate_examples(self, filepath):
        """Generate WCEP10Summarization examples.

        Each line of the input file is a JSON record of the form:
            {"summary": str, "document": List[str]}
        """
        # Separator used to join the source documents into a single string.
        # The "list" config never joins, so join_ is left unset for it.
        if self.config.name == "newline":
            join_ = "\n"
        elif self.config.name == "roberta":
            join_ = ""
        elif self.config.name == "bert":
            join_ = "[SEP]"

        with open(filepath, encoding="utf-8") as f:
            for id_, row in enumerate(f):
                data = json.loads(row)
                document = data["document"]
                if self.config.name != "list":
                    document = join_.join(document)
                summary = data["summary"]
                yield id_, {"document": document, "summary": summary}