import json
import os

import datasets

_CITATION = ""

_DESCRIPTION = """
Scenario for single-document text summarization.
Currently supports the following datasets:
1. XSum (https://arxiv.org/pdf/1808.08745.pdf)
2. CNN/DailyMail non-anonymized (https://arxiv.org/pdf/1704.04368.pdf)

Task prompt structure:

    Summarize the given document.
    Document: {tok_1 ... tok_n}
    Summary: {tok_1 ... tok_m}

Example from the XSum dataset:

    Document: {Part of the Broad Road was closed to traffic on Sunday at about
    18:00 GMT. The three adults and three children have been taken to
    Altnagelvin Hospital with non life-threatening injuries. The Fire Service,
    Northern Ireland Ambulance Service and police attended the crash. The
    Broad Road has since been reopened.}
    Summary: {Three adults and three children have been taken to hospital
    following a crash involving a tractor and a campervan in Limavady,
    County Londonderry}
"""


class Summarization(datasets.GeneratorBasedBuilder):
    """Builder for single-document summarization datasets (XSum, CNN/DailyMail)."""

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name=name, version=datasets.Version("1.0.0"), description="")
        for name in ["xsum", "xsum-sampled", "cnn-dm"]
    ]

    def _info(self):
        features = datasets.Features(
            {
                "article": datasets.Value("string"),
                "summary": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage="",
            license="",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # Each config keeps its data in a directory named after the config,
        # e.g. `xsum/train.jsonl`.
        train_json = dl_manager.download(os.path.join(self.config.name, "train.jsonl"))
        test_json = dl_manager.download(os.path.join(self.config.name, "test.jsonl"))
        val_json = dl_manager.download(os.path.join(self.config.name, "validation.jsonl"))

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"path": train_json},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"path": test_json},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"path": val_json},
            ),
        ]

    # Method parameters are unpacked from `gen_kwargs` as given in `_split_generators`.
    def _generate_examples(self, path):
        # Each line of the JSONL file is one example with "article" and "summary" keys.
        with open(path, encoding="utf-8") as f:
            for key, row in enumerate(f):
                yield key, json.loads(row)
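

# A minimal usage sketch, assuming this script is saved as `summarization.py`
# next to per-config data directories (e.g. `xsum/train.jsonl`). The file name
# and data layout are assumptions for illustration, not confirmed by the
# script itself.
if __name__ == "__main__":
    from datasets import load_dataset

    # Load one config of this builder and inspect the first validation example.
    ds = load_dataset("summarization.py", name="xsum", split="validation")
    print(ds[0]["article"][:200])
    print(ds[0]["summary"])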