import csv
import json
import os

import datasets

_CITATION = """\
@inproceedings{e2e_cleaned,
  address = {Tokyo, Japan},
  title = {Semantic {Noise} {Matters} for {Neural} {Natural} {Language} {Generation}},
  url = {https://www.aclweb.org/anthology/W19-8652/},
  booktitle = {Proceedings of the 12th {International} {Conference} on {Natural} {Language} {Generation} ({INLG} 2019)},
  author = {Dušek, Ondřej and Howcroft, David M and Rieser, Verena},
  year = {2019},
  pages = {421--426},
}
"""

_DESCRIPTION = """\
The E2E dataset is designed for a limited-domain data-to-text task --
generation of restaurant descriptions/recommendations based on up to 8
different attributes (name, area, price range etc.).
"""

_URLs = {
    "train": "https://github.com/tuetschek/e2e-cleaning/raw/master/cleaned-data/train-fixed.no-ol.csv",
    "validation": "https://raw.githubusercontent.com/jordiclive/GEM_datasets/main/e2e/validation.json",
    "test": "https://raw.githubusercontent.com/jordiclive/GEM_datasets/main/e2e/test.json",
    "challenge_set": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_challenge_sets/e2e_nlg.zip",
}


class E2ENlg(datasets.GeneratorBasedBuilder):
    """Cleaned E2E NLG dataset (GEM variant): restaurant meaning representations paired with textual references."""

    VERSION = datasets.Version("1.0.1")
    DEFAULT_CONFIG_NAME = "e2e_nlg"

    def _info(self):
        features = datasets.Features(
            {
                "gem_id": datasets.Value("string"),
                "gem_parent_id": datasets.Value("string"),
                "meaning_representation": datasets.Value("string"),
                "target": datasets.Value("string"),
                "references": [datasets.Value("string")],
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=datasets.info.SupervisedKeysData(
                input="meaning_representation", output="target"
            ),
            homepage="http://www.macs.hw.ac.uk/InteractionLab/E2E/",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators for the standard splits and the GEM challenge sets."""
        dl_dir = dl_manager.download_and_extract(_URLs)
        challenge_sets = [
            ("challenge_train_sample", "train_e2e_nlg_RandomSample500.json"),
            ("challenge_validation_sample", "validation_e2e_nlg_RandomSample500.json"),
            ("challenge_test_scramble", "test_e2e_nlg_ScrambleInputStructure500.json"),
        ]
        return [
            datasets.SplitGenerator(
                name=spl, gen_kwargs={"filepath": dl_dir[spl], "split": spl}
            )
            for spl in ["train", "validation", "test"]
        ] + [
            datasets.SplitGenerator(
                name=challenge_split,
                gen_kwargs={
                    "filepath": os.path.join(dl_dir["challenge_set"], "e2e_nlg", filename),
                    "split": challenge_split,
                },
            )
            for challenge_split, filename in challenge_sets
        ]

    def _generate_examples(self, filepath, split, filepaths=None, lang=None):
        """Yields examples."""
        if split.startswith("challenge"):
            # Challenge sets ship as JSON with the GEM fields already present;
            # only the gem_id/gem_parent_id bookkeeping needs updating.
            with open(filepath, encoding="utf-8") as f:
                exples = json.load(f)
            if isinstance(exples, dict):
                assert len(exples) == 1, "multiple entries found"
                exples = list(exples.values())[0]
            for id_, exple in enumerate(exples):
                if len(exple) == 0:
                    continue
                exple["gem_parent_id"] = exple["gem_id"]
                exple["gem_id"] = f"e2e_nlg-{split}-{id_}"
                yield id_, exple
        elif split.startswith("test") or split.startswith("validation"):
            # Validation and test come as JSON with multiple references per
            # meaning representation; the first reference doubles as the target.
            # (This must be an elif: challenge splits would otherwise fall
            # through to the CSV branch below.)
            with open(filepath, encoding="utf-8") as f:
                exples = json.load(f)
            if isinstance(exples, dict):
                assert len(exples) == 1, "multiple entries found"
                exples = list(exples.values())[0]
            for id_, exple in enumerate(exples):
                if len(exple) == 0:
                    continue
                yield id_, {
                    "gem_id": f"e2e_nlg-{split}-{id_}",
                    "gem_parent_id": f"e2e_nlg-{split}-{id_}",
                    "meaning_representation": exple["meaning_representation"],
                    "target": exple["references"][0],
                    "references": exple["references"],
                }
        else:
            # The training split is the original cleaned CSV with one
            # reference per row and no multi-reference list.
            with open(filepath, encoding="utf-8") as f:
                reader = csv.DictReader(f)
                for id_, example in enumerate(reader):
                    yield id_, {
                        "gem_id": f"e2e_nlg-{split}-{id_}",
                        "gem_parent_id": f"e2e_nlg-{split}-{id_}",
                        "meaning_representation": example["mr"],
                        "target": example["ref"],
                        "references": [],
                    }
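

# Usage sketch (illustrative, not part of the loader). It assumes this file is
# saved locally and that the installed `datasets` version still supports local
# loading scripts; nothing below is required by the builder above.
if __name__ == "__main__":
    # Build every split (downloads the CSV/JSON/zip resources in _URLs) and
    # print one training pair to sanity-check the schema defined in _info().
    dataset = datasets.load_dataset(__file__)
    example = dataset["train"][0]
    print(example["meaning_representation"])
    print(example["target"])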