# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MLB dataset for data-to-text generation: Major League Baseball game statistics paired with human-written summaries."""


import json

import datasets


_CITATION = """\
@inproceedings{puduppully-etal-2019-data,
    title = "Data-to-text Generation with Entity Modeling",
    author = "Puduppully, Ratish and Dong, Li and Lapata, Mirella",
    booktitle = "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics",
    month = jul,
    year = "2019",
    address = "Florence, Italy",
    publisher = "Association for Computational Linguistics",
    url = "https://www.aclweb.org/anthology/P19-1195",
    doi = "10.18653/v1/P19-1195",
    pages = "2023--2035",
}
"""

_DESCRIPTION = """\
The MLB dataset for data-to-text generation contains Major League Baseball game
statistics and their human-written summaries.
"""

_HOMEPAGE = "https://github.com/ratishsp/mlb-data-scripts"

_LICENSE = ""

_URLs = {
    "train": "train.jsonl",
    "validation": "validation.jsonl",
    "test": "test.jsonl",
}


class MlbDataToText(datasets.GeneratorBasedBuilder):
    """MLB dataset for data-to-text generation."""

    VERSION = datasets.Version("1.1.0")

    def _info(self):
        # NOTE: the nested record fields (`box_score`, `play_by_play`, `vis_line`,
        # `home_line`) and the token list `summary` are declared with bare Python
        # types. `datasets.Features` expects feature types (e.g. `datasets.Value`,
        # `datasets.Sequence`, or nested dicts of them), so these placeholders need
        # to be replaced with an explicit schema matching the JSON structure before
        # the builder can encode examples.
        features = datasets.Features(
            {
                "home_name": datasets.Value("string"),
                "box_score": dict,
                "home_city": datasets.Value("string"),
                "vis_name": datasets.Value("string"),
                "play_by_play": dict,
                "vis_line": dict,
                "vis_city": datasets.Value("string"),
                "day": datasets.Value("string"),
                "home_line": dict,
                "summary": list,
                "gem_id": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        # `dl_manager` accepts any (nested) structure of URLs or local paths and
        # returns the same structure with each entry replaced by the path to the
        # downloaded (and, for archives, extracted) local file.
        data_dir = dl_manager.download_and_extract(_URLs)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "filepath": data_dir["train"],
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "filepath": data_dir["test"],
                    "split": "test",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "filepath": data_dir["validation"],
                    "split": "validation",
                },
            ),
        ]

    def _generate_examples(
        self, filepath, split  # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
    ):
        """Yields examples as (key, example) tuples."""
        # Each line of the JSONL file is one game record. The key exists for legacy
        # (tfds) reasons and is not important in itself; here it is the line index.
        with open(filepath, encoding="utf-8") as f:
            for id_, row in enumerate(f):
                data = json.loads(row)
                yield id_, {
                    "home_name": data["home_name"],
                    "box_score": data["box_score"],
                    "home_city": data["home_city"],
                    "vis_name": data["vis_name"],
                    "play_by_play": data["play_by_play"],
                    "vis_line": data["vis_line"],
                    "vis_city": data["vis_city"],
                    "day": data["day"],
                    "home_line": data["home_line"],
                    "summary": data["summary"],
                    "gem_id": data["gem_id"],
                }
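

# A minimal usage sketch (not part of the original script): assuming this file is
# saved as mlb_data_to_text.py, that the three *.jsonl files listed in _URLs sit in
# the same directory, and that the installed `datasets` version still supports
# dataset loading scripts, the builder could be exercised locally with:
#
#     from datasets import load_dataset
#
#     dataset = load_dataset("path/to/mlb_data_to_text.py")
#     print(dataset["train"][0]["home_name"])
#
# The file name, path, and presence of the JSONL files are assumptions and are not
# guaranteed by the script itself.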