# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MLB dataset for data-to-text generation: Major League Baseball game statistics paired with human-written summaries."""


import json

import datasets


_CITATION = """\
@inproceedings{puduppully-etal-2019-data,
    title = "Data-to-text Generation with Entity Modeling",
    author = "Puduppully, Ratish and Dong, Li and Lapata, Mirella",
    booktitle = "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics",
    month = jul,
    year = "2019",
    address = "Florence, Italy",
    publisher = "Association for Computational Linguistics",
    url = "https://www.aclweb.org/anthology/P19-1195",
    doi = "10.18653/v1/P19-1195",
    pages = "2023--2035",
}
"""

_DESCRIPTION = """\
The MLB dataset for data to text generation contains Major League Baseball game statistics and their human-written summaries.
"""

_HOMEPAGE = "https://github.com/ratishsp/mlb-data-scripts"

_LICENSE = ""

# bz2-compressed JSON Lines archives (one game record per line), resolved
# relative to the location of this loading script.
_URLs = {
    "train": "train.jsonl.bz2",
    "validation": "validation.jsonl.bz2",
    "test": "test.jsonl.bz2",
}


class MlbDataToText(datasets.GeneratorBasedBuilder):
    """MLB dataset for data to text generation"""

    VERSION = datasets.Version("1.1.0")

    def _info(self):
        features = datasets.Features(
            {
                "home_name": datasets.Value("string"),
                # Per-player box-score statistics; "p_"-prefixed fields are
                # pitching stats, the remainder are batting/fielding stats.
                "box_score": [
                    {
                        "p_l": datasets.Value("string"),
                        "last_name": datasets.Value("string"),
                        "p_h": datasets.Value("string"),
                        "sac": datasets.Value("string"),
                        "p_bb": datasets.Value("string"),
                        "pos": datasets.Value("string"),
                        "ao": datasets.Value("string"),
                        "p_bf": datasets.Value("string"),
                        "cs": datasets.Value("string"),
                        "hbp": datasets.Value("string"),
                        "ab": datasets.Value("string"),
                        "full_name": datasets.Value("string"),
                        "p_w": datasets.Value("string"),
                        "go": datasets.Value("string"),
                        "fldg": datasets.Value("string"),
                        "p_bs": datasets.Value("string"),
                        "avg": datasets.Value("string"),
                        "p_r": datasets.Value("string"),
                        "p_s": datasets.Value("string"),
                        "lob": datasets.Value("string"),
                        "first_name": datasets.Value("string"),
                        "p_sv": datasets.Value("string"),
                        "p_so": datasets.Value("string"),
                        "p_save": datasets.Value("string"),
                        "p_hr": datasets.Value("string"),
                        "po": datasets.Value("string"),
                        "p_ip1": datasets.Value("string"),
                        "p_ip2": datasets.Value("string"),
                        "bb": datasets.Value("string"),
                        "ops": datasets.Value("string"),
                        "p_hld": datasets.Value("string"),
                        "bo": datasets.Value("string"),
                        "p_loss": datasets.Value("string"),
                        "e": datasets.Value("string"),
                        "p_game_score": datasets.Value("string"),
                        "p_win": datasets.Value("string"),
                        "a": datasets.Value("string"),
                        "p_era": datasets.Value("string"),
                        "d": datasets.Value("string"),
                        "p_out": datasets.Value("string"),
                        "h": datasets.Value("string"),
                        "p_er": datasets.Value("string"),
                        "p_np": datasets.Value("string"),
                        "hr": datasets.Value("string"),
                        "r": datasets.Value("string"),
                        "so": datasets.Value("string"),
                        "t": datasets.Value("string"),
                        "rbi": datasets.Value("string"),
                        "team": datasets.Value("string"),
                        "sb": datasets.Value("string"),
                        "slg": datasets.Value("string"),
                        "sf": datasets.Value("string"),
                        "obp": datasets.Value("string"),
                    }
                ],
datasets.Value("string"), } ], "home_city": datasets.Value("string"), "vis_name": datasets.Value("string"), "play_by_play": [{ "top": [{ "runs": datasets.Value("string"), "scorers": [ datasets.Value("string") ], "pitcher": datasets.Value("string"), "o": datasets.Value("string"), "b": datasets.Value("string"), "s": datasets.Value("string"), "batter": datasets.Value("string"), "b1": [ datasets.Value("string") ], "b2": [ datasets.Value("string") ], "b3": [ datasets.Value("string") ], "event": datasets.Value("string"), } ], "bottom": [{ "runs": datasets.Value("string"), "scorers": [ datasets.Value("string") ], "pitcher": datasets.Value("string"), "o": datasets.Value("string"), "b": datasets.Value("string"), "s": datasets.Value("string"), "batter": datasets.Value("string"), "b1": [ datasets.Value("string") ], "b2": [ datasets.Value("string") ], "b3": [ datasets.Value("string") ], "event": datasets.Value("string"), } ], "inning": datasets.Value("string") } ], "vis_line": { "innings": [{ "inn": datasets.Value("string"), "runs": datasets.Value("string") } ], "result": datasets.Value("string"), "team_runs": datasets.Value("string"), "team_hits": datasets.Value("string"), "team_errors": datasets.Value("string"), "team_name": datasets.Value("string"), "team_city": datasets.Value("string") }, "home_line": { "innings": [{ "inn": datasets.Value("string"), "runs": datasets.Value("string") } ], "result": datasets.Value("string"), "team_runs": datasets.Value("string"), "team_hits": datasets.Value("string"), "team_errors": datasets.Value("string"), "team_name": datasets.Value("string"), "team_city": datasets.Value("string") }, "vis_city": datasets.Value("string"), "day": datasets.Value("string"), "summary": [ datasets.Value("string"), ], "gem_id": datasets.Value("string") } ) return datasets.DatasetInfo( description=_DESCRIPTION, features=features, supervised_keys=None, homepage=_HOMEPAGE, license=_LICENSE, citation=_CITATION, ) def _split_generators(self, dl_manager): """Returns SplitGenerators.""" # TODO: This method is tasked with downloading/extracting the data and defining the splits depending on the configuration # If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLs # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files. # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive train_dir = dl_manager.download_and_extract(_URLs["train"]) validation_dir = dl_manager.download_and_extract(_URLs["validation"]) test_dir = dl_manager.download_and_extract(_URLs["test"]) return [ datasets.SplitGenerator( name=datasets.Split.TRAIN, # These kwargs will be passed to _generate_examples gen_kwargs={ "filepath": train_dir, "split": "train", }, ), datasets.SplitGenerator( name=datasets.Split.TEST, # These kwargs will be passed to _generate_examples gen_kwargs={ "filepath": test_dir, "split": "test" }, ), datasets.SplitGenerator( name=datasets.Split.VALIDATION, # These kwargs will be passed to _generate_examples gen_kwargs={ "filepath": validation_dir, "split": "validation", }, ), ] def _generate_examples( self, filepath, split # method parameters are unpacked from `gen_kwargs` as given in `_split_generators` ): """ Yields examples as (key, example) tuples. 
""" # This method handles input defined in _split_generators to yield (key, example) tuples from the dataset. # The `key` is here for legacy reason (tfds) and is not important in itself. with open(filepath, encoding="utf-8") as f: for id_, row in enumerate(f): data = json.loads(row) yield id_, { "home_name": data["home_name"], "box_score": data["box_score"], "home_city": data["home_city"], "vis_name": data["vis_name"], "play_by_play": data["play_by_play"], "vis_line": data["vis_line"], "vis_city": data["vis_city"], "day": data["day"], "home_line": data["home_line"], "summary": data["summary"], "gem_id": data["gem_id"] }