# totto / totto.py
import json
import os
import datasets
_CITATION = """\
@inproceedings{parikh2020totto,
  title={{ToTTo}: A Controlled Table-To-Text Generation Dataset},
  author={Parikh, Ankur P and Wang, Xuezhi and Gehrmann, Sebastian and Faruqui, Manaal and Dhingra, Bhuwan and Yang, Diyi and Das, Dipanjan},
  booktitle={Proceedings of EMNLP},
  year={2020}
}
"""
_DESCRIPTION = """\
ToTTo is an open-domain English table-to-text dataset with over 120,000 training examples that proposes a controlled generation task: given a Wikipedia table and a set of highlighted table cells, produce a one-sentence description.
"""
_URLs = {
"totto": {
"data": "https://storage.googleapis.com/totto-public/totto_data.zip",
"challenge_set": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_challenge_sets/totto.zip",
},
}
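# "data" points at the official ToTTo release (train/dev/unlabeled test JSONL files),
# while "challenge_set" points at the GEM challenge subsets (500-example train and
# validation samples plus a scrambled-input test set) registered as extra splits below.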
class Totto(datasets.GeneratorBasedBuilder):
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="totto",
            version=datasets.Version("1.0.0"),
            description="GEM benchmark: struct2text task",
        )
    ]
    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "gem_id": datasets.Value("string"),
                    "gem_parent_id": datasets.Value("string"),
                    "totto_id": datasets.Value("int32"),
                    "table_page_title": datasets.Value("string"),
                    "table_webpage_url": datasets.Value("string"),
                    "table_section_title": datasets.Value("string"),
                    "table_section_text": datasets.Value("string"),
                    "table": [
                        [
                            {
                                "column_span": datasets.Value("int32"),
                                "is_header": datasets.Value("bool"),
                                "row_span": datasets.Value("int32"),
                                "value": datasets.Value("string"),
                            }
                        ]
                    ],
                    "highlighted_cells": [[datasets.Value("int32")]],
                    "example_id": datasets.Value("string"),
                    "sentence_annotations": [
                        {
                            "original_sentence": datasets.Value("string"),
                            "sentence_after_deletion": datasets.Value("string"),
                            "sentence_after_ambiguity": datasets.Value("string"),
                            "final_sentence": datasets.Value("string"),
                        }
                    ],
                    "overlap_subset": datasets.Value("string"),
                    "target": datasets.Value("string"),  # single target for train
                    "references": [datasets.Value("string")],
                },
            ),
            supervised_keys=None,
            homepage="",
            citation=_CITATION,
        )
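    # Schema notes: `table` is a list of rows, each row a list of cell dicts;
    # `highlighted_cells` holds [row_index, column_index] pairs pointing into that
    # table (following the ToTTo release format). `target` carries a single
    # reference sentence (one example per annotation for train), while `references`
    # collects every final sentence for validation and stays empty for the
    # unlabeled test split.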
    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        dl_dir = dl_manager.download_and_extract(_URLs[self.config.name])
        challenge_sets = [
            ("challenge_train_sample", "train_totto_RandomSample500.json"),
            ("challenge_validation_sample", "validation_totto_RandomSample500.json"),
            ("challenge_test_scramble", "test_totto_ScrambleInputStructure500.json"),
        ]
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": os.path.join(dl_dir["data"], "totto_data/totto_train_data.jsonl"),
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "filepath": os.path.join(dl_dir["data"], "totto_data/totto_dev_data.jsonl"),
                    "split": "validation",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": os.path.join(dl_dir["data"], "totto_data/unlabeled_totto_test_data.jsonl"),
                    "split": "test",
                },
            ),
        ] + [
            datasets.SplitGenerator(
                name=challenge_split,
                gen_kwargs={
                    "filepath": os.path.join(dl_dir["challenge_set"], self.config.name, filename),
                    "split": challenge_split,
                },
            )
            for challenge_split, filename in challenge_sets
        ]
    def _generate_examples(self, filepath, split, filepaths=None, lang=None):
        """Yields examples."""
        if "challenge" in split:
            with open(filepath, encoding="utf-8") as f:
                exples = json.load(f)
            if isinstance(exples, dict):
                assert len(exples) == 1, "multiple entries found"
                exples = list(exples.values())[0]
            for id_, exple in enumerate(exples):
                if len(exple) == 0:
                    continue
                exple["gem_parent_id"] = exple["gem_id"]
                exple["gem_id"] = f"{self.config.name}-{split}-{id_}"
                yield id_, exple
        else:
            with open(filepath, "r", encoding="utf-8") as json_file:
                json_list = list(json_file)
            id_ = -1
            i = -1
            for json_str in json_list:
                result = json.loads(json_str)
                if split == "train":
                    i += 1
                    for sentence in result["sentence_annotations"]:
                        id_ += 1
                        response = {
                            "gem_id": f"{self.config.name}-{split}-{id_}",
                            "gem_parent_id": f"{self.config.name}-{split}-{id_}",
                            "totto_id": i,
                            "table_page_title": result["table_page_title"],
                            "table_webpage_url": result["table_webpage_url"],
                            "table_section_title": result["table_section_title"],
                            "table_section_text": result["table_section_text"],
                            "table": result["table"],
                            "highlighted_cells": result["highlighted_cells"],
                            "example_id": str(result["example_id"]),
                            "overlap_subset": "none",
                            "sentence_annotations": [sentence],
                            "references": [],
                            "target": sentence["final_sentence"],
                        }
                        yield id_, response
                else:
                    id_ += 1
                    response = {
                        "gem_id": f"{self.config.name}-{split}-{id_}",
                        "gem_parent_id": f"{self.config.name}-{split}-{id_}",
                        "totto_id": id_,
                        "table_page_title": result["table_page_title"],
                        "table_webpage_url": result["table_webpage_url"],
                        "table_section_title": result["table_section_title"],
                        "table_section_text": result["table_section_text"],
                        "table": result["table"],
                        "highlighted_cells": result["highlighted_cells"],
                        "example_id": str(result["example_id"]),
                        "overlap_subset": str(result["overlap_subset"]),
                    }
                    response["sentence_annotations"] = [] if split == "test" else result["sentence_annotations"]
                    response["references"] = [
                        sentence["final_sentence"] for sentence in response["sentence_annotations"]
                    ]
                    response["target"] = response["references"][0] if len(response["references"]) > 0 else ""
                    yield id_, response
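
# Minimal usage sketch, not part of the loading script itself: this assumes the file
# is consumed as a local `datasets` loading script with the "totto" config. The exact
# load_dataset arguments (and whether trust_remote_code is required) may vary across
# library versions.
if __name__ == "__main__":
    dataset = datasets.load_dataset(__file__, "totto", split="validation")
    example = dataset[0]
    # Print a few fields to confirm the schema defined in _info() above.
    print(example["table_page_title"])
    print(example["highlighted_cells"][:5])
    print(example["references"])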