|
import copy |
|
import json |
|
import os |
|
|
|
import datasets |
|
|
|
# BibTeX entry surfaced via DatasetInfo.citation.
# NOTE(review): "\@" is not a recognized escape sequence, so the backslash is
# kept literally in the string (and raises a DeprecationWarning on newer
# Pythons) — confirm whether the citation is meant to start with "\@" or "@".
_CITATION = """\@inproceedings{parikh2020totto,

title={{ToTTo}: A Controlled Table-To-Text Generation Dataset},

author={Parikh, Ankur P and Wang, Xuezhi and Gehrmann, Sebastian and Faruqui, Manaal and Dhingra, Bhuwan and Yang, Diyi and Das, Dipanjan},

booktitle={Proceedings of EMNLP},

year={2020}

}

"""

# Human-readable summary surfaced via DatasetInfo.description.
_DESCRIPTION = """\

ToTTo is an open-domain English table-to-text dataset with over 120,000 training examples that proposes a controlled generation task: given a Wikipedia table and a set of highlighted table cells, produce a one-sentence description.

"""

# Download locations keyed by config name: the official ToTTo data archive
# plus the GEM challenge-set archive.
_URLs = {

    "totto": {

        "data": "https://storage.googleapis.com/totto-public/totto_data.zip",

        "challenge_set": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_challenge_sets/totto.zip",

    },

}
|
|
|
|
|
def _add_adjusted_col_offsets(table): |
|
"""Add adjusted column offsets to take into account multi-column cells.""" |
|
adjusted_table = [] |
|
for row in table: |
|
real_col_index = 0 |
|
adjusted_row = [] |
|
for cell in row: |
|
adjusted_cell = copy.deepcopy(cell) |
|
adjusted_cell["adjusted_col_start"] = real_col_index |
|
adjusted_cell["adjusted_col_end"] = ( |
|
adjusted_cell["adjusted_col_start"] + adjusted_cell["column_span"] |
|
) |
|
real_col_index += adjusted_cell["column_span"] |
|
adjusted_row.append(adjusted_cell) |
|
adjusted_table.append(adjusted_row) |
|
return adjusted_table |
|
|
|
|
|
def _get_heuristic_row_headers(adjusted_table, row_index, col_index): |
|
"""Heuristic to find row headers.""" |
|
row_headers = [] |
|
row = adjusted_table[row_index] |
|
for i in range(0, col_index): |
|
if row[i]["is_header"]: |
|
row_headers.append(row[i]) |
|
return row_headers |
|
|
|
|
|
def _get_heuristic_col_headers(adjusted_table, row_index, col_index): |
|
"""Heuristic to find column headers.""" |
|
adjusted_cell = adjusted_table[row_index][col_index] |
|
adjusted_col_start = adjusted_cell["adjusted_col_start"] |
|
adjusted_col_end = adjusted_cell["adjusted_col_end"] |
|
col_headers = [] |
|
for r in range(0, row_index): |
|
row = adjusted_table[r] |
|
for cell in row: |
|
if ( |
|
cell["adjusted_col_start"] < adjusted_col_end |
|
and cell["adjusted_col_end"] > adjusted_col_start |
|
): |
|
if cell["is_header"]: |
|
col_headers.append(cell) |
|
|
|
return col_headers |
|
|
|
|
|
def get_highlighted_subtable(table, cell_indices, with_heuristic_headers=False):
    """Extract the highlighted cells of *table* as a list of dicts.

    Each entry has keys "cell", "row_headers" and "col_headers"; the header
    lists are populated heuristically only when *with_heuristic_headers* is
    True, otherwise they are empty.
    """
    adjusted_table = _add_adjusted_col_offsets(table)

    subtable = []
    for row_index, col_index in cell_indices:
        if with_heuristic_headers:
            row_headers = _get_heuristic_row_headers(
                adjusted_table, row_index, col_index
            )
            col_headers = _get_heuristic_col_headers(
                adjusted_table, row_index, col_index
            )
        else:
            row_headers, col_headers = [], []

        subtable.append(
            {
                "cell": table[row_index][col_index],
                "row_headers": row_headers,
                "col_headers": col_headers,
            }
        )

    return subtable
|
|
|
|
|
def linearize_subtable(subtable, table_page_title, table_section_title):
    """Linearize the highlighted subtable into a single tagged string.

    Emits optional <page_title>/<section_title> segments, then a <table>
    segment where each highlighted cell appears as
    <cell> value <col_header>...</col_header> <row_header>...</row_header> </cell>.
    """
    pieces = []
    if table_page_title:
        pieces.append("<page_title> " + table_page_title + " </page_title> ")
    if table_section_title:
        pieces.append("<section_title> " + table_section_title + " </section_title> ")
    pieces.append("<table> ")

    for entry in subtable:
        pieces.append("<cell> " + entry["cell"]["value"] + " ")
        # Column headers are emitted before row headers, matching the
        # canonical ToTTo linearization order.
        for header in entry["col_headers"]:
            pieces.append("<col_header> " + header["value"] + " </col_header> ")
        for header in entry["row_headers"]:
            pieces.append("<row_header> " + header["value"] + " </row_header> ")
        pieces.append("</cell> ")

    pieces.append("</table>")
    return "".join(pieces)
|
|
|
|
|
def linearize(example):
    """Linearize one ToTTo example dict into its tagged string form.

    Pulls the table, titles and highlighted-cell indices out of *example*,
    extracts the highlighted subtable with heuristic headers enabled, and
    returns the linearized string.
    """
    subtable = get_highlighted_subtable(
        table=example["table"],
        cell_indices=example["highlighted_cells"],
        with_heuristic_headers=True,
    )
    return linearize_subtable(
        subtable=subtable,
        table_page_title=example["table_page_title"],
        table_section_title=example["table_section_title"],
    )
|
|
|
|
|
class Totto(datasets.GeneratorBasedBuilder):
    """GEM version of the ToTTo controlled table-to-text dataset.

    Each example pairs a Wikipedia table (with highlighted cells) with
    one-sentence descriptions. Train examples are flattened to one example
    per sentence annotation; validation/test keep all annotations together.
    """

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="totto",
            version=datasets.Version("1.0.0"),
            # Plain string: previously an f-string with no placeholders.
            description="GEM benchmark: struct2text task",
        )
    ]

    def _info(self):
        """Declare the feature schema shared by all splits."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "gem_id": datasets.Value("string"),
                    "gem_parent_id": datasets.Value("string"),
                    "totto_id": datasets.Value("int32"),
                    "table_page_title": datasets.Value("string"),
                    "table_webpage_url": datasets.Value("string"),
                    "table_section_title": datasets.Value("string"),
                    "table_section_text": datasets.Value("string"),
                    # A table is a list of rows; each row is a list of cells.
                    "table": [
                        [
                            {
                                "column_span": datasets.Value("int32"),
                                "is_header": datasets.Value("bool"),
                                "row_span": datasets.Value("int32"),
                                "value": datasets.Value("string"),
                            }
                        ]
                    ],
                    # (row_index, col_index) pairs of highlighted cells.
                    "highlighted_cells": [[datasets.Value("int32")]],
                    "example_id": datasets.Value("string"),
                    "sentence_annotations": [
                        {
                            "original_sentence": datasets.Value("string"),
                            "sentence_after_deletion": datasets.Value("string"),
                            "sentence_after_ambiguity": datasets.Value("string"),
                            "final_sentence": datasets.Value("string"),
                        }
                    ],
                    "overlap_subset": datasets.Value("string"),
                    "target": datasets.Value("string"),
                    "references": [datasets.Value("string")],
                    "linearized_input": datasets.Value("string"),
                },
            ),
            supervised_keys=None,
            homepage="",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators for train/validation/test plus challenge sets."""
        dl_dir = dl_manager.download_and_extract(_URLs[self.config.name])
        challenge_sets = [
            ("challenge_train_sample", "train_totto_RandomSample500.json"),
            ("challenge_validation_sample", "validation_totto_RandomSample500.json"),
        ]

        standard_splits = [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": os.path.join(
                        dl_dir["data"], "totto_data/totto_train_data.jsonl"
                    ),
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "filepath": os.path.join(
                        dl_dir["data"], "totto_data/totto_dev_data.jsonl"
                    ),
                    "split": "validation",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": os.path.join(
                        dl_dir["data"], "totto_data/unlabeled_totto_test_data.jsonl"
                    ),
                    "split": "test",
                },
            ),
        ]
        challenge_splits = [
            datasets.SplitGenerator(
                name=challenge_split,
                gen_kwargs={
                    "filepath": os.path.join(
                        dl_dir["challenge_set"], self.config.name, filename
                    ),
                    "split": challenge_split,
                },
            )
            for challenge_split, filename in challenge_sets
        ]
        return standard_splits + challenge_splits

    def _generate_examples(self, filepath, split, filepaths=None, lang=None):
        """Yields (key, example) pairs.

        Challenge sets are a single JSON file; the standard splits are JSONL
        with one example per line. Every example gains a linearized_input
        string computed by the module-level linearize() helper.
        """
        if "challenge" in split:
            # Fix: the original json.load(open(...)) leaked the file handle.
            with open(filepath, encoding="utf-8") as f:
                exples = json.load(f)
            if isinstance(exples, dict):
                assert len(exples) == 1, "multiple entries found"
                exples = list(exples.values())[0]
            for id_, exple in enumerate(exples):
                if len(exple) == 0:
                    continue
                exple["gem_parent_id"] = exple["gem_id"]
                exple["gem_id"] = f"{self.config.name}-{split}-{id_}"
                exple["linearized_input"] = linearize(exple)
                yield id_, exple
        else:
            with open(filepath, "r", encoding="utf-8") as json_file:
                id_ = -1  # running GEM example counter
                i = -1  # running source-line counter (totto_id for train)
                # Iterate the JSONL file lazily instead of materializing
                # every line with list(json_file).
                for json_str in json_file:
                    result = json.loads(json_str)
                    linearized_input = linearize(result)
                    if split == "train":
                        # Flatten: one example per sentence annotation.
                        i += 1
                        for sentence in result["sentence_annotations"]:
                            id_ += 1
                            response = {
                                "gem_id": f"{self.config.name}-{split}-{id_}",
                                "gem_parent_id": f"{self.config.name}-{split}-{id_}",
                                "totto_id": i,
                                "table_page_title": result["table_page_title"],
                                "table_webpage_url": result["table_webpage_url"],
                                "table_section_title": result["table_section_title"],
                                "table_section_text": result["table_section_text"],
                                "table": result["table"],
                                "highlighted_cells": result["highlighted_cells"],
                                "example_id": str(result["example_id"]),
                                "overlap_subset": "none",
                                "sentence_annotations": [sentence],
                                "references": [sentence["final_sentence"]],
                                "target": sentence["final_sentence"],
                                "linearized_input": linearized_input,
                            }
                            yield id_, response
                    else:
                        id_ += 1
                        response = {
                            "gem_id": f"{self.config.name}-{split}-{id_}",
                            "gem_parent_id": f"{self.config.name}-{split}-{id_}",
                            "totto_id": id_,
                            "table_page_title": result["table_page_title"],
                            "table_webpage_url": result["table_webpage_url"],
                            "table_section_title": result["table_section_title"],
                            "table_section_text": result["table_section_text"],
                            "table": result["table"],
                            "highlighted_cells": result["highlighted_cells"],
                            "example_id": str(result["example_id"]),
                            "overlap_subset": str(result["overlap_subset"]),
                            "linearized_input": linearized_input,
                        }
                        # The unlabeled test split carries no annotations.
                        response["sentence_annotations"] = (
                            [] if split == "test" else result["sentence_annotations"]
                        )
                        response["references"] = [
                            sentence["final_sentence"]
                            for sentence in response["sentence_annotations"]
                        ]
                        response["target"] = (
                            response["references"][0]
                            if response["references"]
                            else ""
                        )
                        yield id_, response