import json
import os
import datasets

_CITATION = """\
@inproceedings{lin-etal-2020-commongen,
title = "{C}ommon{G}en: A Constrained Text Generation Challenge for Generative Commonsense Reasoning",
author = "Lin, Bill Yuchen and
Zhou, Wangchunshu and
Shen, Ming and
Zhou, Pei and
Bhagavatula, Chandra and
Choi, Yejin and
Ren, Xiang",
booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2020",
month = nov,
year = "2020",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/2020.findings-emnlp.165",
pages = "1823--1840",
}
"""
_DESCRIPTION = """\
CommonGen is a constrained text generation task, associated with a benchmark
dataset, that explicitly tests machines for generative commonsense reasoning.
Given a set of common concepts, the task is to generate a coherent sentence
describing an everyday scenario using these concepts.
"""
_URLs = {
"data": "https://storage.googleapis.com/huggingface-nlp/datasets/common_gen/commongen_data.zip",
"challenge_set": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_challenge_sets/common_gen.zip",
}


class CommonGen(datasets.GeneratorBasedBuilder):
    """Builder for the CommonGen dataset, including the GEM challenge sets."""

    VERSION = datasets.Version("1.0.0")
    DEFAULT_CONFIG_NAME = "common_gen"

def _info(self):
features = datasets.Features(
{
"gem_id": datasets.Value("string"),
"gem_parent_id": datasets.Value("string"),
"concept_set_id": datasets.Value("int32"),
"concepts": [datasets.Value("string")],
"target": datasets.Value("string"), # single target for train
"references": [
datasets.Value("string")
], # multiple references for validation
}
)
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=features,
supervised_keys=datasets.info.SupervisedKeysData(
input="concepts", output="target"
),
homepage="https://inklab.usc.edu/CommonGen/",
citation=_CITATION,
        )

def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
dl_dir = dl_manager.download_and_extract(_URLs)
challenge_sets = [
("challenge_train_sample", "train_common_gen_RandomSample500.json"),
(
"challenge_validation_sample",
"validation_common_gen_RandomSample500.json",
),
(
"challenge_test_scramble",
"test_common_gen_ScrambleInputStructure500.json",
),
]
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={
"filepath": os.path.join(dl_dir["data"], "commongen.train.jsonl"),
"split": "train",
},
),
datasets.SplitGenerator(
name=datasets.Split.VALIDATION,
gen_kwargs={
"filepath": os.path.join(dl_dir["data"], "commongen.dev.jsonl"),
"split": "validation",
},
),
datasets.SplitGenerator(
name=datasets.Split.TEST,
gen_kwargs={
"filepath": os.path.join(
dl_dir["data"], "commongen.test_noref.jsonl"
),
"split": "test",
},
),
] + [
datasets.SplitGenerator(
name=challenge_split,
gen_kwargs={
"filepath": os.path.join(
dl_dir["challenge_set"], "common_gen", filename
),
"split": challenge_split,
},
)
for challenge_split, filename in challenge_sets
        ]

def _generate_examples(self, filepath, split, filepaths=None, lang=None):
"""Yields examples."""
        if split.startswith("challenge"):
            # Challenge sets ship as a single JSON file: either a list of examples
            # or a dict with a single key whose value is that list.
            with open(filepath, encoding="utf-8") as f:
                exples = json.load(f)
if isinstance(exples, dict):
assert len(exples) == 1, "multiple entries found"
exples = list(exples.values())[0]
for id_, exple in enumerate(exples):
if len(exple) == 0:
continue
exple["gem_parent_id"] = exple["gem_id"]
exple["gem_id"] = f"common_gen-{split}-{id_}"
yield id_, exple
else:
with open(filepath, encoding="utf-8") as f:
                id_ = -1  # running example index within the split
                i = -1  # running concept-set index (only advanced for train)
for row in f:
row = row.replace(", }", "}") # Fix possible JSON format error
data = json.loads(row)
                    concepts = data["concept_set"].split("#")
                    if split == "train":
                        i += 1
                        # Each reference sentence ("scene") becomes its own training
                        # example; all of them share the same concept_set_id.
                        for scene in data["scene"]:
id_ += 1
yield id_, {
"gem_id": f"common_gen-{split}-{id_}",
"gem_parent_id": f"common_gen-{split}-{id_}",
"concept_set_id": i,
"concepts": concepts,
"target": scene,
"references": [],
}
                    else:
                        # Validation/test: one example per concept set, with every scene
                        # kept as a reference; the test file ships without references.
                        id_ += 1
yield id_, {
"gem_id": f"common_gen-{split}-{id_}",
"gem_parent_id": f"common_gen-{split}-{id_}",
"concept_set_id": id_,
"concepts": concepts,
"target": "" if split == "test" else data["scene"][0],
"references": [] if split == "test" else data["scene"],
}
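

# Minimal usage sketch (illustration only, assuming this file is saved locally and
# the download URLs above are still reachable). Newer versions of `datasets` may
# additionally require `trust_remote_code=True` when loading a local script.
if __name__ == "__main__":
    dataset = datasets.load_dataset(__file__, split="validation")
    print(dataset[0]["concepts"], "->", dataset[0]["references"])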