# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ConcluGen Dataset"""
import json
import datasets
_CITATION = """\
@inproceedings{syed:2021,
author = {Shahbaz Syed and
Khalid Al Khatib and
Milad Alshomary and
Henning Wachsmuth and
Martin Potthast},
editor = {Chengqing Zong and
Fei Xia and
Wenjie Li and
Roberto Navigli},
title = {Generating Informative Conclusions for Argumentative Texts},
booktitle = {Findings of the Association for Computational Linguistics: {ACL/IJCNLP}
2021, Online Event, August 1-6, 2021},
pages = {3482--3493},
publisher = {Association for Computational Linguistics},
year = {2021},
url = {https://doi.org/10.18653/v1/2021.findings-acl.306},
doi = {10.18653/v1/2021.findings-acl.306}
}
"""
_DESCRIPTION = """\
The ConcluGen corpus is constructed for the task of argument summarization. It consists of 136,996 pairs of argumentative texts and their conclusions collected from the ChangeMyView subreddit, a web portal for argumentative discussions on controversial topics.
The corpus has three variants: aspects, topics, and targets. Each variant encodes the corresponding information via control codes, which provide additional argumentative knowledge for generating more informative conclusions.
"""

_HOMEPAGE = "https://zenodo.org/record/4818134"

_LICENSE = "https://creativecommons.org/licenses/by/4.0/legalcode"

_REPO = "https://huggingface.co/datasets/webis/conclugen/resolve/main"

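# Direct download URLs for each configuration's splits, keyed "<config>_<split>".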
_URLS = {
    "base_train": f"{_REPO}/base_train.jsonl",
    "base_validation": f"{_REPO}/base_validation.jsonl",
    "base_test": f"{_REPO}/base_test.jsonl",
    "aspects_train": f"{_REPO}/aspects_train.jsonl",
    "aspects_validation": f"{_REPO}/aspects_validation.jsonl",
    "aspects_test": f"{_REPO}/aspects_test.jsonl",
    "targets_train": f"{_REPO}/targets_train.jsonl",
    "targets_validation": f"{_REPO}/targets_validation.jsonl",
    "targets_test": f"{_REPO}/targets_test.jsonl",
    "topic_train": f"{_REPO}/topic_train.jsonl",
    "topic_validation": f"{_REPO}/topic_validation.jsonl",
    "topic_test": f"{_REPO}/topic_test.jsonl",
}


class ConcluGen(datasets.GeneratorBasedBuilder):
    """136,996 argument-conclusion pairs collected from the ChangeMyView subreddit."""

    VERSION = datasets.Version("1.1.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="base", version=VERSION, description="The base version of the dataset with no argumentative knowledge."),
        datasets.BuilderConfig(name="aspects", version=VERSION, description="Variation with argument aspects encoded."),
        datasets.BuilderConfig(name="targets", version=VERSION, description="Variation with conclusion targets encoded."),
        datasets.BuilderConfig(name="topic", version=VERSION, description="Variation with discussion topic encoded."),
    ]

    DEFAULT_CONFIG_NAME = "base"

    def _info(self):
        features = datasets.Features(
            {
                "argument": datasets.Value("string"),
                "conclusion": datasets.Value("string"),
                "id": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        # Download the three split files that match the selected configuration.
        train_file = dl_manager.download(_URLS[self.config.name + "_train"])
        validation_file = dl_manager.download(_URLS[self.config.name + "_validation"])
        test_file = dl_manager.download(_URLS[self.config.name + "_test"])
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"data_file": train_file},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"data_file": validation_file},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"data_file": test_file},
            ),
        ]

    def _generate_examples(self, data_file):
        """Yields examples as (key, example) tuples."""
        with open(data_file, encoding="utf-8") as f:
            for row in f:
                # Each line of the JSONL file is a standalone JSON object.
                data = json.loads(row)
                id_ = data["id"]
                yield id_, {
                    "argument": data["argument"],
                    "conclusion": data["conclusion"],
                    "id": id_,
                }
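
# For reference, each JSONL record read above is expected to have the shape
# below (schematic; values elided, and the non-base variants may embed
# control codes in the "argument" text, per _DESCRIPTION):
#
#     {"id": "...", "argument": "...", "conclusion": "..."}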