"""The Cornell eRulemaking Corpus (CDCP) dataset for English Argumentation Mining."""
import glob
import json
from os.path import abspath, isdir
from pathlib import Path
import datasets
_CITATION = """\
@inproceedings{niculae-etal-2017-argument,
title = "Argument Mining with Structured {SVM}s and {RNN}s",
author = "Niculae, Vlad and
Park, Joonsuk and
Cardie, Claire",
booktitle = "Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
month = jul,
year = "2017",
address = "Vancouver, Canada",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/P17-1091",
doi = "10.18653/v1/P17-1091",
pages = "985--995",
abstract = "We propose a novel factor graph model for argument mining, designed for settings in which the argumentative relations in a document do not necessarily form a tree structure. (This is the case in over 20{\\%} of the web comments dataset we release.) Our model jointly learns elementary unit type classification and argumentative relation prediction. Moreover, our model supports SVM and RNN parametrizations, can enforce structure constraints (e.g., transitivity), and can express dependencies between adjacent relations and propositions. Our approaches outperform unstructured baselines in both web comments and argumentative essay datasets.",
}
"""
_DESCRIPTION = "The CDCP dataset for English Argumentation Mining"
_HOMEPAGE = ""
_LICENSE = ""
# The HuggingFace datasets library doesn't host the datasets but only points to the original files.
# This can be an arbitrary nested dict/list of URLs (see `_split_generators` below).
_URL = "https://facultystaff.richmond.edu/~jpark/data/cdcp_acl17.zip"
_VERSION = datasets.Version("1.0.0")
_SPAN_CLASS_LABELS = ["fact", "policy", "reference", "testimony", "value"]
_RELATION_CLASS_LABELS = ["evidence", "reason"]
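# A minimal usage sketch (the script path below is illustrative, not part of the dataset):
#
#     from datasets import load_dataset
#     ds = load_dataset("path/to/cdcp.py")  # downloads and extracts _URL on first use
#     example = ds["train"][0]
#     print(example["text"][:80])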
class CDCP(datasets.GeneratorBasedBuilder):
"""CDCP is a argumentation mining dataset."""
BUILDER_CONFIGS = [datasets.BuilderConfig(name="default")]
DEFAULT_CONFIG_NAME = "default" # type: ignore
def _info(self):
features = datasets.Features(
{
"id": datasets.Value("string"),
"text": datasets.Value("string"),
"propositions": datasets.Sequence(
{
"start": datasets.Value("int32"),
"end": datasets.Value("int32"),
"label": datasets.ClassLabel(names=_SPAN_CLASS_LABELS),
                        # URLs are replaced with the string "__URL__" in the text; this field holds the original URL.
"url": datasets.Value("string"),
}
),
"relations": datasets.Sequence(
{
"head": datasets.Value("int32"),
"tail": datasets.Value("int32"),
"label": datasets.ClassLabel(names=_RELATION_CLASS_LABELS),
}
),
}
)
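        # Because `Sequence` wraps a dict, an accessed example exposes the nested
        # fields as parallel lists, e.g. (values are illustrative, not from the corpus):
        # {
        #     "id": "00195",
        #     "text": "...",
        #     "propositions": {"start": [0, 114], "end": [114, 209], "label": [...], "url": [...]},
        #     "relations": {"head": [1], "tail": [2], "label": [...]},
        # }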
return datasets.DatasetInfo(
# This is the description that will appear on the datasets page.
description=_DESCRIPTION,
# This defines the different columns of the dataset and their types
            features=features,
# If there's a common (input, target) tuple from the features,
# specify them here. They'll be used if as_supervised=True in
# builder.as_dataset.
supervised_keys=None,
# Homepage of the dataset for documentation
homepage=_HOMEPAGE,
# License for the dataset if available
license=_LICENSE,
# Citation for the dataset
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
# If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name
# dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLs
# It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
# By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
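        # `dl_manager.manual_dir` is set when the user passes `data_dir=...` to
        # `load_dataset`; it may point either to an already-extracted folder or
        # to the still-zipped archive, which we extract ourselves below.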
if dl_manager.manual_dir is not None:
base_path = abspath(dl_manager.manual_dir)
if not isdir(base_path):
base_path = dl_manager.extract(base_path)
else:
base_path = dl_manager.download_and_extract(_URL)
base_path = Path(base_path) / "cdcp"
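        # the archive unpacks to a top-level `cdcp/` folder containing `train/`
        # and `test/` subfolders of paired `.txt` / `.ann.json` files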
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN, gen_kwargs={"path": base_path / "train"}
),
datasets.SplitGenerator(
name=datasets.Split.TEST, gen_kwargs={"path": base_path / "test"}
),
]
def _generate_examples(self, path):
"""Yields examples."""
# This method will receive as arguments the `gen_kwargs` defined in the previous `_split_generators` method.
# It is in charge of opening the given file and yielding (key, example) tuples from the dataset
        # The key is not important; it's here mostly for legacy reasons (inherited from tfds)
_id = 0
text_file_names = sorted(glob.glob(f"{path}/*.txt"))
for text_file_name in text_file_names:
txt_fn = Path(text_file_name)
ex_id = txt_fn.stem
if ex_id == "00411":
                continue  # this example is broken (same text as 00410, but with some annotations missing)
ann_fn = txt_fn.with_suffix(".ann.json")
with open(txt_fn, encoding="utf-8") as f:
text = f.read()
with open(ann_fn, encoding="utf-8") as f:
annotations = json.load(f)
            # example content of annotations:
            # {
            #     'evidences': [[[8, 8], 7]],
            #     'prop_labels': ['testimony', 'testimony', 'value'],
            #     'prop_offsets': [[0, 114], [114, 209], [209, 235]],
            #     'reasons': [[[2, 2], 1], [[0, 0], 2]],
            #     'url': {
            #         "3": "http://usa.visa.com/personal/using_visa/checkout_fees/",
            #         "4": "http://usa.visa.com/download/merchants/surcharging-faq-by-merchants.pdf"
            #     }
            # }
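            # `prop_offsets` are character offsets into `text`, so each
            # proposition's surface string should be recoverable as `text[start:end]`.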
propositions = [
{
"start": start,
"end": end,
"label": label,
"url": annotations["url"].get(str(idx), ""),
}
for idx, ((start, end), label) in enumerate(
zip(annotations["prop_offsets"], annotations["prop_labels"])
)
]
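            # Each raw relation links a *range* of tail propositions
            # [tail_first_idx, tail_last_idx] to one head proposition; we flatten
            # every index in that range into its own binary relation.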
relations = []
for (tail_first_idx, tail_last_idx), head_idx in annotations["evidences"]:
for tail_idx in range(tail_first_idx, tail_last_idx + 1):
relations.append({"head": head_idx, "tail": tail_idx, "label": "evidence"})
for (tail_first_idx, tail_last_idx), head_idx in annotations["reasons"]:
for tail_idx in range(tail_first_idx, tail_last_idx + 1):
relations.append({"head": head_idx, "tail": tail_idx, "label": "reason"})
yield _id, {
"id": ex_id,
"text": text,
"propositions": propositions,
"relations": relations,
}
_id += 1