Tasks: Question Answering
Languages: English
Size: 10K<n<100K
Tags: multihop-tabular-text-qa
File size: 6,572 Bytes
# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""HybridQA: A Dataset of Multi-Hop Question Answering over Tabular and Textual Data"""
import json
import os
import datasets
_CITATION = """\
@article{chen2020hybridqa,
title={HybridQA: A Dataset of Multi-Hop Question Answering over Tabular and Textual Data},
author={Chen, Wenhu and Zha, Hanwen and Chen, Zhiyu and Xiong, Wenhan and Wang, Hong and Wang, William},
journal={Findings of EMNLP 2020},
year={2020}
}
"""
_DESCRIPTION = """\
Existing question answering datasets focus on dealing with homogeneous information, based either on text alone or \
on KB/Table information alone. However, as human knowledge is distributed over heterogeneous forms, \
using homogeneous information alone might lead to severe coverage problems. \
To fill in the gap, we present HybridQA, a new large-scale question-answering dataset that \
requires reasoning on heterogeneous information. Each question is aligned with a Wikipedia table \
and multiple free-form corpora linked with the entities in the table. The questions are designed \
to aggregate both tabular information and text information, i.e., \
lack of either form would render the question unanswerable.
"""
_HOMEPAGE = "https://hybridqa.github.io/index.html"
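# Archive of the WikiTables-WithLinks data (tokenized tables plus the pages linked from table cells).
# A bare file name is used here; it is assumed to be shipped alongside this script in the dataset repository.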
_WIKI_TABLES_GIT_ARCHIVE_URL = "WikiTables-WithLinks.zip"
_QA_DATA_BASE_URL = "https://raw.githubusercontent.com/wenhuchen/HybridQA/master/released_data"
_URLS = {
"train": f"{_QA_DATA_BASE_URL}/train.json",
"dev": f"{_QA_DATA_BASE_URL}/dev.json",
"test": f"{_QA_DATA_BASE_URL}/test.json",
}
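# Per-split question files taken from the "released_data" directory of the official HybridQA GitHub repository.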


class HybridQa(datasets.GeneratorBasedBuilder):
"""HybridQA: A Dataset of Multi-Hop Question Answering over Tabular and Textual Data"""
VERSION = datasets.Version("1.0.0")
BUILDER_CONFIGS = [
datasets.BuilderConfig(
name="hybrid_qa",
version=datasets.Version("1.0.0"),
),
]

    def _info(self):
features = {
"question_id": datasets.Value("string"),
"question": datasets.Value("string"),
"table_id": datasets.Value("string"),
"answer_text": datasets.Value("string"),
"question_postag": datasets.Value("string"),
"table": {
"url": datasets.Value("string"),
"title": datasets.Value("string"),
"header": datasets.Sequence(datasets.Value("string")),
"data": [
{
"value": datasets.Value("string"),
"urls": [{"url": datasets.Value("string"), "summary": datasets.Value("string")}],
}
],
"section_title": datasets.Value("string"),
"section_text": datasets.Value("string"),
"uid": datasets.Value("string"),
"intro": datasets.Value("string"),
},
}
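        # Illustrative shape of one generated example (placeholder values, for orientation only):
        #   {
        #       "question_id": "<id>",
        #       "question": "<question text>",
        #       "table_id": "<table id>",
        #       "answer_text": "<answer, or empty string for the test split>",
        #       "question_postag": "<POS tags of the question>",
        #       "table": {"url": ..., "title": ..., "header": [...], "data": [{"value": ..., "urls": [...]}], ...},
        #   }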
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features(features),
supervised_keys=None,
homepage=_HOMEPAGE,
citation=_CITATION,
)

    def _split_generators(self, dl_manager):
extracted_path = dl_manager.download_and_extract(_WIKI_TABLES_GIT_ARCHIVE_URL)
downloaded_files = dl_manager.download(_URLS)
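        # The extracted archive provides one JSON file per table in each directory: "tables_tok" holds the
        # tokenized tables, and "request_tok" maps each linked Wikipedia URL to a short summary of that page
        # (used below as the "summary" field of every cell link).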
repo_path = os.path.join(extracted_path, "WikiTables-WithLinks")
tables_path = os.path.join(repo_path, "tables_tok")
requests_path = os.path.join(repo_path, "request_tok")
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={
"qa_filepath": downloaded_files["train"],
"tables_path": tables_path,
"requests_path": requests_path,
},
),
datasets.SplitGenerator(
name=datasets.Split.VALIDATION,
gen_kwargs={
"qa_filepath": downloaded_files["dev"],
"tables_path": tables_path,
"requests_path": requests_path,
},
),
datasets.SplitGenerator(
name=datasets.Split.TEST,
gen_kwargs={
"qa_filepath": downloaded_files["test"],
"tables_path": tables_path,
"requests_path": requests_path,
},
),
]
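
    # The `datasets` library calls `_generate_examples` once per split, expanding the `gen_kwargs`
    # dictionaries above into keyword arguments.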
def _generate_examples(self, qa_filepath, tables_path, requests_path):
with open(qa_filepath, encoding="utf-8") as f:
examples = json.load(f)
for example in examples:
table_id = example["table_id"]
table_file_path = os.path.join(tables_path, f"{table_id}.json")
url_data_path = os.path.join(requests_path, f"{table_id}.json")
try:
with open(table_file_path, encoding="utf-8") as f:
table = json.load(f)
with open(url_data_path, encoding="utf-8") as f:
url_data = json.load(f)
            except FileNotFoundError:
                # Some JSON files were not properly added to the GitHub repo (filenames containing ':' or '"'),
                # so the table or request file may be missing; skip those examples.
                continue
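            # Each raw header entry is a list whose first element is the header text (the rest are link
            # annotations); keep only the text.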
table["header"] = [header[0] for header in table["header"]]
            # Each cell in a row is a pair: the cell text and the list of URLs linked from that cell.
            # Flatten the table into a list of dicts with keys "value" and "urls", matching the features above.
            cells = []
            for row in table["data"]:
                for cell in row:
                    new_cell = {"value": cell[0]}
                    urls = cell[1]
                    new_cell["urls"] = [{"url": url, "summary": url_data[url]} for url in urls]
                    cells.append(new_cell)
            table["data"] = cells
example["answer_text"] = example.pop("answer-text") if "answer-text" in example else ""
example["table"] = table
yield example["question_id"], example