
Modalities: Text
Formats: parquet
Languages: English
Libraries: Datasets, pandas
albertvillanova committed on
Commit 402ee4b
1 Parent(s): 8daec61

Delete loading script
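With the loading script removed, the datasets library reads ComQA directly from the Parquet files in this repository (see the "Formats: parquet" tag above), so no repository code runs at load time. A minimal sketch of the new loading path; the bare repo id "com_qa" mirrors this page and may need a namespace prefix depending on where the dataset lives:

from datasets import load_dataset

# Load the Parquet-backed splits straight from the Hub; the deleted
# com_qa.py script is no longer executed.
ds = load_dataset("com_qa")

print(ds)              # DatasetDict with train/validation/test splits
print(ds["train"][0])  # {"cluster_id": ..., "questions": [...], "answers": [...]}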

Files changed (1)
  1. com_qa.py +0 -122
com_qa.py DELETED
@@ -1,122 +0,0 @@
- """TODO(com_qa): Add a description here."""
-
-
- import json
-
- import datasets
-
-
- # TODO(com_qa): BibTeX citation
- _CITATION = """\
- @inproceedings{abujabal-etal-2019-comqa,
-     title = "{ComQA}: A Community-sourced Dataset for Complex Factoid Question Answering with Paraphrase Clusters",
-     author = {Abujabal, Abdalghani and
-       Saha Roy, Rishiraj and
-       Yahya, Mohamed and
-       Weikum, Gerhard},
-     booktitle = {Proceedings of the 2019 Conference of the North {A}merican Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers)},
-     month = {jun},
-     year = {2019},
-     address = {Minneapolis, Minnesota},
-     publisher = {Association for Computational Linguistics},
-     url = {https://www.aclweb.org/anthology/N19-1027},
-     doi = {10.18653/v1/N19-1027},
-     pages = {307--317},
- }
- """
-
- # TODO(com_qa):
- _DESCRIPTION = """\
- ComQA is a dataset of 11,214 questions, which were collected from WikiAnswers, a community question answering website.
- By collecting questions from such a site we ensure that the information needs are ones of interest to actual users.
- Moreover, questions posed there often cannot be answered by commercial search engines or QA technology, making them
- more interesting for driving future research compared to those collected from an engine's query log. The dataset contains
- questions with various challenging phenomena such as the need for temporal reasoning, comparison (e.g., comparatives,
- superlatives, ordinals), compositionality (multiple, possibly nested, subquestions with multiple entities), and
- unanswerable questions (e.g., Who was the first human being on Mars?). Through a large crowdsourcing effort, questions
- in ComQA are grouped into 4,834 paraphrase clusters that express the same information need. Each cluster is annotated
- with its answer(s). ComQA answers come in the form of Wikipedia entities wherever possible. Wherever the answers are
- temporal or measurable quantities, TIMEX3 and the International System of Units (SI) are used for normalization.
- """
-
- _URL = "https://qa.mpi-inf.mpg.de/comqa/"
- _URLS = {
-     "train": _URL + "comqa_train.json",
-     "dev": _URL + "comqa_dev.json",
-     "test": _URL + "comqa_test.json",
- }
-
-
- class ComQa(datasets.GeneratorBasedBuilder):
-     """TODO(com_qa): Short description of my dataset."""
-
-     # TODO(com_qa): Set up version.
-     VERSION = datasets.Version("0.1.0")
-
-     def _info(self):
-         # TODO(com_qa): Specifies the datasets.DatasetInfo object
-         return datasets.DatasetInfo(
-             # This is the description that will appear on the datasets page.
-             description=_DESCRIPTION,
-             # datasets.features.FeatureConnectors
-             features=datasets.Features(
-                 {
-                     "cluster_id": datasets.Value("string"),
-                     "questions": datasets.features.Sequence(datasets.Value("string")),
-                     "answers": datasets.features.Sequence(datasets.Value("string")),
-                     # These are the features of your dataset like images, labels ...
-                 }
-             ),
-             # If there's a common (input, target) tuple from the features,
-             # specify them here. They'll be used if as_supervised=True in
-             # builder.as_dataset.
-             supervised_keys=None,
-             # Homepage of the dataset for documentation
-             homepage="http://qa.mpi-inf.mpg.de/comqa/",
-             citation=_CITATION,
-         )
-
-     def _split_generators(self, dl_manager):
-         """Returns SplitGenerators."""
-         # TODO(com_qa): Downloads the data and defines the splits
-         # dl_manager is a datasets.download.DownloadManager that can be used to
-         # download and extract URLs
-         urls_to_download = _URLS
-         dl_dir = dl_manager.download_and_extract(urls_to_download)
-         return [
-             datasets.SplitGenerator(
-                 name=datasets.Split.TRAIN,
-                 # These kwargs will be passed to _generate_examples
-                 gen_kwargs={"filepath": dl_dir["train"], "split": "train"},
-             ),
-             datasets.SplitGenerator(
-                 name=datasets.Split.TEST,
-                 # These kwargs will be passed to _generate_examples
-                 gen_kwargs={"filepath": dl_dir["test"], "split": "test"},
-             ),
-             datasets.SplitGenerator(
-                 name=datasets.Split.VALIDATION,
-                 # These kwargs will be passed to _generate_examples
-                 gen_kwargs={"filepath": dl_dir["dev"], "split": "dev"},
-             ),
-         ]
-
-     def _generate_examples(self, filepath, split):
-         """Yields examples."""
-         # TODO(com_qa): Yields (key, example) tuples from the dataset
-         with open(filepath, encoding="utf-8") as f:
-             data = json.load(f)
-             for id_, example in enumerate(data):
-                 questions = []
-                 if split == "test":
-                     cluster_id = str(example["id"])
-                     questions.append(example["question"])
-                 else:
-                     cluster_id = example["cluster_id"]
-                     questions = example["questions"]
-                 answers = example["answers"]
-                 yield id_, {
-                     "cluster_id": cluster_id,
-                     "questions": questions,
-                     "answers": answers,
-                 }
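For reference, the deleted _generate_examples unified two different JSON layouts into one schema: test records carry a single "id"/"question" pair, while train and dev records carry a "cluster_id" with a list of paraphrased "questions". A standalone sketch of that normalization; the sample records below are hypothetical stand-ins that only mimic the field names used in the script, not real ComQA data:

def normalize(example, split):
    # Map one raw ComQA record onto the unified cluster_id/questions/answers schema.
    if split == "test":
        # Test records hold a single question keyed by "id"/"question".
        return {
            "cluster_id": str(example["id"]),
            "questions": [example["question"]],
            "answers": example["answers"],
        }
    # Train/dev records already group paraphrases under a shared cluster id.
    return {
        "cluster_id": example["cluster_id"],
        "questions": list(example["questions"]),
        "answers": example["answers"],
    }

# Illustrative records only (hypothetical values, field names from the script).
test_record = {"id": 7, "question": "who was the first human on mars?", "answers": []}
train_record = {"cluster_id": "c-1", "questions": ["q1", "q2"], "answers": ["a"]}
print(normalize(test_record, "test"))
print(normalize(train_record, "train"))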