Datasets: com_qa

Modalities: Text
Formats: parquet
Languages: English
Libraries: Datasets, pandas
Commit a50fce1 (0 parents), committed by system (HF staff):

Update files from the datasets library (from 1.0.0)

Release notes: https://github.com/huggingface/datasets/releases/tag/1.0.0

Files changed (4):
  1. .gitattributes +27 -0
  2. com_qa.py +125 -0
  3. dataset_infos.json +1 -0
  4. dummy/0.1.0/dummy_data.zip +3 -0
.gitattributes ADDED
@@ -0,0 +1,27 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bin.* filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zstandard filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
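
Every entry above follows the same .gitattributes shape: a path pattern, then the attributes that route matching files through Git LFS (the lfs filter/diff/merge drivers, plus -text to disable end-of-line normalization). As a sketch, a hypothetical new binary format would be tracked with a line of the same form:

    # hypothetical addition, not part of this commit:
    *.safetensors filter=lfs diff=lfs merge=lfs -text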
com_qa.py ADDED
@@ -0,0 +1,125 @@
+ """TODO(com_qa): Add a description here."""
+
+ from __future__ import absolute_import, division, print_function
+
+ import json
+ import os
+
+ import datasets
+
+
+ # TODO(com_qa): BibTeX citation
+ _CITATION = """\
+ @inproceedings{abujabal-etal-2019-comqa,
+     title = "{ComQA}: A Community-sourced Dataset for Complex Factoid Question Answering with Paraphrase Clusters",
+     author = {Abujabal, Abdalghani and
+       Saha Roy, Rishiraj and
+       Yahya, Mohamed and
+       Weikum, Gerhard},
+     booktitle = {Proceedings of the 2019 Conference of the North {A}merican Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers)},
+     month = {jun},
+     year = {2019},
+     address = {Minneapolis, Minnesota},
+     publisher = {Association for Computational Linguistics},
+     url = {https://www.aclweb.org/anthology/N19-1027},
+     doi = {10.18653/v1/N19-1027},
+     pages = {307--317},
+ }
+ """
+
+ # TODO(com_qa):
+ _DESCRIPTION = """\
+ ComQA is a dataset of 11,214 questions, which were collected from WikiAnswers, a community question answering website.
+ By collecting questions from such a site we ensure that the information needs are ones of interest to actual users.
+ Moreover, questions posed there often cannot be answered by commercial search engines or QA technology, making them
+ more interesting for driving future research compared to those collected from an engine's query log. The dataset contains
+ questions with various challenging phenomena such as the need for temporal reasoning, comparison (e.g., comparatives,
+ superlatives, ordinals), compositionality (multiple, possibly nested, subquestions with multiple entities), and
+ unanswerable questions (e.g., Who was the first human being on Mars?). Through a large crowdsourcing effort, questions
+ in ComQA are grouped into 4,834 paraphrase clusters that express the same information need. Each cluster is annotated
+ with its answer(s). ComQA answers come in the form of Wikipedia entities wherever possible. Wherever the answers are
+ temporal or measurable quantities, TIMEX3 and the International System of Units (SI) are used for normalization.
+ """
+ _URL = "https://qa.mpi-inf.mpg.de/comqa"
+ _TRAIN_FILE = "comqa_train.json"
+ _DEV_FILE = "comqa_dev.json"
+ _TEST_FILE = "comqa_test.json"
+
+
+ class ComQa(datasets.GeneratorBasedBuilder):
+     """TODO(com_qa): Short description of my dataset."""
+
+     # TODO(com_qa): Set up version.
+     VERSION = datasets.Version("0.1.0")
+
+     def _info(self):
+         # TODO(com_qa): Specifies the datasets.DatasetInfo object
+         return datasets.DatasetInfo(
+             # This is the description that will appear on the datasets page.
+             description=_DESCRIPTION,
+             # datasets.features.FeatureConnectors
+             features=datasets.Features(
+                 {
+                     "cluster_id": datasets.Value("string"),
+                     "questions": datasets.features.Sequence(datasets.Value("string")),
+                     "answers": datasets.features.Sequence(datasets.Value("string")),
+                     # These are the features of your dataset like images, labels ...
+                 }
+             ),
+             # If there's a common (input, target) tuple from the features,
+             # specify them here. They'll be used if as_supervised=True in
+             # builder.as_dataset.
+             supervised_keys=None,
+             # Homepage of the dataset for documentation
+             homepage="http://qa.mpi-inf.mpg.de/comqa/",
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         """Returns SplitGenerators."""
+         # TODO(com_qa): Downloads the data and defines the splits
+         # dl_manager is a datasets.download.DownloadManager that can be used to
+         # download and extract URLs
+         urls_to_download = {
+             "train": os.path.join(_URL, _TRAIN_FILE),
+             "dev": os.path.join(_URL, _DEV_FILE),
+             "test": os.path.join(_URL, _TEST_FILE),
+         }
+         dl_dir = dl_manager.download_and_extract(urls_to_download)
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={"filepath": dl_dir["train"], "split": "train"},
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={"filepath": dl_dir["test"], "split": "test"},
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={"filepath": dl_dir["dev"], "split": "dev"},
+             ),
+         ]
+
+     def _generate_examples(self, filepath, split):
+         """Yields examples."""
+         # TODO(com_qa): Yields (key, example) tuples from the dataset
+         with open(filepath, encoding="utf-8") as f:
+             data = json.load(f)
+             for id_, example in enumerate(data):
+                 questions = []
+                 if split == "test":
+                     cluster_id = str(example["id"])
+                     questions.append(example["question"])
+                 else:
+                     cluster_id = example["cluster_id"]
+                     questions = example["questions"]
+                 answers = example["answers"]
+                 yield id_, {
+                     "cluster_id": cluster_id,
+                     "questions": questions,
+                     "answers": answers,
+                 }
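
With the script in place, the dataset loads through the standard datasets API. A minimal usage sketch, assuming the script is published under the canonical name "com_qa":

    from datasets import load_dataset

    # Downloads comqa_{train,dev,test}.json and runs _generate_examples.
    comqa = load_dataset("com_qa")

    example = comqa["train"][0]
    # Each example is one paraphrase cluster:
    # {"cluster_id": ..., "questions": [...], "answers": [...]}
    # Note that _generate_examples wraps the single test question in a
    # one-element "questions" list, so the schema is uniform across splits.
    print(example["questions"], example["answers"])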
dataset_infos.json ADDED
@@ -0,0 +1 @@
+ {"default": {"description": "ComQA is a dataset of 11,214 questions, which were collected from WikiAnswers, a community question answering website. \nBy collecting questions from such a site we ensure that the information needs are ones of interest to actual users. \nMoreover, questions posed there often cannot be answered by commercial search engines or QA technology, making them \nmore interesting for driving future research compared to those collected from an engine's query log. The dataset contains \nquestions with various challenging phenomena such as the need for temporal reasoning, comparison (e.g., comparatives, \nsuperlatives, ordinals), compositionality (multiple, possibly nested, subquestions with multiple entities), and \nunanswerable questions (e.g., Who was the first human being on Mars?). Through a large crowdsourcing effort, questions \nin ComQA are grouped into 4,834 paraphrase clusters that express the same information need. Each cluster is annotated \nwith its answer(s). ComQA answers come in the form of Wikipedia entities wherever possible. Wherever the answers are \ntemporal or measurable quantities, TIMEX3 and the International System of Units (SI) are used for normalization.\n", "citation": "@inproceedings{abujabal-etal-2019-comqa,\n    title = \"{ComQA}: A Community-sourced Dataset for Complex Factoid Question Answering with Paraphrase Clusters\",\n    author = {Abujabal, Abdalghani and\n      Saha Roy, Rishiraj and\n      Yahya, Mohamed and\n      Weikum, Gerhard},\n    booktitle = {Proceedings of the 2019 Conference of the North {A}merican Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers)},\n    month = {jun},\n    year = {2019},\n    address = {Minneapolis, Minnesota},\n    publisher = {Association for Computational Linguistics},\n    url = {https://www.aclweb.org/anthology/N19-1027},\n    doi = {10.18653/v1/N19-1027},\n    pages = {307--317},\n}\n", "homepage": "http://qa.mpi-inf.mpg.de/comqa/", "license": "", "features": {"cluster_id": {"dtype": "string", "id": null, "_type": "Value"}, "questions": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "answers": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "supervised_keys": null, "builder_name": "com_qa", "config_name": "default", "version": {"version_str": "0.1.0", "description": null, "datasets_version_to_prepare": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 696645, "num_examples": 3966, "dataset_name": "com_qa"}, "test": {"name": "test", "num_bytes": 273384, "num_examples": 2243, "dataset_name": "com_qa"}, "validation": {"name": "validation", "num_bytes": 131945, "num_examples": 966, "dataset_name": "com_qa"}}, "download_checksums": {"https://qa.mpi-inf.mpg.de/comqa/comqa_train.json": {"num_bytes": 1054334, "checksum": "aa25b2747221a6147737066fa7f8509414a6333e6eebb20d7970fc50166eba9c"}, "https://qa.mpi-inf.mpg.de/comqa/comqa_dev.json": {"num_bytes": 213246, "checksum": "937f898f996e9a7317d311553cf5b43c4a53e0bb841ae8eed4d8cf15936fbc84"}, "https://qa.mpi-inf.mpg.de/comqa/comqa_test.json": {"num_bytes": 404104, "checksum": "b81e19c4108198781167ddb30d26172571e76cc8e684aabb181537a45e816867"}}, "download_size": 1671684, "dataset_size": 1101974, "size_in_bytes": 2773658}}
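
This metadata file records the feature schema, split sizes, and download checksums that the datasets library can verify downloads against. A minimal sketch for inspecting it directly, assuming dataset_infos.json sits in the current directory:

    import json

    with open("dataset_infos.json", encoding="utf-8") as f:
        infos = json.load(f)

    default = infos["default"]
    print(default["splits"]["train"]["num_examples"])  # 3966
    print(list(default["download_checksums"]))  # the three comqa_*.json source URLs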
dummy/0.1.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2b949caabd4538a76bb9c1c4ad535752c354b8e4c8d143ca599df0a41382a1c8
+ size 1300