Commit 528450d

Update files from the datasets library (from 1.0.0)

Release notes: https://github.com/huggingface/datasets/releases/tag/1.0.0

Files added:
- .gitattributes +27 -0
- dataset_infos.json +1 -0
- dummy/0.1.0/dummy_data.zip +3 -0
- wiqa.py +114 -0
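
With this loading script in place, the dataset becomes loadable through the datasets library. A minimal usage sketch (assuming datasets >= 1.0.0 is installed; the fields printed below are the ones declared in wiqa.py further down):

    from datasets import load_dataset

    # Downloads the WIQA zip, runs wiqa.py and caches the three splits.
    wiqa = load_dataset("wiqa")

    print(wiqa)                      # DatasetDict with train/test/validation splits
    example = wiqa["train"][0]
    print(example["question_stem"])  # the question text
    print(example["choices"])        # {"text": [...], "label": [...]}
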
.gitattributes
ADDED
@@ -0,0 +1,27 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bin.* filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zstandard filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text

dataset_infos.json
ADDED
@@ -0,0 +1 @@
+{"default": {"description": "The WIQA dataset V1 has 39705 questions containing a perturbation and a possible effect in the context of a paragraph. \nThe dataset is split into 29808 train questions, 6894 dev questions and 3003 test questions.\n", "citation": "@article{wiqa,\n author = {Niket Tandon and Bhavana Dalvi Mishra and Keisuke Sakaguchi and Antoine Bosselut and Peter Clark}\n title = {WIQA: A dataset for \"What if...\" reasoning over procedural text},\n journal = {arXiv:1909.04739v1},\n year = {2019},\n}\n", "homepage": "https://allenai.org/data/wiqa", "license": "", "features": {"question_stem": {"dtype": "string", "id": null, "_type": "Value"}, "question_para_step": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "answer_label": {"dtype": "string", "id": null, "_type": "Value"}, "answer_label_as_choice": {"dtype": "string", "id": null, "_type": "Value"}, "choices": {"feature": {"text": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}, "metadata_question_id": {"dtype": "string", "id": null, "_type": "Value"}, "metadata_graph_id": {"dtype": "string", "id": null, "_type": "Value"}, "metadata_para_id": {"dtype": "string", "id": null, "_type": "Value"}, "metadata_question_type": {"dtype": "string", "id": null, "_type": "Value"}, "metadata_path_len": {"dtype": "int32", "id": null, "_type": "Value"}}, "supervised_keys": null, "builder_name": "wiqa", "config_name": "default", "version": {"version_str": "0.1.0", "description": null, "datasets_version_to_prepare": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 17089298, "num_examples": 29808, "dataset_name": "wiqa"}, "test": {"name": "test", "num_bytes": 1532223, "num_examples": 3003, "dataset_name": "wiqa"}, "validation": {"name": "validation", "num_bytes": 3779584, "num_examples": 6894, "dataset_name": "wiqa"}}, "download_checksums": {"https://public-aristo-processes.s3-us-west-2.amazonaws.com/wiqa_dataset_no_explanation_v2/wiqa-dataset-v2-october-2019.zip": {"num_bytes": 5247733, "checksum": "afdab9bc33d814576f76516017f2b39dd101f8770f3f29ab6be2846ff59efb43"}}, "download_size": 5247733, "dataset_size": 22401105, "size_in_bytes": 27648838}}

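The split sizes and checksums recorded above can also be read back directly; a small sketch, assuming this repository has been cloned and dataset_infos.json is in the working directory:

    import json

    # Load the metadata written for the default config of WIQA.
    with open("dataset_infos.json", encoding="utf-8") as f:
        info = json.load(f)["default"]

    print(info["version"]["version_str"])  # 0.1.0
    print({name: split["num_examples"] for name, split in info["splits"].items()})
    # {'train': 29808, 'test': 3003, 'validation': 6894}
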
dummy/0.1.0/dummy_data.zip
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:878f302120c4548bda2e88da2e246f73f730f5a710dd6e753b95819bcbe70860
+size 2387

wiqa.py
ADDED
@@ -0,0 +1,114 @@
+"""TODO(wiqa): Add a description here."""
+
+from __future__ import absolute_import, division, print_function
+
+import json
+import os
+
+import datasets
+
+
+# TODO(wiqa): BibTeX citation
+_CITATION = """\
+@article{wiqa,
+    author = {Niket Tandon and Bhavana Dalvi Mishra and Keisuke Sakaguchi and Antoine Bosselut and Peter Clark}
+    title = {WIQA: A dataset for "What if..." reasoning over procedural text},
+    journal = {arXiv:1909.04739v1},
+    year = {2019},
+}
+"""
+
+# TODO(wiqa):
+_DESCRIPTION = """\
+The WIQA dataset V1 has 39705 questions containing a perturbation and a possible effect in the context of a paragraph.
+The dataset is split into 29808 train questions, 6894 dev questions and 3003 test questions.
+"""
+_URL = "https://public-aristo-processes.s3-us-west-2.amazonaws.com/wiqa_dataset_no_explanation_v2/wiqa-dataset-v2-october-2019.zip"
+URl = "s3://ai2-s2-research-public/open-corpus/2020-04-10/"
+
+
+class Wiqa(datasets.GeneratorBasedBuilder):
+    """TODO(wiqa): Short description of my dataset."""
+
+    # TODO(wiqa): Set up version.
+    VERSION = datasets.Version("0.1.0")
+
+    def _info(self):
+        # TODO(wiqa): Specifies the datasets.DatasetInfo object
+        return datasets.DatasetInfo(
+            # This is the description that will appear on the datasets page.
+            description=_DESCRIPTION,
+            # datasets.features.FeatureConnectors
+            features=datasets.Features(
+                {
+                    # These are the features of your dataset like images, labels ...
+                    "question_stem": datasets.Value("string"),
+                    "question_para_step": datasets.features.Sequence(datasets.Value("string")),
+                    "answer_label": datasets.Value("string"),
+                    "answer_label_as_choice": datasets.Value("string"),
+                    "choices": datasets.features.Sequence(
+                        {"text": datasets.Value("string"), "label": datasets.Value("string")}
+                    ),
+                    "metadata_question_id": datasets.Value("string"),
+                    "metadata_graph_id": datasets.Value("string"),
+                    "metadata_para_id": datasets.Value("string"),
+                    "metadata_question_type": datasets.Value("string"),
+                    "metadata_path_len": datasets.Value("int32"),
+                }
+            ),
+            # If there's a common (input, target) tuple from the features,
+            # specify them here. They'll be used if as_supervised=True in
+            # builder.as_dataset.
+            supervised_keys=None,
+            # Homepage of the dataset for documentation
+            homepage="https://allenai.org/data/wiqa",
+            citation=_CITATION,
+        )
+
+    def _split_generators(self, dl_manager):
+        """Returns SplitGenerators."""
+        # TODO(wiqa): Downloads the data and defines the splits
+        # dl_manager is a datasets.download.DownloadManager that can be used to
+        # download and extract URLs
+        dl_dir = dl_manager.download_and_extract(_URL)
+
+        return [
+            datasets.SplitGenerator(
+                name=datasets.Split.TRAIN,
+                # These kwargs will be passed to _generate_examples
+                gen_kwargs={"filepath": os.path.join(dl_dir, "train.jsonl")},
+            ),
+            datasets.SplitGenerator(
+                name=datasets.Split.TEST,
+                # These kwargs will be passed to _generate_examples
+                gen_kwargs={"filepath": os.path.join(dl_dir, "test.jsonl")},
+            ),
+            datasets.SplitGenerator(
+                name=datasets.Split.VALIDATION,
+                # These kwargs will be passed to _generate_examples
+                gen_kwargs={"filepath": os.path.join(dl_dir, "dev.jsonl")},
+            ),
+        ]
+
+    def _generate_examples(self, filepath):
+        """Yields examples."""
+        # TODO(wiqa): Yields (key, example) tuples from the dataset
+        with open(filepath, encoding="utf-8") as f:
+            for id_, row in enumerate(f):
+                data = json.loads(row)
+
+                yield id_, {
+                    "question_stem": data["question"]["stem"],
+                    "question_para_step": data["question"]["para_steps"],
+                    "answer_label": data["question"]["answer_label"],
+                    "answer_label_as_choice": data["question"]["answer_label_as_choice"],
+                    "choices": {
+                        "text": [choice["text"] for choice in data["question"]["choices"]],
+                        "label": [choice["label"] for choice in data["question"]["choices"]],
+                    },
+                    "metadata_question_id": data["metadata"]["ques_id"],
+                    "metadata_graph_id": data["metadata"]["graph_id"],
+                    "metadata_para_id": data["metadata"]["para_id"],
+                    "metadata_question_type": data["metadata"]["question_type"],
+                    "metadata_path_len": data["metadata"]["path_len"],
+                }
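
For reference, _generate_examples above assumes every line of the extracted train.jsonl, dev.jsonl and test.jsonl files parses to an object exposing the nested keys it reads. A hypothetical record (shape inferred from the key accesses in the script, values are placeholders only):

    # Hypothetical raw JSONL record; only the nesting matters, values are placeholders.
    raw = {
        "question": {
            "stem": "...",
            "para_steps": ["...", "..."],
            "answer_label": "...",
            "answer_label_as_choice": "A",
            "choices": [{"text": "...", "label": "A"}, {"text": "...", "label": "B"}],
        },
        "metadata": {
            "ques_id": "...",
            "graph_id": "...",
            "para_id": "...",
            "question_type": "...",
            "path_len": 2,
        },
    }

    # _generate_examples flattens this into the feature dict declared in _info:
    # "question" and "metadata" fields become top-level keys ("question_stem",
    # "metadata_question_id", ...) and "choices" is regrouped into
    # {"text": [...], "label": [...]}.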