Languages: English
Multilinguality: monolingual
Size Categories: 10K<n<100K
Language Creators: crowdsourced, found
Annotations Creators: crowdsourced
Source Datasets: original
davzoku committed
Commit
b40a463
1 Parent(s): 761df5f

Delete loading script

Files changed (1):
  1. piqa.py +0 -136
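With the loading script removed, `datasets` no longer needs `trust_remote_code` for this repository; loading falls back to the data files hosted on the Hub (typically the auto-converted Parquet export). A minimal sketch of loading it afterwards, assuming the hypothetical Hub id "davzoku/piqa" for this repo:

from datasets import load_dataset

ds = load_dataset("davzoku/piqa")  # hypothetical repo id; resolves hosted data files, no script
print(ds)                          # expected splits: train / validation / test
print(ds["train"][0])              # {"goal": ..., "sol1": ..., "sol2": ..., "label": 0 or 1}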
piqa.py DELETED
@@ -1,136 +0,0 @@
- # coding=utf-8
- # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- """PIQA dataset."""
-
-
- import json
- import os
-
- import datasets
-
-
- _CITATION = """\
- @inproceedings{Bisk2020,
-     author = {Yonatan Bisk and Rowan Zellers and
-               Ronan Le Bras and Jianfeng Gao
-               and Yejin Choi},
-     title = {PIQA: Reasoning about Physical Commonsense in
-              Natural Language},
-     booktitle = {Thirty-Fourth AAAI Conference on
-                  Artificial Intelligence},
-     year = {2020},
- }
- """
-
- _DESCRIPTION = """\
- To apply eyeshadow without a brush, should I use a cotton swab or a toothpick?
- Questions requiring this kind of physical commonsense pose a challenge to state-of-the-art
- natural language understanding systems. The PIQA dataset introduces the task of physical commonsense reasoning
- and a corresponding benchmark dataset Physical Interaction: Question Answering or PIQA.
-
- Physical commonsense knowledge is a major challenge on the road to true AI-completeness,
- including robots that interact with the world and understand natural language.
-
- PIQA focuses on everyday situations with a preference for atypical solutions.
- The dataset is inspired by instructables.com, which provides users with instructions on how to build, craft,
- bake, or manipulate objects using everyday materials.
-
- The underlying task is formulated as multiple choice question answering:
- given a question `q` and two possible solutions `s1`, `s2`, a model or
- a human must choose the most appropriate solution, of which exactly one is correct.
- The dataset is further cleaned of basic artifacts using the AFLite algorithm, which is an improvement over
- adversarial filtering. The dataset contains 16,000 examples for training, 2,000 for development, and 3,000 for testing.
- """
-
- _URLs = {
-     "train-dev": "https://storage.googleapis.com/ai2-mosaic/public/physicaliqa/physicaliqa-train-dev.zip",
-     "test": "https://yonatanbisk.com/piqa/data/tests.jsonl",
- }
-
-
- class Piqa(datasets.GeneratorBasedBuilder):
-     """PIQA dataset."""
-
-     VERSION = datasets.Version("1.1.0")
-
-     BUILDER_CONFIGS = [
-         datasets.BuilderConfig(
-             name="plain_text",
-             description="Plain text",
-             version=VERSION,
-         )
-     ]
-
-     def _info(self):
-         return datasets.DatasetInfo(
-             description=_DESCRIPTION,
-             features=datasets.Features(
-                 {
-                     "goal": datasets.Value("string"),
-                     "sol1": datasets.Value("string"),
-                     "sol2": datasets.Value("string"),
-                     "label": datasets.ClassLabel(names=["0", "1"]),
-                 }
-             ),
-             supervised_keys=None,
-             homepage="https://yonatanbisk.com/piqa/",
-             citation=_CITATION,
-         )
-
-     def _split_generators(self, dl_manager):
-         """Returns SplitGenerators."""
-         data_dir = dl_manager.download_and_extract(_URLs)
-         return [
-             datasets.SplitGenerator(
-                 name=datasets.Split.TRAIN,
-                 gen_kwargs={
-                     "input_filepath": os.path.join(data_dir["train-dev"], "physicaliqa-train-dev", "train.jsonl"),
-                     "label_filepath": os.path.join(data_dir["train-dev"], "physicaliqa-train-dev", "train-labels.lst"),
-                 },
-             ),
-             datasets.SplitGenerator(
-                 name=datasets.Split.TEST,
-                 gen_kwargs={
-                     "input_filepath": data_dir["test"],
-                 },
-             ),
-             datasets.SplitGenerator(
-                 name=datasets.Split.VALIDATION,
-                 gen_kwargs={
-                     "input_filepath": os.path.join(data_dir["train-dev"], "physicaliqa-train-dev", "dev.jsonl"),
-                     "label_filepath": os.path.join(data_dir["train-dev"], "physicaliqa-train-dev", "dev-labels.lst"),
-                 },
-             ),
-         ]
-
-     def _generate_examples(self, input_filepath, label_filepath=None):
-         """Yields examples."""
-         with open(input_filepath, encoding="utf-8") as input_file:
-             inputs = input_file.read().splitlines()
-
-         if label_filepath is not None:
-             with open(label_filepath, encoding="utf-8") as label_file:
-                 labels = label_file.read().splitlines()
-         else:
-             # Labels are not available for the test set.
-             # Filling the `label` column with -1 by default.
-             labels = [-1] * len(inputs)
-
-         for idx, (row, lab) in enumerate(zip(inputs, labels)):
-             data = json.loads(row)
-             goal = data["goal"]
-             sol1 = data["sol1"]
-             sol2 = data["sol2"]
-             yield idx, {"goal": goal, "sol1": sol1, "sol2": sol2, "label": lab}
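
For reference, the core of the deleted `_generate_examples` can be reproduced outside the builder API. A standalone sketch, assuming locally extracted copies of the files named in the script above (`load_piqa_split` is a hypothetical helper, not part of the original script):

import json

def load_piqa_split(input_filepath, label_filepath=None):
    # Mirrors the deleted generator: one JSON object per line, labels in a
    # parallel .lst file; the unlabeled test set gets -1 placeholders.
    with open(input_filepath, encoding="utf-8") as f:
        rows = f.read().splitlines()
    if label_filepath is not None:
        with open(label_filepath, encoding="utf-8") as f:
            labels = [int(line) for line in f.read().splitlines()]
    else:
        labels = [-1] * len(rows)
    for row, label in zip(rows, labels):
        data = json.loads(row)
        yield {"goal": data["goal"], "sol1": data["sol1"],
               "sol2": data["sol2"], "label": label}

# Example: peek at the first training pair.
# next(load_piqa_split("physicaliqa-train-dev/train.jsonl",
#                      "physicaliqa-train-dev/train-labels.lst"))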