Nix committed on
Commit d86fa34
1 Parent(s): 44a3a51

Delete CLUTRR_v1.0.py

Files changed (1)
  1. CLUTRR_v1.0.py +0 -144
CLUTRR_v1.0.py DELETED
@@ -1,144 +0,0 @@
- # coding=utf-8
- # Copyright 2019 The CLUTRR Datasets Authors and the HuggingFace Datasets Authors.
- #
- # CLUTRR is licensed under CC BY-NC 4.0 (Attribution-NonCommercial 4.0 International), as found in the LICENSE file.
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
-
- # Lint as: python3
- """The CLUTRR (Compositional Language Understanding and Text-based Relational Reasoning) benchmark."""
-
-
- import json  # needed by _generate_examples below
- import os
-
- import datasets
-
-
- _CLUTRR_CITATION = """\
- @article{sinha2019clutrr,
-     Author = {Koustuv Sinha and Shagun Sodhani and Jin Dong and Joelle Pineau and William L. Hamilton},
-     Title = {CLUTRR: A Diagnostic Benchmark for Inductive Reasoning from Text},
-     Year = {2019},
-     journal = {Empirical Methods of Natural Language Processing (EMNLP)},
-     arxiv = {1908.06177}
- }
- """
-
- _CLUTRR_DESCRIPTION = """\
- CLUTRR (Compositional Language Understanding and Text-based Relational Reasoning),
- a diagnostic benchmark suite, was first introduced in https://arxiv.org/abs/1908.06177
- to test the systematic generalization and inductive reasoning capabilities of NLU systems.
- """
-
- # Raw-content base URL, so the download manager fetches the data files themselves
- # (a github.com/.../tree/main/ URL serves an HTML page, not the files).
- _URL = "https://raw.githubusercontent.com/kliang5/CLUTRR_huggingface_dataset/main/"
- _TASK = [
-     "gen_train23_test2to10",
-     "gen_train234_test2to10",
-     "rob_train_clean_23_test_all_23",
-     "rob_train_disc_23_test_all_23",
-     "rob_train_irr_23_test_all_23",
-     "rob_train_sup_23_test_all_23",
- ]
-
- class CLUTRR(datasets.GeneratorBasedBuilder):
-     """Dataset builder for the CLUTRR benchmark."""
-
-     BUILDER_CONFIGS = [
-         datasets.BuilderConfig(
-             name=task,
-             version=datasets.Version("1.0.0"),
-             description="",
-         )
-         for task in _TASK
-     ]
-
-     def _info(self):
-         return datasets.DatasetInfo(
-             description=_CLUTRR_DESCRIPTION,
-             features=datasets.Features(
-                 {
-                     "id": datasets.Value("string"),
-                     "story": datasets.Value("string"),
-                     "query": datasets.Value("string"),
-                     "target": datasets.Value("string"),
-                     "clean_story": datasets.Value("string"),
-                     "proof_state": datasets.Value("string"),
-                     "f_comb": datasets.Value("string"),
-                     "task_name": datasets.Value("string"),
-                     "story_edges": datasets.Value("string"),
-                     "edge_types": datasets.Value("string"),
-                     "query_edge": datasets.Value("string"),
-                     "genders": datasets.Value("string"),
-                     "task_split": datasets.Value("string"),
-                 }
-             ),
-             # No default supervised_keys (both the story and the query are inputs).
-             supervised_keys=None,
-             homepage="https://www.cs.mcgill.ca/~ksinha4/clutrr/",
-             citation=_CLUTRR_CITATION,
-         )
-
-     def _split_generators(self, dl_manager):
-         """Returns SplitGenerators."""
-         # dl_manager is a datasets.download.DownloadManager that can be used to
-         # download and extract URLs.
-         task = str(self.config.name)
-         urls_to_download = {
-             "test": _URL + task + "/test.csv",
-             "train": _URL + task + "/train.csv",
-             "validation": _URL + task + "/validation.csv",
-         }
-         # For plain (non-archive) files, download_and_extract returns the local
-         # path of each download, which is passed to _generate_examples as-is.
-         downloaded_files = dl_manager.download_and_extract(urls_to_download)
-
-         return [
-             datasets.SplitGenerator(
-                 name=datasets.Split.TRAIN,
-                 # These kwargs will be passed to _generate_examples.
-                 gen_kwargs={
-                     "filepath": downloaded_files["train"],
-                     "task": task,
-                 },
-             ),
-             datasets.SplitGenerator(
-                 name=datasets.Split.VALIDATION,
-                 gen_kwargs={
-                     "filepath": downloaded_files["validation"],
-                     "task": task,
-                 },
-             ),
-             datasets.SplitGenerator(
-                 name=datasets.Split.TEST,
-                 gen_kwargs={
-                     "filepath": downloaded_files["test"],
-                     "task": task,
-                 },
-             ),
-         ]
-
-     def _generate_examples(self, filepath, task):
-         """Yields examples."""
-         with open(filepath, encoding="utf-8") as f:
-             # Each line of the split file is parsed as one JSON record.
-             for i, line in enumerate(f):
-                 data = json.loads(line)
-                 yield i, {
-                     "id": data["id"],
-                     "story": data["story"],
-                     "query": data["query"],
-                     "target": data["target"],
-                     "clean_story": data["clean_story"],
-                     "proof_state": data["proof_state"],
-                     "f_comb": data["f_comb"],
-                     "task_name": data["task_name"],
-                     "story_edges": data["story_edges"],
-                     "edge_types": data["edge_types"],
-                     "query_edge": data["query_edge"],
-                     "genders": data["genders"],
-                     "task_split": data["task_split"],
-                 }
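
For reference, a minimal sketch of how this (now deleted) loading script would be used, assuming a local copy saved as CLUTRR_v1.0.py. The config name, the split, and the ast.literal_eval parsing of the string-serialized graph fields are illustrative assumptions, not something the script guarantees; script-based loading also depends on the installed datasets version (recent releases require trust_remote_code=True or drop script support entirely).

import ast

import datasets

# Load one of the _TASK configurations from a local copy of the script.
dataset = datasets.load_dataset("CLUTRR_v1.0.py", "gen_train23_test2to10")

example = dataset["train"][0]
print(example["story"])
print(example["query"], "->", example["target"])

# Every feature is declared as a string, so structured fields such as
# story_edges arrive as text. If they are Python literals (an assumption
# about the data files, not verified here), they can be recovered with:
story_edges = ast.literal_eval(example["story_edges"])
print(story_edges)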