Nix committed on
Commit
202128e
1 Parent(s): 60f81cf

Delete CLUTRR_v1.py

Files changed (1)
  1. CLUTRR_v1.py +0 -153
CLUTRR_v1.py DELETED
@@ -1,153 +0,0 @@
- # -*- coding: utf-8 -*-
- """CLUTRR_Dataset Loading Script.ipynb
-
- Automatically generated by Colaboratory.
-
- Original file is located at
- https://colab.research.google.com/drive/1q9DdeHA5JbgTHkH6kfZe_KWHQOwHZA97
- """
-
- # coding=utf-8
- # Copyright 2019 The CLUTRR Datasets Authors and the HuggingFace Datasets Authors.
- #
- # CLUTRR is CC-BY-NC 4.0 (Attribution-NonCommercial 4.0 International) licensed, as found in the LICENSE file.
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
-
- # Lint as: python3
- """The CLUTRR (Compositional Language Understanding and Text-based Relational Reasoning) benchmark."""
-
-
- import csv
- import json  # needed by _generate_examples below
- import os
- import textwrap
-
- import numpy as np
-
- import datasets
-
-
- _CLUTRR_CITATION = """\
- @article{sinha2019clutrr,
-     Author = {Koustuv Sinha and Shagun Sodhani and Jin Dong and Joelle Pineau and William L. Hamilton},
-     Title = {CLUTRR: A Diagnostic Benchmark for Inductive Reasoning from Text},
-     Year = {2019},
-     journal = {Empirical Methods in Natural Language Processing (EMNLP)},
-     arxiv = {1908.06177}
- }
- """
-
- _CLUTRR_DESCRIPTION = """\
- CLUTRR (Compositional Language Understanding and Text-based Relational Reasoning),
- a diagnostic benchmark suite, was first introduced in https://arxiv.org/abs/1908.06177
- to test the systematic generalization and inductive reasoning capabilities of NLU systems.
-
- """
- _URL = "https://github.com/kliang5/CLUTRR_huggingface_dataset/tree/main/"
- _TASK = ["gen_train23_test2to10", "gen_train234_test2to10", "rob_train_clean_23_test_all_23", "rob_train_disc_23_test_all_23", "rob_train_irr_23_test_all_23", "rob_train_sup_23_test_all_23"]
-
-
- class CLUTRR_v1(datasets.GeneratorBasedBuilder):
-     """BuilderConfig for CLUTRR."""
-
-     BUILDER_CONFIGS = [
-         datasets.BuilderConfig(
-             name=task,
-             version=datasets.Version("1.0.0"),
-             description="",
-         )
-         for task in _TASK
-     ]
-
-     def _info(self):
-         return datasets.DatasetInfo(
-             description=_CLUTRR_DESCRIPTION,
-             features=datasets.Features(
-                 {
-                     "id": datasets.Value("string"),
-                     "story": datasets.Value("string"),
-                     "query": datasets.Value("string"),
-                     "target": datasets.Value("string"),
-                     "clean_story": datasets.Value("string"),
-                     "proof_state": datasets.Value("string"),
-                     "f_comb": datasets.Value("string"),
-                     "task_name": datasets.Value("string"),
-                     "story_edges": datasets.Value("string"),
-                     "edge_types": datasets.Value("string"),
-                     "query_edge": datasets.Value("string"),
-                     "genders": datasets.Value("string"),
-                     "task_split": datasets.Value("string"),
-                 }
-             ),
-             # No default supervised_keys (as we have to pass both premise
-             # and hypothesis as input).
-             supervised_keys=None,
-             homepage="https://www.cs.mcgill.ca/~ksinha4/clutrr/",
-             citation=_CLUTRR_CITATION,
-         )
-
-     def _split_generators(self, dl_manager):
-         """Returns SplitGenerators."""
-         # dl_manager is a datasets.download.DownloadManager that can be used to
-         # download and extract URLs
-
-         task = str(self.config.name)
-         urls_to_download = {
-             "test": _URL + task + "/test.csv",
-             "train": _URL + task + "/train.csv",
-             "validation": _URL + task + "/validation.csv",
-         }
-         downloaded_files = dl_manager.download_and_extract(urls_to_download)
-
-         return [
-             datasets.SplitGenerator(
-                 name=datasets.Split.TRAIN,
-                 # These kwargs will be passed to _generate_examples
-                 gen_kwargs={
-                     "filepath": os.path.join(downloaded_files["train"], task + "_train.jsonl"),
-                     "task": task,
-                 },
-             ),
-             datasets.SplitGenerator(
-                 name=datasets.Split.VALIDATION,
-                 # These kwargs will be passed to _generate_examples
-                 gen_kwargs={
-                     "filepath": os.path.join(downloaded_files["validation"], task + "_val.jsonl"),
-                     "task": task,
-                 },
-             ),
-             datasets.SplitGenerator(
-                 name=datasets.Split.TEST,
-                 # These kwargs will be passed to _generate_examples
-                 gen_kwargs={
-                     "filepath": os.path.join(downloaded_files["test"], task + "_test.jsonl"),
-                     "task": task,
-                 },
-             ),
-         ]
-
-     def _generate_examples(self, filepath, task):
-         """Yields examples."""
-         with open(filepath, encoding="utf-8") as f:
-             i = 0
-             for line in f:
-                 data = json.loads(line)
-                 i += 1
-                 yield i, {
-                     "id": data["id"],
-                     "story": data["story"],
-                     "query": data["query"],
-                     "target": data["target"],
-                     "clean_story": data["clean_story"],
-                     "proof_state": data["proof_state"],
-                     "f_comb": data["f_comb"],
-                     "task_name": data["task_name"],
-                     "story_edges": data["story_edges"],
-                     "edge_types": data["edge_types"],
-                     "query_edge": data["query_edge"],
-                     "genders": data["genders"],
-                     "task_split": data["task_split"],
-                 }
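
For context, a loader script like the one removed above is normally consumed through the `datasets` library's `load_dataset` entry point, using one of the `_TASK` names as the configuration. A minimal usage sketch, assuming a local copy of the deleted script and that the data files referenced by `_URL` are still reachable; the local path below is hypothetical, and recent `datasets` releases may additionally require `trust_remote_code=True` for script-based datasets:

from datasets import load_dataset

# Hypothetical: point load_dataset at a local copy of the removed loader script
# and pick one of the _TASK configurations. This would invoke _split_generators
# to fetch the files and _generate_examples to yield the rows declared in _info().
dataset = load_dataset("./CLUTRR_v1.py", "gen_train23_test2to10")

# Every declared feature is a string, e.g. the story text and the relation query.
print(dataset["train"][0]["story"])
print(dataset["train"][0]["query"])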