Nix committed on
Commit f44d9f7
1 Parent(s): b72fa8a

Delete CLUTRR_v1.py

Files changed (1)
  1. CLUTRR_v1.py +0 -155
CLUTRR_v1.py DELETED
@@ -1,155 +0,0 @@
- # -*- coding: utf-8 -*-
- """CLUTRR_Dataset Loading Script.ipynb
-
- Automatically generated by Colaboratory.
-
- Original file is located at
-     https://colab.research.google.com/drive/1q9DdeHA5JbgTHkH6kfZe_KWHQOwHZA97
- """
- # coding=utf-8
- # Copyright 2019 The CLUTRR Datasets Authors and the HuggingFace Datasets Authors.
- #
- # CLUTRR is CC-BY-NC 4.0 (Attr Non-Commercial Inter.) licensed, as found in the LICENSE file.
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
-
- # Lint as: python3
- """The CLUTRR (Compositional Language Understanding and Text-based Relational Reasoning) benchmark."""
-
-
- import csv
- import os
- import textwrap
-
- import numpy as np
-
- import datasets
- import json
-
- _CLUTRR_CITATION = """\
- @article{sinha2019clutrr,
-   Author = {Koustuv Sinha and Shagun Sodhani and Jin Dong and Joelle Pineau and William L. Hamilton},
-   Title = {CLUTRR: A Diagnostic Benchmark for Inductive Reasoning from Text},
-   Year = {2019},
-   journal = {Empirical Methods of Natural Language Processing (EMNLP)},
-   arxiv = {1908.06177}
- }
- """
-
- _CLUTRR_DESCRIPTION = """\
- CLUTRR (Compositional Language Understanding and Text-based Relational Reasoning),
- a diagnostic benchmark suite, is first introduced in (https://arxiv.org/abs/1908.06177)
- to test the systematic generalization and inductive reasoning capabilities of NLU systems.
-
- """
- _URL = "https://raw.githubusercontent.com/kliang5/CLUTRR_huggingface_dataset/main/"
- _TASK = ["gen_train23_test2to10", "gen_train234_test2to10", "rob_train_clean_23_test_all_23", "rob_train_disc_23_test_all_23", "rob_train_irr_23_test_all_23","rob_train_sup_23_test_all_23"]
-
- class CLUTRR_v1(datasets.GeneratorBasedBuilder):
-     """BuilderConfig for CLUTRR."""
-
-     BUILDER_CONFIGS = [
-         datasets.BuilderConfig(
-             name=task,
-             version=datasets.Version("1.0.0"),
-             description="",
-         )
-         for task in _TASK
-     ]
-
-     def _info(self):
-         return datasets.DatasetInfo(
-             description=_CLUTRR_DESCRIPTION,
-             features=datasets.Features(
-                 {
-                     "id": datasets.Value("string"),
-                     "story": datasets.Value("string"),
-                     "query": datasets.Value("string"),
-                     "target": datasets.Value("int32"),
-                     "clean_story": datasets.Value("string"),
-                     "proof_state": datasets.Value("string"),
-                     "f_comb": datasets.Value("string"),
-                     "task_name": datasets.Value("string"),
-                     "story_edges": datasets.Value("string"),
-                     "edge_types": datasets.Value("string"),
-                     "query_edge": datasets.Value("string"),
-                     "genders": datasets.Value("string"),
-                     "task_split": datasets.Value("string"),
-                 }
-             ),
-             # No default supervised_keys (as we have to pass both premise
-             # and hypothesis as input).
-             supervised_keys=None,
-             homepage="https://www.cs.mcgill.ca/~ksinha4/clutrr/",
-             citation=_CLUTRR_CITATION,
-         )
-
-     def _split_generators(self, dl_manager):
-         """Returns SplitGenerators."""
-         # dl_manager is a datasets.download.DownloadManager that can be used to
-         # download and extract URLs
-
-         task = str(self.config.name)
-         urls_to_download = {
-             "test": _URL + task + "/test.csv",
-             "train": _URL + task + "/train.csv",
-             "validation": _URL + task + "/validation.csv",
-         }
-         downloaded_files = dl_manager.download_and_extract(urls_to_download)
-
-
-         return [
-             datasets.SplitGenerator(
-                 name=datasets.Split.TRAIN,
-                 # These kwargs will be passed to _generate_examples
-                 gen_kwargs={
-                     "filepath": downloaded_files["train"],
-                     "task": task,
-                 },
-             ),
-             datasets.SplitGenerator(
-                 name=datasets.Split.VALIDATION,
-                 # These kwargs will be passed to _generate_examples
-                 gen_kwargs={
-                     "filepath": downloaded_files["validation"],
-                     "task": task,
-                 },
-             ),
-             datasets.SplitGenerator(
-                 name=datasets.Split.TEST,
-                 # These kwargs will be passed to _generate_examples
-                 gen_kwargs={
-                     "filepath": downloaded_files["test"],
-                     "task": task,
-                 },
-             ),
-         ]
-
-     def _generate_examples(self, filepath, task):
-         """Yields examples."""
-         with open(filepath, encoding="utf-8") as f:
-             reader = csv.reader(f)
-             for id_, data in enumerate(reader):
-                 if id_ == 0:
-                     continue
-                 # yield id_, data
-                 # id_ += 1
-                 yield id_, {
-                     "id": data[1],
-                     "story": data[2],
-                     "query": data[3],
-                     "target": data[4],
-                     "clean_story": data[5],
-                     "proof_state": data[6],
-                     "f_comb": data[7],
-                     "task_name": data[8],
-                     "story_edges": data[9],
-                     "edge_types": data[10],
-                     "query_edge": data[11],
-                     "genders": data[12],
-                     "task_split": data[13],
-                 }
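
For context, a loading script like the one removed above is normally consumed through the datasets library rather than run directly. The snippet below is an illustrative sketch only: it assumes a local copy of the deleted CLUTRR_v1.py (the path is a placeholder) and uses the gen_train23_test2to10 configuration defined in _TASK.

from datasets import load_dataset

# Hypothetical usage of the deleted builder script; "./CLUTRR_v1.py" is a
# placeholder for wherever a local copy of the script is kept.
clutrr = load_dataset("./CLUTRR_v1.py", "gen_train23_test2to10")

# Each example exposes the fields declared in _info(), e.g. the story text,
# the queried entity pair, and the target relation label.
example = clutrr["train"][0]
print(example["story"], example["query"], example["target"])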