Languages: English
Multilinguality: monolingual
Size Categories: 100K<n<1M, n<1K
Language Creators: found
Annotations Creators: machine-generated
Source Datasets: original
ArXiv: 1511.02301
License: GNU Free Documentation License v1.3
albertvillanova committed
Commit c0c7e39
1 Parent(s): a731474

Delete loading script
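On the Hub, a loading script is usually deleted once the dataset's files have been converted to a script-free format such as Parquet, so that load_dataset no longer has to execute remote code. A minimal usage sketch, assuming the converted data keeps the same dataset id and the config names defined by the deleted script below ("raw", "V", "P", "NE", "CN"):

    from datasets import load_dataset

    # "NE" is the named-entity config defined by the deleted script; its
    # examples expose "sentences", "question", "answer" and "options".
    ds = load_dataset("cbt", "NE")
    print(ds["validation"][0]["question"])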

Files changed (1)
  1. cbt.py +0 -206
cbt.py DELETED
@@ -1,206 +0,0 @@
- # coding=utf-8
- # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- """Children's Book Test Dataset."""
-
-
- import datasets
-
-
- _CITATION = """\
- @misc{hill2016goldilocks,
-     title={The Goldilocks Principle: Reading Children's Books with Explicit Memory Representations},
-     author={Felix Hill and Antoine Bordes and Sumit Chopra and Jason Weston},
-     year={2016},
-     eprint={1511.02301},
-     archivePrefix={arXiv},
-     primaryClass={cs.CL}
- }
- """
-
-
- _DESCRIPTION = """\
- The Children’s Book Test (CBT) is designed to measure directly
- how well language models can exploit wider linguistic context.
- The CBT is built from books that are freely available.
- """
-
- _HOMEPAGE = "https://research.fb.com/downloads/babi/"
-
- _LICENSE = """GNU Free Documentation License v1.3"""
-
- ZIP_URL = "data/CBTest.tgz"
- dir = "CBTest/data/"
- paths = {
-     "raw": {"train": dir + "cbt_train.txt", "valid": dir + "cbt_valid.txt", "test": dir + "cbt_test.txt"},
-     "V": {
-         "train": dir + "cbtest_V_train.txt",
-         "valid": dir + "cbtest_V_valid_2000ex.txt",
-         "test": dir + "cbtest_V_test_2500ex.txt",
-     },
-     "P": {
-         "train": dir + "cbtest_P_train.txt",
-         "valid": dir + "cbtest_P_valid_2000ex.txt",
-         "test": dir + "cbtest_P_test_2500ex.txt",
-     },
-     "NE": {
-         "train": dir + "cbtest_NE_train.txt",
-         "valid": dir + "cbtest_NE_valid_2000ex.txt",
-         "test": dir + "cbtest_NE_test_2500ex.txt",
-     },
-     "CN": {
-         "train": dir + "cbtest_CN_train.txt",
-         "valid": dir + "cbtest_CN_valid_2000ex.txt",
-         "test": dir + "cbtest_CN_test_2500ex.txt",
-     },
- }
-
-
- class Cbt(datasets.GeneratorBasedBuilder):
-     """Builder for the Children's Book Test (CBT) dataset."""
-
-     VERSION = datasets.Version("1.1.0")
-
-     BUILDER_CONFIGS = [
-         datasets.BuilderConfig(
-             name="raw", version=VERSION, description="This config covers the raw CBT books"
-         ),
-         datasets.BuilderConfig(
-             name="V", version=VERSION, description="This config covers the verb answer CBT dataset"
-         ),
-         datasets.BuilderConfig(
-             name="P", version=VERSION, description="This config covers the preposition answer CBT dataset"
-         ),
-         datasets.BuilderConfig(
-             name="NE",
-             version=VERSION,
-             description="This config covers the named entity answer CBT dataset",
-         ),
-         datasets.BuilderConfig(
-             name="CN", version=VERSION, description="This config covers the common noun answer CBT dataset"
-         ),
-     ]
-
-     def _info(self):
-         if self.config.name in ["V", "P", "NE", "CN"]:
-             features = datasets.Features(
-                 {
-                     "sentences": datasets.Sequence(datasets.Value("string")),  # There are 20 sentences
-                     "question": datasets.Value("string"),
-                     "answer": datasets.Value("string"),
-                     "options": datasets.Sequence(datasets.Value("string")),
-                 }
-             )
-         else:  # The "raw" config exposes the plain book texts instead.
-             features = datasets.Features({"title": datasets.Value("string"), "content": datasets.Value("string")})
-         return datasets.DatasetInfo(
-             # This is the description that will appear on the datasets page.
-             description=_DESCRIPTION,
-             # This defines the different columns of the dataset and their types
-             features=features,  # Defined above because they differ between the configurations
-             # If there's a common (input, target) tuple from the features,
-             # specify them here. They'll be used if as_supervised=True in
-             # builder.as_dataset.
-             supervised_keys=None,
-             # Homepage of the dataset for documentation
-             homepage=_HOMEPAGE,
-             # License for the dataset if available
-             license=_LICENSE,
-             # Citation for the dataset
-             citation=_CITATION,
-         )
-
-     def _split_generators(self, dl_manager):
-         """Returns SplitGenerators."""
-         my_urls = ZIP_URL  # The archive bundles every config, so it is downloaded as a whole.
-         archive = dl_manager.download(my_urls)
-         return [
-             datasets.SplitGenerator(
-                 name=datasets.Split.TRAIN,
-                 # These kwargs will be passed to _generate_examples
-                 gen_kwargs={"filepath": paths[self.config.name]["train"], "files": dl_manager.iter_archive(archive)},
-             ),
-             datasets.SplitGenerator(
-                 name=datasets.Split.TEST,
-                 # These kwargs will be passed to _generate_examples
-                 gen_kwargs={"filepath": paths[self.config.name]["test"], "files": dl_manager.iter_archive(archive)},
-             ),
-             datasets.SplitGenerator(
-                 name=datasets.Split.VALIDATION,
-                 # These kwargs will be passed to _generate_examples
-                 gen_kwargs={"filepath": paths[self.config.name]["valid"], "files": dl_manager.iter_archive(archive)},
-             ),
-         ]
-
-     def _generate_examples(self, filepath, files):
-         """Yields examples as (key, example) tuples."""
-         for path, f in files:
-             if path == filepath:
-                 if self.config.name != "raw":
-                     sentences = []
-                     example_idx = 0
-                     for idx, line in enumerate(f):
-                         line = line.decode("utf-8")
-                         if line.strip() == "":
-                             continue
-
-                         elif line.split()[0] == "21":
-                             splitline = line.split("\t")  # question, answer and options are tab-separated
-                             question = splitline[0]
-                             answer = splitline[1]
-                             options = splitline[-1]
-                             question = question[2:].strip()  # The first two characters contain `21`.
-                             answer = answer.strip()
-                             options = options.strip().split("|")
-                             yield example_idx, {
-                                 "sentences": sentences,
-                                 "question": question,
-                                 "options": options,
-                                 "answer": answer,
-                             }
-
-                             sentences = []
-                             example_idx += 1
-                         else:
-                             if len(line.split()[0]) == 1:
-                                 sentences.append(line[1:].strip())
-                             else:
-                                 sentences.append(line[2:].strip())
-                             # Text might contain double spaces.
-                 else:
-                     book_idx = 0
-                     book_sentences = []
-                     for idx, line in enumerate(f):
-                         line = line.decode("utf-8")
-                         if line[:12] == "_BOOK_TITLE_":
-                             if idx == 0:  # First line:
-                                 title = line.split(":")[1].strip()
-                             else:
-                                 yield book_idx, {
-                                     "title": title,
-                                     "content": "".join(book_sentences),
-                                 }
-                                 title = line.split(":")[1].strip()
-                                 book_sentences = []
-                                 book_idx += 1
-                         else:
-                             book_sentences.append(line)
-                     else:  # for-else: emit the final book once the file is exhausted.
-                         yield book_idx, {
-                             "title": title,
-                             "content": "".join(book_sentences),
-                         }
-                         book_sentences = []
-                         book_idx += 1
-                 break
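
For reference, the deleted parser assumed the standard CBT question layout: twenty numbered context sentences followed by a line starting with "21" that carries the cloze question, the answer, and the "|"-joined candidate options in tab-separated fields. A self-contained sketch of that parsing step on a made-up block (shortened to two context sentences, and using split(" ", 1) instead of the script's fixed-width slicing):

    import io

    # Illustrative input in the layout _generate_examples expected.
    sample = io.StringIO(
        "1 Once upon a time there was a fox .\n"
        "2 The fox met a crow .\n"
        "21 The XXXXX looked up at the crow .\tfox\t\tcrow|fox|tree\n"
    )

    sentences = []
    for line in sample:
        if not line.strip():
            continue
        if line.split()[0] == "21":
            # Question line: tab-separated question, answer and options.
            parts = line.split("\t")
            question = parts[0][2:].strip()  # drop the leading "21"
            answer = parts[1].strip()
            options = parts[-1].strip().split("|")
            print({"sentences": sentences, "question": question,
                   "answer": answer, "options": options})
            sentences = []
        else:
            # Context sentence: strip the leading line number.
            sentences.append(line.split(" ", 1)[1].strip())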