system (HF staff) committed
Commit aab6dc1 (0 parents)

Update files from the datasets library (from 1.0.0)


Release notes: https://github.com/huggingface/datasets/releases/tag/1.0.0
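
With datasets 1.0.0 installed, the files added below are all the library needs to build this dataset. A minimal usage sketch (not part of the commit; "tsv_format" is one of the four configs defined in scitail.py further down):

# Usage sketch: load one config of SciTail with the datasets library
# referenced in the release notes above.
from datasets import load_dataset

scitail = load_dataset("scitail", "tsv_format")
print(scitail)              # DatasetDict with train / validation / test splits
print(scitail["train"][0])  # {"premise": ..., "hypothesis": ..., "label": ...}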

.gitattributes ADDED
@@ -0,0 +1,27 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bin.* filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zstandard filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
dataset_infos.json ADDED
@@ -0,0 +1 @@
+ {"snli_format": {"description": "The SciTail dataset is an entailment dataset created from multiple-choice science exams and web sentences. Each question \nand the correct answer choice are converted into an assertive statement to form the hypothesis. We use information \nretrieval to obtain relevant text from a large text corpus of web sentences, and use these sentences as a premise P. We \ncrowdsource the annotation of such premise-hypothesis pair as supports (entails) or not (neutral), in order to create \nthe SciTail dataset. The dataset contains 27,026 examples with 10,101 examples with entails label and 16,925 examples \nwith neutral label\n", "citation": "inproceedings{scitail,\n Author = {Tushar Khot and Ashish Sabharwal and Peter Clark},\n Booktitle = {AAAI},\n Title = {{SciTail}: A Textual Entailment Dataset from Science Question Answering},\n Year = {2018}\n}\n", "homepage": "https://allenai.org/data/scitail", "license": "", "features": {"sentence1_binary_parse": {"dtype": "string", "id": null, "_type": "Value"}, "sentence1_parse": {"dtype": "string", "id": null, "_type": "Value"}, "sentence1": {"dtype": "string", "id": null, "_type": "Value"}, "sentence2_parse": {"dtype": "string", "id": null, "_type": "Value"}, "sentence2": {"dtype": "string", "id": null, "_type": "Value"}, "annotator_labels": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "gold_label": {"dtype": "string", "id": null, "_type": "Value"}}, "supervised_keys": null, "builder_name": "scitail", "config_name": "snli_format", "version": {"version_str": "1.1.0", "description": "", "datasets_version_to_prepare": null, "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 22495833, "num_examples": 23596, "dataset_name": "scitail"}, "test": {"name": "test", "num_bytes": 2008631, "num_examples": 2126, "dataset_name": "scitail"}, "validation": {"name": "validation", "num_bytes": 1266529, "num_examples": 1304, "dataset_name": "scitail"}}, "download_checksums": {"http://data.allenai.org.s3.amazonaws.com/downloads/SciTailV1.1.zip": {"num_bytes": 14174621, "checksum": "3fccd37350a94ca280b75998568df85fc2fc62843a3198d644fcbf858e6943d5"}}, "download_size": 14174621, "dataset_size": 25770993, "size_in_bytes": 39945614}, "tsv_format": {"description": "The SciTail dataset is an entailment dataset created from multiple-choice science exams and web sentences. Each question \nand the correct answer choice are converted into an assertive statement to form the hypothesis. We use information \nretrieval to obtain relevant text from a large text corpus of web sentences, and use these sentences as a premise P. We \ncrowdsource the annotation of such premise-hypothesis pair as supports (entails) or not (neutral), in order to create \nthe SciTail dataset. 
The dataset contains 27,026 examples with 10,101 examples with entails label and 16,925 examples \nwith neutral label\n", "citation": "inproceedings{scitail,\n Author = {Tushar Khot and Ashish Sabharwal and Peter Clark},\n Booktitle = {AAAI},\n Title = {{SciTail}: A Textual Entailment Dataset from Science Question Answering},\n Year = {2018}\n}\n", "homepage": "https://allenai.org/data/scitail", "license": "", "features": {"premise": {"dtype": "string", "id": null, "_type": "Value"}, "hypothesis": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"dtype": "string", "id": null, "_type": "Value"}}, "supervised_keys": null, "builder_name": "scitail", "config_name": "tsv_format", "version": {"version_str": "1.1.0", "description": "", "datasets_version_to_prepare": null, "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 4618115, "num_examples": 23097, "dataset_name": "scitail"}, "test": {"name": "test", "num_bytes": 411343, "num_examples": 2126, "dataset_name": "scitail"}, "validation": {"name": "validation", "num_bytes": 261086, "num_examples": 1304, "dataset_name": "scitail"}}, "download_checksums": {"http://data.allenai.org.s3.amazonaws.com/downloads/SciTailV1.1.zip": {"num_bytes": 14174621, "checksum": "3fccd37350a94ca280b75998568df85fc2fc62843a3198d644fcbf858e6943d5"}}, "download_size": 14174621, "dataset_size": 5290544, "size_in_bytes": 19465165}, "dgem_format": {"description": "The SciTail dataset is an entailment dataset created from multiple-choice science exams and web sentences. Each question \nand the correct answer choice are converted into an assertive statement to form the hypothesis. We use information \nretrieval to obtain relevant text from a large text corpus of web sentences, and use these sentences as a premise P. We \ncrowdsource the annotation of such premise-hypothesis pair as supports (entails) or not (neutral), in order to create \nthe SciTail dataset. 
The dataset contains 27,026 examples with 10,101 examples with entails label and 16,925 examples \nwith neutral label\n", "citation": "inproceedings{scitail,\n Author = {Tushar Khot and Ashish Sabharwal and Peter Clark},\n Booktitle = {AAAI},\n Title = {{SciTail}: A Textual Entailment Dataset from Science Question Answering},\n Year = {2018}\n}\n", "homepage": "https://allenai.org/data/scitail", "license": "", "features": {"premise": {"dtype": "string", "id": null, "_type": "Value"}, "hypothesis": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"dtype": "string", "id": null, "_type": "Value"}, "hypothesis_graph_structure": {"dtype": "string", "id": null, "_type": "Value"}}, "supervised_keys": null, "builder_name": "scitail", "config_name": "dgem_format", "version": {"version_str": "1.1.0", "description": "", "datasets_version_to_prepare": null, "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 6832104, "num_examples": 23088, "dataset_name": "scitail"}, "test": {"name": "test", "num_bytes": 608213, "num_examples": 2126, "dataset_name": "scitail"}, "validation": {"name": "validation", "num_bytes": 394040, "num_examples": 1304, "dataset_name": "scitail"}}, "download_checksums": {"http://data.allenai.org.s3.amazonaws.com/downloads/SciTailV1.1.zip": {"num_bytes": 14174621, "checksum": "3fccd37350a94ca280b75998568df85fc2fc62843a3198d644fcbf858e6943d5"}}, "download_size": 14174621, "dataset_size": 7834357, "size_in_bytes": 22008978}, "predictor_format": {"description": "The SciTail dataset is an entailment dataset created from multiple-choice science exams and web sentences. Each question \nand the correct answer choice are converted into an assertive statement to form the hypothesis. We use information \nretrieval to obtain relevant text from a large text corpus of web sentences, and use these sentences as a premise P. We \ncrowdsource the annotation of such premise-hypothesis pair as supports (entails) or not (neutral), in order to create \nthe SciTail dataset. 
The dataset contains 27,026 examples with 10,101 examples with entails label and 16,925 examples \nwith neutral label\n", "citation": "inproceedings{scitail,\n Author = {Tushar Khot and Ashish Sabharwal and Peter Clark},\n Booktitle = {AAAI},\n Title = {{SciTail}: A Textual Entailment Dataset from Science Question Answering},\n Year = {2018}\n}\n", "homepage": "https://allenai.org/data/scitail", "license": "", "features": {"answer": {"dtype": "string", "id": null, "_type": "Value"}, "sentence2_structure": {"dtype": "string", "id": null, "_type": "Value"}, "sentence1": {"dtype": "string", "id": null, "_type": "Value"}, "sentence2": {"dtype": "string", "id": null, "_type": "Value"}, "gold_label": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}}, "supervised_keys": null, "builder_name": "scitail", "config_name": "predictor_format", "version": {"version_str": "1.1.0", "description": "", "datasets_version_to_prepare": null, "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 8884823, "num_examples": 23587, "dataset_name": "scitail"}, "test": {"name": "test", "num_bytes": 797161, "num_examples": 2126, "dataset_name": "scitail"}, "validation": {"name": "validation", "num_bytes": 511305, "num_examples": 1304, "dataset_name": "scitail"}}, "download_checksums": {"http://data.allenai.org.s3.amazonaws.com/downloads/SciTailV1.1.zip": {"num_bytes": 14174621, "checksum": "3fccd37350a94ca280b75998568df85fc2fc62843a3198d644fcbf858e6943d5"}}, "download_size": 14174621, "dataset_size": 10193289, "size_in_bytes": 24367910}}
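
dataset_infos.json above records, for each config (snli_format, tsv_format, dgem_format, predictor_format), the feature schema, the split sizes, and the checksum of SciTailV1.1.zip. A minimal sketch for inspecting it locally (the file path and the printed fields are illustrative assumptions):

# Sketch only: summarize the metadata recorded in dataset_infos.json.
# Assumes the file has been downloaded to the current directory.
import json

with open("dataset_infos.json", encoding="utf-8") as f:
    infos = json.load(f)

for config_name, info in infos.items():
    split_sizes = {name: split["num_examples"] for name, split in info["splits"].items()}
    print(config_name, split_sizes, f'{info["download_size"]} bytes to download')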
dummy/dgem_format/1.1.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9780d78e6912f6c0eaff9c195ff2b436ab51b16c2ce701518f797b36a8ef0726
+ size 2335
dummy/predictor_format/1.1.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9f6ef64a1229f7fda023409e0aac23bdbedfb031659ac9483afda7ad5aa8a1fa
+ size 2618
dummy/snli_format/1.1.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dd9ec34873ade4d199c1e6cb4bf6b7517c919019fbb374bd3af456dfd121c41a
+ size 4903
dummy/tsv_format/1.1.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:02ae4a4663505680821a2a09264e7a06294eb7aaf16e91ec8c38ca29a82b4015
+ size 2834
scitail.py ADDED
@@ -0,0 +1,299 @@
+ """SciTail: A Textual Entailment Dataset from Science Question Answering."""
+
+ from __future__ import absolute_import, division, print_function
+
+ import csv
+ import json
+ import os
+ import textwrap
+
+ import datasets
+
+
+ # TODO(sciTail): BibTeX citation
+ _CITATION = """\
+ @inproceedings{scitail,
+     Author = {Tushar Khot and Ashish Sabharwal and Peter Clark},
+     Booktitle = {AAAI},
+     Title = {{SciTail}: A Textual Entailment Dataset from Science Question Answering},
+     Year = {2018}
+ }
+ """
+
+ # TODO(sciTail):
+ _DESCRIPTION = """\
+ The SciTail dataset is an entailment dataset created from multiple-choice science exams and web sentences. Each question
+ and the correct answer choice are converted into an assertive statement to form the hypothesis. We use information
+ retrieval to obtain relevant text from a large text corpus of web sentences, and use these sentences as a premise P. We
+ crowdsource the annotation of such premise-hypothesis pair as supports (entails) or not (neutral), in order to create
+ the SciTail dataset. The dataset contains 27,026 examples with 10,101 examples with entails label and 16,925 examples
+ with neutral label
+ """
+
+ _URL = "http://data.allenai.org.s3.amazonaws.com/downloads/SciTailV1.1.zip"
+
+
+ class ScitailConfig(datasets.BuilderConfig):
+
+     """BuilderConfig for SciTail."""
+
+     def __init__(self, **kwargs):
+         """
+
+         Args:
+             **kwargs: keyword arguments forwarded to super.
+         """
+         super(ScitailConfig, self).__init__(version=datasets.Version("1.1.0", ""), **kwargs)
+
+
+ class Scitail(datasets.GeneratorBasedBuilder):
+     """SciTail textual entailment dataset."""
+
+     # TODO(sciTail): Set up version.
+     VERSION = datasets.Version("1.1.0")
+     BUILDER_CONFIGS = [
+         ScitailConfig(
+             name="snli_format",
+             description="JSONL format used by SNLI with a JSON object corresponding to each entailment example in each line.",
+         ),
+         ScitailConfig(
+             name="tsv_format", description="Tab-separated format with three columns: premise hypothesis label"
+         ),
+         ScitailConfig(
+             name="dgem_format",
+             description="Tab-separated format used by the DGEM model: premise hypothesis label hypothesis graph structure",
+         ),
+         ScitailConfig(
+             name="predictor_format",
+             description=textwrap.dedent(
+                 """\
+                 AllenNLP predictors work only with JSONL format. This folder contains the SciTail train/dev/test in JSONL format
+                 so that it can be loaded into the predictors. Each line is a JSON object with the following keys:
+                 gold_label : the example label from {entails, neutral}
+                 sentence1: the premise
+                 sentence2: the hypothesis
+                 sentence2_structure: structure from the hypothesis """
+             ),
+         ),
+     ]
+
+     def _info(self):
+         # TODO(sciTail): Specifies the datasets.DatasetInfo object
+         if self.config.name == "snli_format":
+             return datasets.DatasetInfo(
+                 # This is the description that will appear on the datasets page.
+                 description=_DESCRIPTION,
+                 # datasets.features.FeatureConnectors
+                 features=datasets.Features(
+                     {
+                         "sentence1_binary_parse": datasets.Value("string"),
+                         "sentence1_parse": datasets.Value("string"),
+                         "sentence1": datasets.Value("string"),
+                         "sentence2_parse": datasets.Value("string"),
+                         "sentence2": datasets.Value("string"),
+                         "annotator_labels": datasets.features.Sequence(datasets.Value("string")),
+                         "gold_label": datasets.Value("string")
+                         # These are the features of your dataset like images, labels ...
+                     }
+                 ),
+                 # If there's a common (input, target) tuple from the features,
+                 # specify them here. They'll be used if as_supervised=True in
+                 # builder.as_dataset.
+                 supervised_keys=None,
+                 # Homepage of the dataset for documentation
+                 homepage="https://allenai.org/data/scitail",
+                 citation=_CITATION,
+             )
+         elif self.config.name == "tsv_format":
+             return datasets.DatasetInfo(
+                 # This is the description that will appear on the datasets page.
+                 description=_DESCRIPTION,
+                 # datasets.features.FeatureConnectors
+                 features=datasets.Features(
+                     {
+                         "premise": datasets.Value("string"),
+                         "hypothesis": datasets.Value("string"),
+                         "label": datasets.Value("string")
+                         # These are the features of your dataset like images, labels ...
+                     }
+                 ),
+                 # If there's a common (input, target) tuple from the features,
+                 # specify them here. They'll be used if as_supervised=True in
+                 # builder.as_dataset.
+                 supervised_keys=None,
+                 # Homepage of the dataset for documentation
+                 homepage="https://allenai.org/data/scitail",
+                 citation=_CITATION,
+             )
+         elif self.config.name == "predictor_format":
+             return datasets.DatasetInfo(
+                 # This is the description that will appear on the datasets page.
+                 description=_DESCRIPTION,
+                 # datasets.features.FeatureConnectors
+                 features=datasets.Features(
+                     {
+                         "answer": datasets.Value("string"),
+                         "sentence2_structure": datasets.Value("string"),
+                         "sentence1": datasets.Value("string"),
+                         "sentence2": datasets.Value("string"),
+                         "gold_label": datasets.Value("string"),
+                         "question": datasets.Value("string")
+                         # These are the features of your dataset like images, labels ...
+                     }
+                 ),
+                 # If there's a common (input, target) tuple from the features,
+                 # specify them here. They'll be used if as_supervised=True in
+                 # builder.as_dataset.
+                 supervised_keys=None,
+                 # Homepage of the dataset for documentation
+                 homepage="https://allenai.org/data/scitail",
+                 citation=_CITATION,
+             )
+         elif self.config.name == "dgem_format":
+             return datasets.DatasetInfo(
+                 # This is the description that will appear on the datasets page.
+                 description=_DESCRIPTION,
+                 # datasets.features.FeatureConnectors
+                 features=datasets.Features(
+                     {
+                         "premise": datasets.Value("string"),
+                         "hypothesis": datasets.Value("string"),
+                         "label": datasets.Value("string"),
+                         "hypothesis_graph_structure": datasets.Value("string")
+                         # These are the features of your dataset like images, labels ...
+                     }
+                 ),
+                 # If there's a common (input, target) tuple from the features,
+                 # specify them here. They'll be used if as_supervised=True in
+                 # builder.as_dataset.
+                 supervised_keys=None,
+                 # Homepage of the dataset for documentation
+                 homepage="https://allenai.org/data/scitail",
+                 citation=_CITATION,
+             )
+
+     def _split_generators(self, dl_manager):
+         """Returns SplitGenerators."""
+         # TODO(sciTail): Downloads the data and defines the splits
+         # dl_manager is a datasets.download.DownloadManager that can be used to
+         # download and extract URLs
+         dl_dir = dl_manager.download_and_extract(_URL)
+         data_dir = os.path.join(dl_dir, "SciTailV1.1")
+         snli = os.path.join(data_dir, "snli_format")
+         dgem = os.path.join(data_dir, "dgem_format")
+         tsv = os.path.join(data_dir, "tsv_format")
+         predictor = os.path.join(data_dir, "predictor_format")
+         if self.config.name == "snli_format":
+             return [
+                 datasets.SplitGenerator(
+                     name=datasets.Split.TRAIN,
+                     # These kwargs will be passed to _generate_examples
+                     gen_kwargs={"filepath": os.path.join(snli, "scitail_1.0_train.txt")},
+                 ),
+                 datasets.SplitGenerator(
+                     name=datasets.Split.TEST,
+                     # These kwargs will be passed to _generate_examples
+                     gen_kwargs={"filepath": os.path.join(snli, "scitail_1.0_test.txt")},
+                 ),
+                 datasets.SplitGenerator(
+                     name=datasets.Split.VALIDATION,
+                     # These kwargs will be passed to _generate_examples
+                     gen_kwargs={"filepath": os.path.join(snli, "scitail_1.0_dev.txt")},
+                 ),
+             ]
+         elif self.config.name == "tsv_format":
+             return [
+                 datasets.SplitGenerator(
+                     name=datasets.Split.TRAIN,
+                     # These kwargs will be passed to _generate_examples
+                     gen_kwargs={"filepath": os.path.join(tsv, "scitail_1.0_train.tsv")},
+                 ),
+                 datasets.SplitGenerator(
+                     name=datasets.Split.TEST,
+                     # These kwargs will be passed to _generate_examples
+                     gen_kwargs={"filepath": os.path.join(tsv, "scitail_1.0_test.tsv")},
+                 ),
+                 datasets.SplitGenerator(
+                     name=datasets.Split.VALIDATION,
+                     # These kwargs will be passed to _generate_examples
+                     gen_kwargs={"filepath": os.path.join(tsv, "scitail_1.0_dev.tsv")},
+                 ),
+             ]
+         elif self.config.name == "predictor_format":
+             return [
+                 datasets.SplitGenerator(
+                     name=datasets.Split.TRAIN,
+                     # These kwargs will be passed to _generate_examples
+                     gen_kwargs={"filepath": os.path.join(predictor, "scitail_1.0_structure_train.jsonl")},
+                 ),
+                 datasets.SplitGenerator(
+                     name=datasets.Split.TEST,
+                     # These kwargs will be passed to _generate_examples
+                     gen_kwargs={"filepath": os.path.join(predictor, "scitail_1.0_structure_test.jsonl")},
+                 ),
+                 datasets.SplitGenerator(
+                     name=datasets.Split.VALIDATION,
+                     # These kwargs will be passed to _generate_examples
+                     gen_kwargs={"filepath": os.path.join(predictor, "scitail_1.0_structure_dev.jsonl")},
+                 ),
+             ]
+         elif self.config.name == "dgem_format":
+             return [
+                 datasets.SplitGenerator(
+                     name=datasets.Split.TRAIN,
+                     # These kwargs will be passed to _generate_examples
+                     gen_kwargs={"filepath": os.path.join(dgem, "scitail_1.0_structure_train.tsv")},
+                 ),
+                 datasets.SplitGenerator(
+                     name=datasets.Split.TEST,
+                     # These kwargs will be passed to _generate_examples
+                     gen_kwargs={"filepath": os.path.join(dgem, "scitail_1.0_structure_test.tsv")},
+                 ),
+                 datasets.SplitGenerator(
+                     name=datasets.Split.VALIDATION,
+                     # These kwargs will be passed to _generate_examples
+                     gen_kwargs={"filepath": os.path.join(dgem, "scitail_1.0_structure_dev.tsv")},
+                 ),
+             ]
+
+     def _generate_examples(self, filepath):
+         """Yields examples."""
+         # TODO(sciTail): Yields (key, example) tuples from the dataset
+         with open(filepath, encoding="utf-8") as f:
+             if self.config.name == "snli_format":
+                 for id_, row in enumerate(f):
+                     data = json.loads(row)
+
+                     yield id_, {
+                         "sentence1_binary_parse": data["sentence1_binary_parse"],
+                         "sentence1_parse": data["sentence1_parse"],
+                         "sentence1": data["sentence1"],
+                         "sentence2_parse": data["sentence2_parse"],
+                         "sentence2": data["sentence2"],
+                         "annotator_labels": data["annotator_labels"],
+                         "gold_label": data["gold_label"],
+                     }
+             elif self.config.name == "tsv_format":
+                 data = csv.reader(f, delimiter="\t")
+                 for id_, row in enumerate(data):
+                     yield id_, {"premise": row[0], "hypothesis": row[1], "label": row[2]}
+             elif self.config.name == "dgem_format":
+                 data = csv.reader(f, delimiter="\t")
+                 for id_, row in enumerate(data):
+                     yield id_, {
+                         "premise": row[0],
+                         "hypothesis": row[1],
+                         "label": row[2],
+                         "hypothesis_graph_structure": row[3],
+                     }
+             elif self.config.name == "predictor_format":
+                 for id_, row in enumerate(f):
+                     data = json.loads(row)
+                     yield id_, {
+                         "answer": data["answer"],
+                         "sentence2_structure": data["sentence2_structure"],
+                         "sentence1": data["sentence1"],
+                         "sentence2": data["sentence2"],
+                         "gold_label": data["gold_label"],
+                         "question": data["question"],
+                     }
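
For completeness, a hedged sketch of how the four builder configs defined above surface different schemas from the same SciTailV1.1.zip download (loading the script under the short name "scitail" is an assumption about how it is registered in the library):

# Sketch only: exercise each config defined in scitail.py and compare schemas.
from datasets import load_dataset

for config in ("snli_format", "tsv_format", "dgem_format", "predictor_format"):
    ds = load_dataset("scitail", config, split="validation")
    print(config, ds.num_rows, sorted(ds.features))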