Convert dataset to Parquet #3

opened by albertvillanova (HF staff)
README.md CHANGED
@@ -4,102 +4,135 @@ language:
  paperswithcode_id: scitail
  pretty_name: SciTail
  dataset_info:
- - config_name: snli_format
    features:
-   - name: sentence1_binary_parse
-     dtype: string
-   - name: sentence1_parse
-     dtype: string
-   - name: sentence1
      dtype: string
-   - name: sentence2_parse
      dtype: string
-   - name: sentence2
      dtype: string
-   - name: annotator_labels
-     sequence: string
-   - name: gold_label
      dtype: string
    splits:
    - name: train
-     num_bytes: 22495833
-     num_examples: 23596
    - name: test
-     num_bytes: 2008631
      num_examples: 2126
    - name: validation
-     num_bytes: 1266529
      num_examples: 1304
-   download_size: 14174621
-   dataset_size: 25770993
- - config_name: tsv_format
    features:
-   - name: premise
      dtype: string
-   - name: hypothesis
      dtype: string
-   - name: label
      dtype: string
    splits:
    - name: train
-     num_bytes: 4618115
-     num_examples: 23097
    - name: test
-     num_bytes: 411343
      num_examples: 2126
    - name: validation
-     num_bytes: 261086
      num_examples: 1304
-   download_size: 14174621
-   dataset_size: 5290544
- - config_name: dgem_format
    features:
-   - name: premise
      dtype: string
-   - name: hypothesis
      dtype: string
-   - name: label
      dtype: string
-   - name: hypothesis_graph_structure
      dtype: string
    splits:
    - name: train
-     num_bytes: 6832104
-     num_examples: 23088
    - name: test
-     num_bytes: 608213
      num_examples: 2126
    - name: validation
-     num_bytes: 394040
      num_examples: 1304
-   download_size: 14174621
-   dataset_size: 7834357
- - config_name: predictor_format
    features:
-   - name: answer
-     dtype: string
-   - name: sentence2_structure
-     dtype: string
-   - name: sentence1
-     dtype: string
-   - name: sentence2
      dtype: string
-   - name: gold_label
      dtype: string
-   - name: question
      dtype: string
    splits:
    - name: train
-     num_bytes: 8884823
-     num_examples: 23587
    - name: test
-     num_bytes: 797161
      num_examples: 2126
    - name: validation
-     num_bytes: 511305
      num_examples: 1304
-   download_size: 14174621
-   dataset_size: 10193289
  ---

  # Dataset Card for "scitail"
 
  paperswithcode_id: scitail
  pretty_name: SciTail
  dataset_info:
+ - config_name: dgem_format
    features:
+   - name: premise
      dtype: string
+   - name: hypothesis
      dtype: string
+   - name: label
      dtype: string
+   - name: hypothesis_graph_structure
      dtype: string
    splits:
    - name: train
+     num_bytes: 6817626
+     num_examples: 23088
    - name: test
+     num_bytes: 606867
      num_examples: 2126
    - name: validation
+     num_bytes: 393209
      num_examples: 1304
+   download_size: 2007018
+   dataset_size: 7817702
+ - config_name: predictor_format
    features:
+   - name: answer
      dtype: string
+   - name: sentence2_structure
      dtype: string
+   - name: sentence1
+     dtype: string
+   - name: sentence2
+     dtype: string
+   - name: gold_label
+     dtype: string
+   - name: question
      dtype: string
    splits:
    - name: train
+     num_bytes: 8864108
+     num_examples: 23587
    - name: test
+     num_bytes: 795275
      num_examples: 2126
    - name: validation
+     num_bytes: 510140
      num_examples: 1304
+   download_size: 2169238
+   dataset_size: 10169523
+ - config_name: snli_format
    features:
+   - name: sentence1_binary_parse
      dtype: string
+   - name: sentence1_parse
      dtype: string
+   - name: sentence1
      dtype: string
+   - name: sentence2_parse
+     dtype: string
+   - name: sentence2
+     dtype: string
+   - name: annotator_labels
+     sequence: string
+   - name: gold_label
      dtype: string
    splits:
    - name: train
+     num_bytes: 22457379
+     num_examples: 23596
    - name: test
+     num_bytes: 2005142
      num_examples: 2126
    - name: validation
+     num_bytes: 1264378
      num_examples: 1304
+   download_size: 7476483
+   dataset_size: 25726899
+ - config_name: tsv_format
    features:
+   - name: premise
      dtype: string
+   - name: hypothesis
      dtype: string
+   - name: label
      dtype: string
    splits:
    - name: train
+     num_bytes: 4606527
+     num_examples: 23097
    - name: test
+     num_bytes: 410267
      num_examples: 2126
    - name: validation
+     num_bytes: 260422
      num_examples: 1304
+   download_size: 1836546
+   dataset_size: 5277216
+ configs:
+ - config_name: dgem_format
+   data_files:
+   - split: train
+     path: dgem_format/train-*
+   - split: test
+     path: dgem_format/test-*
+   - split: validation
+     path: dgem_format/validation-*
+ - config_name: predictor_format
+   data_files:
+   - split: train
+     path: predictor_format/train-*
+   - split: test
+     path: predictor_format/test-*
+   - split: validation
+     path: predictor_format/validation-*
+ - config_name: snli_format
+   data_files:
+   - split: train
+     path: snli_format/train-*
+   - split: test
+     path: snli_format/test-*
+   - split: validation
+     path: snli_format/validation-*
+ - config_name: tsv_format
+   data_files:
+   - split: train
+     path: tsv_format/train-*
+   - split: test
+     path: tsv_format/test-*
+   - split: validation
+     path: tsv_format/validation-*
  ---

  # Dataset Card for "scitail"
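
The `configs:` block added above maps each configuration name to its Parquet shards, so the data can be read directly from the Hub without executing a loading script. A minimal sketch of loading one configuration with the `datasets` library; the `allenai/scitail` repo id and the `refs/pr/3` revision are assumptions for illustration:

```python
# Minimal sketch: read a Parquet-backed config via the `datasets` library.
# Assumptions for illustration: the Hub repo id ("allenai/scitail") and the
# PR revision ("refs/pr/3"); once merged, the revision argument can be dropped.
from datasets import load_dataset

ds = load_dataset("allenai/scitail", "tsv_format", revision="refs/pr/3")
print(ds)              # DatasetDict with train/test/validation splits
print(ds["train"][0])  # {'premise': ..., 'hypothesis': ..., 'label': ...}
```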
dataset_infos.json DELETED
@@ -1 +0,0 @@
- {"snli_format": {"description": "The SciTail dataset is an entailment dataset created from multiple-choice science exams and web sentences. Each question \nand the correct answer choice are converted into an assertive statement to form the hypothesis. We use information \nretrieval to obtain relevant text from a large text corpus of web sentences, and use these sentences as a premise P. We \ncrowdsource the annotation of such premise-hypothesis pair as supports (entails) or not (neutral), in order to create \nthe SciTail dataset. The dataset contains 27,026 examples with 10,101 examples with entails label and 16,925 examples \nwith neutral label\n", "citation": "inproceedings{scitail,\n Author = {Tushar Khot and Ashish Sabharwal and Peter Clark},\n Booktitle = {AAAI},\n Title = {{SciTail}: A Textual Entailment Dataset from Science Question Answering},\n Year = {2018}\n}\n", "homepage": "https://allenai.org/data/scitail", "license": "", "features": {"sentence1_binary_parse": {"dtype": "string", "id": null, "_type": "Value"}, "sentence1_parse": {"dtype": "string", "id": null, "_type": "Value"}, "sentence1": {"dtype": "string", "id": null, "_type": "Value"}, "sentence2_parse": {"dtype": "string", "id": null, "_type": "Value"}, "sentence2": {"dtype": "string", "id": null, "_type": "Value"}, "annotator_labels": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "gold_label": {"dtype": "string", "id": null, "_type": "Value"}}, "supervised_keys": null, "builder_name": "scitail", "config_name": "snli_format", "version": {"version_str": "1.1.0", "description": "", "datasets_version_to_prepare": null, "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 22495833, "num_examples": 23596, "dataset_name": "scitail"}, "test": {"name": "test", "num_bytes": 2008631, "num_examples": 2126, "dataset_name": "scitail"}, "validation": {"name": "validation", "num_bytes": 1266529, "num_examples": 1304, "dataset_name": "scitail"}}, "download_checksums": {"http://data.allenai.org.s3.amazonaws.com/downloads/SciTailV1.1.zip": {"num_bytes": 14174621, "checksum": "3fccd37350a94ca280b75998568df85fc2fc62843a3198d644fcbf858e6943d5"}}, "download_size": 14174621, "dataset_size": 25770993, "size_in_bytes": 39945614}, "tsv_format": {"description": "The SciTail dataset is an entailment dataset created from multiple-choice science exams and web sentences. Each question \nand the correct answer choice are converted into an assertive statement to form the hypothesis. We use information \nretrieval to obtain relevant text from a large text corpus of web sentences, and use these sentences as a premise P. We \ncrowdsource the annotation of such premise-hypothesis pair as supports (entails) or not (neutral), in order to create \nthe SciTail dataset. The dataset contains 27,026 examples with 10,101 examples with entails label and 16,925 examples \nwith neutral label\n", "citation": "inproceedings{scitail,\n Author = {Tushar Khot and Ashish Sabharwal and Peter Clark},\n Booktitle = {AAAI},\n Title = {{SciTail}: A Textual Entailment Dataset from Science Question Answering},\n Year = {2018}\n}\n", "homepage": "https://allenai.org/data/scitail", "license": "", "features": {"premise": {"dtype": "string", "id": null, "_type": "Value"}, "hypothesis": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"dtype": "string", "id": null, "_type": "Value"}}, "supervised_keys": null, "builder_name": "scitail", "config_name": "tsv_format", "version": {"version_str": "1.1.0", "description": "", "datasets_version_to_prepare": null, "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 4618115, "num_examples": 23097, "dataset_name": "scitail"}, "test": {"name": "test", "num_bytes": 411343, "num_examples": 2126, "dataset_name": "scitail"}, "validation": {"name": "validation", "num_bytes": 261086, "num_examples": 1304, "dataset_name": "scitail"}}, "download_checksums": {"http://data.allenai.org.s3.amazonaws.com/downloads/SciTailV1.1.zip": {"num_bytes": 14174621, "checksum": "3fccd37350a94ca280b75998568df85fc2fc62843a3198d644fcbf858e6943d5"}}, "download_size": 14174621, "dataset_size": 5290544, "size_in_bytes": 19465165}, "dgem_format": {"description": "The SciTail dataset is an entailment dataset created from multiple-choice science exams and web sentences. Each question \nand the correct answer choice are converted into an assertive statement to form the hypothesis. We use information \nretrieval to obtain relevant text from a large text corpus of web sentences, and use these sentences as a premise P. We \ncrowdsource the annotation of such premise-hypothesis pair as supports (entails) or not (neutral), in order to create \nthe SciTail dataset. The dataset contains 27,026 examples with 10,101 examples with entails label and 16,925 examples \nwith neutral label\n", "citation": "inproceedings{scitail,\n Author = {Tushar Khot and Ashish Sabharwal and Peter Clark},\n Booktitle = {AAAI},\n Title = {{SciTail}: A Textual Entailment Dataset from Science Question Answering},\n Year = {2018}\n}\n", "homepage": "https://allenai.org/data/scitail", "license": "", "features": {"premise": {"dtype": "string", "id": null, "_type": "Value"}, "hypothesis": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"dtype": "string", "id": null, "_type": "Value"}, "hypothesis_graph_structure": {"dtype": "string", "id": null, "_type": "Value"}}, "supervised_keys": null, "builder_name": "scitail", "config_name": "dgem_format", "version": {"version_str": "1.1.0", "description": "", "datasets_version_to_prepare": null, "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 6832104, "num_examples": 23088, "dataset_name": "scitail"}, "test": {"name": "test", "num_bytes": 608213, "num_examples": 2126, "dataset_name": "scitail"}, "validation": {"name": "validation", "num_bytes": 394040, "num_examples": 1304, "dataset_name": "scitail"}}, "download_checksums": {"http://data.allenai.org.s3.amazonaws.com/downloads/SciTailV1.1.zip": {"num_bytes": 14174621, "checksum": "3fccd37350a94ca280b75998568df85fc2fc62843a3198d644fcbf858e6943d5"}}, "download_size": 14174621, "dataset_size": 7834357, "size_in_bytes": 22008978}, "predictor_format": {"description": "The SciTail dataset is an entailment dataset created from multiple-choice science exams and web sentences. Each question \nand the correct answer choice are converted into an assertive statement to form the hypothesis. We use information \nretrieval to obtain relevant text from a large text corpus of web sentences, and use these sentences as a premise P. We \ncrowdsource the annotation of such premise-hypothesis pair as supports (entails) or not (neutral), in order to create \nthe SciTail dataset. The dataset contains 27,026 examples with 10,101 examples with entails label and 16,925 examples \nwith neutral label\n", "citation": "inproceedings{scitail,\n Author = {Tushar Khot and Ashish Sabharwal and Peter Clark},\n Booktitle = {AAAI},\n Title = {{SciTail}: A Textual Entailment Dataset from Science Question Answering},\n Year = {2018}\n}\n", "homepage": "https://allenai.org/data/scitail", "license": "", "features": {"answer": {"dtype": "string", "id": null, "_type": "Value"}, "sentence2_structure": {"dtype": "string", "id": null, "_type": "Value"}, "sentence1": {"dtype": "string", "id": null, "_type": "Value"}, "sentence2": {"dtype": "string", "id": null, "_type": "Value"}, "gold_label": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}}, "supervised_keys": null, "builder_name": "scitail", "config_name": "predictor_format", "version": {"version_str": "1.1.0", "description": "", "datasets_version_to_prepare": null, "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 8884823, "num_examples": 23587, "dataset_name": "scitail"}, "test": {"name": "test", "num_bytes": 797161, "num_examples": 2126, "dataset_name": "scitail"}, "validation": {"name": "validation", "num_bytes": 511305, "num_examples": 1304, "dataset_name": "scitail"}}, "download_checksums": {"http://data.allenai.org.s3.amazonaws.com/downloads/SciTailV1.1.zip": {"num_bytes": 14174621, "checksum": "3fccd37350a94ca280b75998568df85fc2fc62843a3198d644fcbf858e6943d5"}}, "download_size": 14174621, "dataset_size": 10193289, "size_in_bytes": 24367910}}
 
 
dgem_format/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:56dbd29881d108dc3d2ccb4c5cce523c92c6f170261318e47731f654962974ad
+ size 185039
dgem_format/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0d2a8a30dbc74e45e09d6d51c5193c83285a94546db26a88d3193487ab4bcc1e
+ size 1709686
dgem_format/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:de818ba1a5eedea8861a974031697fac1d33df7bf9b79d70f58b173be32ef710
+ size 112293
predictor_format/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5e9a9018642c215666fa0d257a3f4b583223e3b54c0266ad7e5d95ae306bb125
+ size 210214
predictor_format/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f50f8d853224ea674854538ce84a38091d392a813845df9693494815d931251b
+ size 1833842
predictor_format/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0e101a3ddfb134d8cd9b185b1c660f777aaff8dabcb8f068b4285b4e4368353b
+ size 125182
scitail.py DELETED
@@ -1,298 +0,0 @@
- """TODO(sciTail): Add a description here."""
-
-
- import csv
- import json
- import os
- import textwrap
-
- import datasets
-
-
- # TODO(sciTail): BibTeX citation
- _CITATION = """\
- inproceedings{scitail,
-  Author = {Tushar Khot and Ashish Sabharwal and Peter Clark},
-  Booktitle = {AAAI},
-  Title = {{SciTail}: A Textual Entailment Dataset from Science Question Answering},
-  Year = {2018}
- }
- """
-
- # TODO(sciTail):
- _DESCRIPTION = """\
- The SciTail dataset is an entailment dataset created from multiple-choice science exams and web sentences. Each question
- and the correct answer choice are converted into an assertive statement to form the hypothesis. We use information
- retrieval to obtain relevant text from a large text corpus of web sentences, and use these sentences as a premise P. We
- crowdsource the annotation of such premise-hypothesis pair as supports (entails) or not (neutral), in order to create
- the SciTail dataset. The dataset contains 27,026 examples with 10,101 examples with entails label and 16,925 examples
- with neutral label
- """
-
- _URL = "http://data.allenai.org.s3.amazonaws.com/downloads/SciTailV1.1.zip"
-
-
- class ScitailConfig(datasets.BuilderConfig):
-
-     """BuilderConfig for Xquad"""
-
-     def __init__(self, **kwargs):
-         """
-
-         Args:
-             **kwargs: keyword arguments forwarded to super.
-         """
-         super(ScitailConfig, self).__init__(version=datasets.Version("1.1.0", ""), **kwargs)
-
-
- class Scitail(datasets.GeneratorBasedBuilder):
-     """TODO(sciTail): Short description of my dataset."""
-
-     # TODO(sciTail): Set up version.
-     VERSION = datasets.Version("1.1.0")
-     BUILDER_CONFIGS = [
-         ScitailConfig(
-             name="snli_format",
-             description="JSONL format used by SNLI with a JSON object corresponding to each entailment example in each line.",
-         ),
-         ScitailConfig(
-             name="tsv_format", description="Tab-separated format with three columns: premise hypothesis label"
-         ),
-         ScitailConfig(
-             name="dgem_format",
-             description="Tab-separated format used by the DGEM model: premise hypothesis label hypothesis graph structure",
-         ),
-         ScitailConfig(
-             name="predictor_format",
-             description=textwrap.dedent(
-                 """\
-             AllenNLP predictors work only with JSONL format. This folder contains the SciTail train/dev/test in JSONL format
-             so that it can be loaded into the predictors. Each line is a JSON object with the following keys:
-             gold_label : the example label from {entails, neutral}
-             sentence1: the premise
-             sentence2: the hypothesis
-             sentence2_structure: structure from the hypothesis """
-             ),
-         ),
-     ]
-
-     def _info(self):
-         # TODO(sciTail): Specifies the datasets.DatasetInfo object
-         if self.config.name == "snli_format":
-             return datasets.DatasetInfo(
-                 # This is the description that will appear on the datasets page.
-                 description=_DESCRIPTION,
-                 # datasets.features.FeatureConnectors
-                 features=datasets.Features(
-                     {
-                         "sentence1_binary_parse": datasets.Value("string"),
-                         "sentence1_parse": datasets.Value("string"),
-                         "sentence1": datasets.Value("string"),
-                         "sentence2_parse": datasets.Value("string"),
-                         "sentence2": datasets.Value("string"),
-                         "annotator_labels": datasets.features.Sequence(datasets.Value("string")),
-                         "gold_label": datasets.Value("string")
-                         # These are the features of your dataset like images, labels ...
-                     }
-                 ),
-                 # If there's a common (input, target) tuple from the features,
-                 # specify them here. They'll be used if as_supervised=True in
-                 # builder.as_dataset.
-                 supervised_keys=None,
-                 # Homepage of the dataset for documentation
-                 homepage="https://allenai.org/data/scitail",
-                 citation=_CITATION,
-             )
-         elif self.config.name == "tsv_format":
-             return datasets.DatasetInfo(
-                 # This is the description that will appear on the datasets page.
-                 description=_DESCRIPTION,
-                 # datasets.features.FeatureConnectors
-                 features=datasets.Features(
-                     {
-                         "premise": datasets.Value("string"),
-                         "hypothesis": datasets.Value("string"),
-                         "label": datasets.Value("string")
-                         # These are the features of your dataset like images, labels ...
-                     }
-                 ),
-                 # If there's a common (input, target) tuple from the features,
-                 # specify them here. They'll be used if as_supervised=True in
-                 # builder.as_dataset.
-                 supervised_keys=None,
-                 # Homepage of the dataset for documentation
-                 homepage="https://allenai.org/data/scitail",
-                 citation=_CITATION,
-             )
-         elif self.config.name == "predictor_format":
-             return datasets.DatasetInfo(
-                 # This is the description that will appear on the datasets page.
-                 description=_DESCRIPTION,
-                 # datasets.features.FeatureConnectors
-                 features=datasets.Features(
-                     {
-                         "answer": datasets.Value("string"),
-                         "sentence2_structure": datasets.Value("string"),
-                         "sentence1": datasets.Value("string"),
-                         "sentence2": datasets.Value("string"),
-                         "gold_label": datasets.Value("string"),
-                         "question": datasets.Value("string")
-                         # These are the features of your dataset like images, labels ...
-                     }
-                 ),
-                 # If there's a common (input, target) tuple from the features,
-                 # specify them here. They'll be used if as_supervised=True in
-                 # builder.as_dataset.
-                 supervised_keys=None,
-                 # Homepage of the dataset for documentation
-                 homepage="https://allenai.org/data/scitail",
-                 citation=_CITATION,
-             )
-         elif self.config.name == "dgem_format":
-             return datasets.DatasetInfo(
-                 # This is the description that will appear on the datasets page.
-                 description=_DESCRIPTION,
-                 # datasets.features.FeatureConnectors
-                 features=datasets.Features(
-                     {
-                         "premise": datasets.Value("string"),
-                         "hypothesis": datasets.Value("string"),
-                         "label": datasets.Value("string"),
-                         "hypothesis_graph_structure": datasets.Value("string")
-                         # These are the features of your dataset like images, labels ...
-                     }
-                 ),
-                 # If there's a common (input, target) tuple from the features,
-                 # specify them here. They'll be used if as_supervised=True in
-                 # builder.as_dataset.
-                 supervised_keys=None,
-                 # Homepage of the dataset for documentation
-                 homepage="https://allenai.org/data/scitail",
-                 citation=_CITATION,
-             )
-
-     def _split_generators(self, dl_manager):
-         """Returns SplitGenerators."""
-         # TODO(sciTail): Downloads the data and defines the splits
-         # dl_manager is a datasets.download.DownloadManager that can be used to
-         # download and extract URLs
-         dl_dir = dl_manager.download_and_extract(_URL)
-         data_dir = os.path.join(dl_dir, "SciTailV1.1")
-         snli = os.path.join(data_dir, "snli_format")
-         dgem = os.path.join(data_dir, "dgem_format")
-         tsv = os.path.join(data_dir, "tsv_format")
-         predictor = os.path.join(data_dir, "predictor_format")
-         if self.config.name == "snli_format":
-             return [
-                 datasets.SplitGenerator(
-                     name=datasets.Split.TRAIN,
-                     # These kwargs will be passed to _generate_examples
-                     gen_kwargs={"filepath": os.path.join(snli, "scitail_1.0_train.txt")},
-                 ),
-                 datasets.SplitGenerator(
-                     name=datasets.Split.TEST,
-                     # These kwargs will be passed to _generate_examples
-                     gen_kwargs={"filepath": os.path.join(snli, "scitail_1.0_test.txt")},
-                 ),
-                 datasets.SplitGenerator(
-                     name=datasets.Split.VALIDATION,
-                     # These kwargs will be passed to _generate_examples
-                     gen_kwargs={"filepath": os.path.join(snli, "scitail_1.0_dev.txt")},
-                 ),
-             ]
-         elif self.config.name == "tsv_format":
-             return [
-                 datasets.SplitGenerator(
-                     name=datasets.Split.TRAIN,
-                     # These kwargs will be passed to _generate_examples
-                     gen_kwargs={"filepath": os.path.join(tsv, "scitail_1.0_train.tsv")},
-                 ),
-                 datasets.SplitGenerator(
-                     name=datasets.Split.TEST,
-                     # These kwargs will be passed to _generate_examples
-                     gen_kwargs={"filepath": os.path.join(tsv, "scitail_1.0_test.tsv")},
-                 ),
-                 datasets.SplitGenerator(
-                     name=datasets.Split.VALIDATION,
-                     # These kwargs will be passed to _generate_examples
-                     gen_kwargs={"filepath": os.path.join(tsv, "scitail_1.0_dev.tsv")},
-                 ),
-             ]
-         elif self.config.name == "predictor_format":
-             return [
-                 datasets.SplitGenerator(
-                     name=datasets.Split.TRAIN,
-                     # These kwargs will be passed to _generate_examples
-                     gen_kwargs={"filepath": os.path.join(predictor, "scitail_1.0_structure_train.jsonl")},
-                 ),
-                 datasets.SplitGenerator(
-                     name=datasets.Split.TEST,
-                     # These kwargs will be passed to _generate_examples
-                     gen_kwargs={"filepath": os.path.join(predictor, "scitail_1.0_structure_test.jsonl")},
-                 ),
-                 datasets.SplitGenerator(
-                     name=datasets.Split.VALIDATION,
-                     # These kwargs will be passed to _generate_examples
-                     gen_kwargs={"filepath": os.path.join(predictor, "scitail_1.0_structure_dev.jsonl")},
-                 ),
-             ]
-         elif self.config.name == "dgem_format":
-             return [
-                 datasets.SplitGenerator(
-                     name=datasets.Split.TRAIN,
-                     # These kwargs will be passed to _generate_examples
-                     gen_kwargs={"filepath": os.path.join(dgem, "scitail_1.0_structure_train.tsv")},
-                 ),
-                 datasets.SplitGenerator(
-                     name=datasets.Split.TEST,
-                     # These kwargs will be passed to _generate_examples
-                     gen_kwargs={"filepath": os.path.join(dgem, "scitail_1.0_structure_test.tsv")},
-                 ),
-                 datasets.SplitGenerator(
-                     name=datasets.Split.VALIDATION,
-                     # These kwargs will be passed to _generate_examples
-                     gen_kwargs={"filepath": os.path.join(dgem, "scitail_1.0_structure_dev.tsv")},
-                 ),
-             ]
-
-     def _generate_examples(self, filepath):
-         """Yields examples."""
-         # TODO(sciTail): Yields (key, example) tuples from the dataset
-         with open(filepath, encoding="utf-8") as f:
-             if self.config.name == "snli_format":
-                 for id_, row in enumerate(f):
-                     data = json.loads(row)
-
-                     yield id_, {
-                         "sentence1_binary_parse": data["sentence1_binary_parse"],
-                         "sentence1_parse": data["sentence1_parse"],
-                         "sentence1": data["sentence1"],
-                         "sentence2_parse": data["sentence2_parse"],
-                         "sentence2": data["sentence2"],
-                         "annotator_labels": data["annotator_labels"],
-                         "gold_label": data["gold_label"],
-                     }
-             elif self.config.name == "tsv_format":
-                 data = csv.reader(f, delimiter="\t")
-                 for id_, row in enumerate(data):
-                     yield id_, {"premise": row[0], "hypothesis": row[1], "label": row[2]}
-             elif self.config.name == "dgem_format":
-                 data = csv.reader(f, delimiter="\t")
-                 for id_, row in enumerate(data):
-                     yield id_, {
-                         "premise": row[0],
-                         "hypothesis": row[1],
-                         "label": row[2],
-                         "hypothesis_graph_structure": row[3],
-                     }
-             elif self.config.name == "predictor_format":
-                 for id_, row in enumerate(f):
-                     data = json.loads(row)
-                     yield id_, {
-                         "answer": data["answer"],
-                         "sentence2_structure": data["sentence2_structure"],
-                         "sentence1": data["sentence1"],
-                         "sentence2": data["sentence2"],
-                         "gold_label": data["gold_label"],
-                         "question": data["question"],
-                     }

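With the loading script removed, the Parquet shards added in this PR carry the data themselves. A rough sketch of how such a conversion can be produced, assuming a revision of the dataset that still ships `scitail.py` is loaded first and each split is then written out with `Dataset.to_parquet` (this is an illustration, not the exact command used for this PR):

```python
# Rough sketch of the Parquet conversion: materialize every config with the
# script-based dataset, then write each split to
# <config>/<split>-00000-of-00001.parquet, mirroring the files added in this PR.
import os
from datasets import load_dataset

for config in ["snli_format", "tsv_format", "dgem_format", "predictor_format"]:
    # Assumption: this resolves a revision that still contains scitail.py.
    splits = load_dataset("scitail", config)
    os.makedirs(config, exist_ok=True)
    for split_name, split in splits.items():
        split.to_parquet(os.path.join(config, f"{split_name}-00000-of-00001.parquet"))
```
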
snli_format/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9814bcb18de316ee02bb533626bee2ed8db03bed7b0bd6d0deb9d66536ded627
+ size 653112
snli_format/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c4c77597d52d3ef45e2f9c804b127562395b1d096a6a5ef5da1dc15d7760d394
+ size 6423089
snli_format/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cfcbb30a8c3781f5ca346244b96ea4b5c0f5e813638b71f7d0a382595cbaa337
+ size 400282
tsv_format/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c2b4b8b5e258a30fe7d1f7861ad7154f1ebaf8f085f5e051db5e22352cf7ca96
+ size 162166
tsv_format/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:35ffcef823e42135a4fcee1b5ecb7c951e99f97b6f51c9363a23b537d41fb5d3
+ size 1574550
tsv_format/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7342d7d9c3f0c90b904b5fcfa37b909ed77fc3f9f0c4b87618d7718469f55b56
+ size 99830