update to support split version of dataset zips
climate-fever-nli-stsb.py CHANGED (+44 -16)
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # TODO: Address all TODOs and remove all explanatory comments
-"""
+"""Loading script for climate-fever-nli-stsb dataset"""
 
 
 import csv
@@ -48,6 +48,32 @@ _LICENSE = ""
 _URLS = {
 }
 
+_DATAFILES = {
+    'cf-nli': {
+        'zip_path': './cf-nli.zip',
+        'filename': {
+            'train': 'train.tsv',
+            'dev': 'dev.tsv',
+            'test': 'test.tsv'
+        }
+    },
+    'cf-nli-nei': {
+        'zip_path': './cf-nli-nei.zip',
+        'filename': {
+            'train': 'train.tsv',
+            'dev': 'dev.tsv',
+            'test': 'test.tsv'
+        }
+    },
+    'cf-stsb': {
+        'zip_path': './cf-stsb.zip',
+        'filename': {
+            'train': 'train.tsv',
+            'dev': 'dev.tsv',
+            'test': 'test.tsv'
+        }
+    }
+}
 
 # TODO: Name of the dataset usually matches the script name with CamelCase instead of snake_case
 class ClimateFeverNliStsb(datasets.GeneratorBasedBuilder):
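Each config now maps to its own local zip plus a per-split TSV name. The following is a minimal standalone sketch (not part of the script) of how that mapping resolves to a concrete file path; it uses only the standard library, and it assumes the TSVs sit at the top level of each archive and that the 'filename' mapping is meant to be indexed by split name:

import os
import zipfile

# Same shape as the _DATAFILES mapping above; one config shown for brevity.
_DATAFILES = {
    'cf-nli': {
        'zip_path': './cf-nli.zip',
        'filename': {'train': 'train.tsv', 'dev': 'dev.tsv', 'test': 'test.tsv'},
    },
}

def resolve_split_file(config_name, split, extract_dir='extracted'):
    # Extract the config's zip and return the path of the requested split's TSV.
    entry = _DATAFILES[config_name]
    with zipfile.ZipFile(entry['zip_path']) as zf:
        zf.extractall(extract_dir)
    return os.path.join(extract_dir, entry['filename'][split])

# e.g. resolve_split_file('cf-nli', 'train') -> 'extracted/train.tsv'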
@@ -114,14 +140,16 @@ class ClimateFeverNliStsb(datasets.GeneratorBasedBuilder):
         # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLS
         # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
         # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
-
-
+        datapath = _DATAFILES[self.config.name]
+        zipfile = datapath['zip_path']
+        filename = datapath['filename']
+        data_dir = dl_manager.extract(zipfile)
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
                 # These kwargs will be passed to _generate_examples
                 gen_kwargs={
-                    "filepath": os.path.join(data_dir,
+                    "filepath": os.path.join(data_dir, filename),
                     "split": "train",
                 },
             ),
@@ -129,7 +157,7 @@ class ClimateFeverNliStsb(datasets.GeneratorBasedBuilder):
                 name=datasets.Split.VALIDATION,
                 # These kwargs will be passed to _generate_examples
                 gen_kwargs={
-                    "filepath": os.path.join(data_dir,
+                    "filepath": os.path.join(data_dir, filename),
                     "split": "dev",
                 },
             ),
@@ -137,7 +165,7 @@ class ClimateFeverNliStsb(datasets.GeneratorBasedBuilder):
                 name=datasets.Split.TEST,
                 # These kwargs will be passed to _generate_examples
                 gen_kwargs={
-                    "filepath": os.path.join(data_dir,
+                    "filepath": os.path.join(data_dir, filename),
                     "split": "test"
                 },
             ),
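The three SplitGenerator edits above all take their paths from the new mapping, and whatever goes into gen_kwargs is passed verbatim to _generate_examples. Below is a hedged sketch of that wiring, not the committed code: it assumes the script's own imports (os, datasets) and that each split is meant to receive its own entry from the 'filename' mapping (e.g. filename['train'] for the train split):

    def _split_generators(self, dl_manager):
        entry = _DATAFILES[self.config.name]
        # The zips ship alongside the script, so extraction is enough; no download step.
        data_dir = dl_manager.extract(entry['zip_path'])
        return [
            datasets.SplitGenerator(
                name=split_name,
                gen_kwargs={
                    "filepath": os.path.join(data_dir, entry['filename'][split_key]),
                    "split": split_key,
                },
            )
            for split_name, split_key in [
                (datasets.Split.TRAIN, "train"),
                (datasets.Split.VALIDATION, "dev"),
                (datasets.Split.TEST, "test"),
            ]
        ]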
@@ -148,18 +176,18 @@ class ClimateFeverNliStsb(datasets.GeneratorBasedBuilder):
         # TODO: This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
         # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
         with open(filepath, encoding="utf-8") as f:
-
-
-            if self.config.name
+            reader = csv.DictReader(fIn, delimiter='\t', quoting=csv.QUOTE_NONE)
+            for key, row in enumerate(reader):
+                if self.config.name in ("cf-nli", "cf-nli-nei"):
                     # Yields examples as (key, example) tuples
                     yield key, {
-                        "
-                        "
-                        "
+                        "sentence1": row["sentence1"],
+                        "sentence2": row["sentence2"],
+                        "label": row["label"],
                     }
-                else:
+                else: # cf-stsb
                     yield key, {
-                        "
-                        "
-                        "
+                        "sentence1": row["sentence1"],
+                        "sentence2": row["sentence2"],
+                        "score": row["score"],
                     }
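With the split zips in place, the dataset can be loaded through the local script one config at a time. A hedged usage sketch follows, assuming the script and the three zips (cf-nli.zip, cf-nli-nei.zip, cf-stsb.zip) sit in the current working directory; recent versions of the datasets library may additionally require trust_remote_code=True:

from datasets import load_dataset

ds = load_dataset("./climate-fever-nli-stsb.py", "cf-nli")  # or "cf-nli-nei", "cf-stsb"
print(ds["train"][0])  # expected keys: sentence1, sentence2, label (score for cf-stsb)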