mlynatom committed
Commit 5d92619 · 1 Parent(s): ef65601

Revert "Delete csfever_v2.py"


This reverts commit 4b9ba4ca445c19aa6e8037daeb7bb505d672d70c.

Files changed (1)
  1. csfever_v2.py +216 -0
csfever_v2.py ADDED
@@ -0,0 +1,216 @@
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """CsFEVERv2 dataset."""
+
+ import json
+
+ import datasets
+
+ _DESCRIPTION = """\
+ CsFEVERv2 is a new dataset aimed at the Czech fact-checking task.
+ """
+
+ # TODO: add the citation, homepage, and license once they are available
+ _CITATION = ""
+ _HOMEPAGE = ""
+ _LICENSE = ""
+
+ # The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
+ # Each retrieval variant ships train/dev/test JSONL files; wiki_pages is a single dump
+ # (see the `_split_generators` method below).
+ _URLS = {
+     "original": {
+         "train": "./original/train.jsonl",
+         "dev": "./original/dev.jsonl",
+         "test": "./original/test.jsonl",
+     },
+     "f1": {
+         "train": "./f1/train.jsonl",
+         "dev": "./f1/dev.jsonl",
+         "test": "./f1/test.jsonl",
+     },
+     "precision": {
+         "train": "./precision/train.jsonl",
+         "dev": "./precision/dev.jsonl",
+         "test": "./precision/test.jsonl",
+     },
+     "07": {
+         "train": "./07/train.jsonl",
+         "dev": "./07/dev.jsonl",
+         "test": "./07/test.jsonl",
+     },
+     "wiki_pages": "./wiki_pages/wiki_pages.jsonl",
+     "original_nli": {
+         "train": "./original_nli/train.jsonl",
+         "dev": "./original_nli/dev.jsonl",
+         "test": "./original_nli/test.jsonl",
+     },
+     "f1_nli": {
+         "train": "./f1_nli/train.jsonl",
+         "dev": "./f1_nli/dev.jsonl",
+         "test": "./f1_nli/test.jsonl",
+     },
+     "07_nli": {
+         "train": "./07_nli/train.jsonl",
+         "dev": "./07_nli/dev.jsonl",
+         "test": "./07_nli/test.jsonl",
+     },
+     "precision_nli": {
+         "train": "./precision_nli/train.jsonl",
+         "dev": "./precision_nli/dev.jsonl",
+         "test": "./precision_nli/test.jsonl",
+     },
+ }
+
+ _ORIGINAL_DESCRIPTION = ""
+
+ _NLI_NAMES = ["original_nli", "07_nli", "precision_nli", "f1_nli"]
+
+
+ # The class name matches the script name, with CamelCase instead of snake_case.
+ class CsFEVERv2(datasets.GeneratorBasedBuilder):
+     """CsFEVERv2"""
+
+     VERSION = datasets.Version("1.1.0")
+
+     # One configuration per document-retrieval variant ("original", "f1", "precision",
+     # "07"), one per NLI counterpart, and one for the wiki_pages dump; select one via
+     # the second argument of datasets.load_dataset.
+     BUILDER_CONFIGS = [
+         datasets.BuilderConfig(name="original", version=VERSION, description=_ORIGINAL_DESCRIPTION),
+         datasets.BuilderConfig(name="f1", version=VERSION, description=_ORIGINAL_DESCRIPTION),
+         datasets.BuilderConfig(name="precision", version=VERSION, description=_ORIGINAL_DESCRIPTION),
+         datasets.BuilderConfig(name="07", version=VERSION, description=_ORIGINAL_DESCRIPTION),
+         datasets.BuilderConfig(name="wiki_pages", version=VERSION, description=_ORIGINAL_DESCRIPTION),
+         datasets.BuilderConfig(name="original_nli", version=VERSION, description=_ORIGINAL_DESCRIPTION),
+         datasets.BuilderConfig(name="07_nli", version=VERSION, description=_ORIGINAL_DESCRIPTION),
+         datasets.BuilderConfig(name="f1_nli", version=VERSION, description=_ORIGINAL_DESCRIPTION),
+         datasets.BuilderConfig(name="precision_nli", version=VERSION, description=_ORIGINAL_DESCRIPTION),
+     ]
+
+     DEFAULT_CONFIG_NAME = "original"  # A default configuration is optional; "original" is the primary variant.
+
+     def _info(self):
+         # Builds the datasets.DatasetInfo object holding the feature types of the selected configuration.
+         if self.config.name == "original":
+             # The "original" variant also carries the retrieval model's predictions.
+             features = datasets.Features(
+                 {
+                     "id": datasets.Value("int32"),
+                     "label": datasets.Value("string"),
+                     "predicted_label": datasets.Value("string"),
+                     "predicted_score": datasets.Value("float"),
+                     "claim": datasets.Value("string"),
+                     "evidence": datasets.Sequence(datasets.Sequence(datasets.Value("string"))),
+                 }
+             )
+         elif self.config.name in _NLI_NAMES:
+             # The NLI variants pair each claim with a single evidence string and a class label.
+             features = datasets.Features(
+                 {
+                     "id": datasets.Value("int32"),
+                     "label": datasets.ClassLabel(num_classes=3, names=["SUPPORTS", "REFUTES", "NOT ENOUGH INFO"]),
+                     "claim": datasets.Value("string"),
+                     "evidence": datasets.Value("string"),
+                 }
+             )
+         elif self.config.name == "wiki_pages":
+             # The wiki_pages dump holds the article texts that the evidence IDs refer to.
+             features = datasets.Features(
+                 {
+                     "id": datasets.Value("int32"),
+                     "revid": datasets.Value("int32"),
+                     "url": datasets.Value("string"),
+                     "title": datasets.Value("string"),
+                     "text": datasets.Value("string"),
+                 }
+             )
+         else:
+             # The remaining retrieval variants ("f1", "precision", "07") share one schema.
+             features = datasets.Features(
+                 {
+                     "id": datasets.Value("int32"),
+                     "label": datasets.Value("string"),
+                     "claim": datasets.Value("string"),
+                     "evidence": datasets.Sequence(datasets.Sequence(datasets.Value("string"))),
+                 }
+             )
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         # Downloads/extracts the data and defines the splits for the selected configuration.
+         # dl_manager accepts any nested list/dict of URLs and returns the same structure
+         # with every URL replaced by the path to a local (extracted) copy.
+         urls = _URLS[self.config.name]
+         data_dir = dl_manager.download_and_extract(urls)
+         if self.config.name == "wiki_pages":
+             # wiki_pages is a single file, exposed as one split.
+             return [
+                 datasets.SplitGenerator(
+                     name="wiki_pages",
+                     gen_kwargs={"filepath": data_dir, "split": "wiki_pages"},
+                 )
+             ]
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={"filepath": data_dir["train"], "split": "train"},
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 gen_kwargs={"filepath": data_dir["dev"], "split": "dev"},
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={"filepath": data_dir["test"], "split": "test"},
+             ),
+         ]
+
+     def _generate_examples(self, filepath, split):
+         # Yields (key, example) tuples; the key is kept for legacy (tfds) reasons and
+         # only needs to be unique per example.
+         with open(filepath, encoding="utf-8") as f:
+             for key, row in enumerate(f):
+                 data_point = json.loads(row)
+                 if self.config.name == "original":
+                     yield key, {
+                         "id": data_point["id"],
+                         "label": data_point["label"],
+                         "predicted_label": data_point["predicted_label"],
+                         "predicted_score": data_point["predicted_score"],
+                         "claim": data_point["claim"],
+                         "evidence": data_point["evidence"],
+                     }
+                 elif self.config.name == "wiki_pages":
+                     yield key, {
+                         "id": data_point["id"],
+                         "revid": data_point["revid"],
+                         "url": data_point["url"],
+                         "title": data_point["title"],
+                         "text": data_point["text"],
+                     }
+                 else:
+                     # The NLI variants and the remaining retrieval variants share these fields.
+                     yield key, {
+                         "id": data_point["id"],
+                         "label": data_point["label"],
+                         "claim": data_point["claim"],
+                         "evidence": data_point["evidence"],
+                     }