mlynatom committed on
Commit
1846858
1 Parent(s): 28845c1

revised working version

Browse files
07/dev.jsonl CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:30ea40d6ed4a4e8693be1894eb0f6199a1ac924f062e3e6e4c2eea26deb52608
3
- size 38267959
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:add0cd576cf3dde8d33e849977fa6f225b198623e7981b13be7f32cd6704bdf3
3
+ size 1002348
07/test.jsonl CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:1b9e39f2a5edcd062ba8885be9395e6b1b9cc6fd01c8ec65c3d79a39da00fed1
3
- size 46130274
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:51a291b2d66852c92e1170501e8d3cea4cc58e89d36196da39701c93142b8567
3
+ size 1011945
07/train.jsonl CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:3b6c651115c3b7cd1737da2f1ce72a44e0b559218035b2860aba5bb13c5c23ca
3
- size 1089761534
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:020b1173b7e50aadfd7ea20dccbd27743dcf84282d2f1fe36abf265087673511
3
+ size 16454345
csfever_v2.py ADDED
@@ -0,0 +1,201 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ # TODO: Address all TODOs and remove all explanatory comments
15
+ """TODO: Add a description here."""
16
+
17
+
18
+ import csv
19
+ import json
20
+ import os
21
+
22
+ import datasets
23
+
24
+
25
# Dataset-card metadata — placeholders, not filled in upstream yet.
_DESCRIPTION = """\
This new dataset is designed to solve this great NLP task and is crafted with a lot of care.
"""
# Citation for the dataset (TODO upstream).
_CITATION = ""
# Official homepage for the dataset (TODO upstream).
_HOMEPAGE = ""

# Licence for the dataset (TODO upstream).
_LICENSE = ""

# Relative paths to the per-configuration jsonl split files; the builder hands
# these to the download manager, which resolves them against the repo root.
# The layout is perfectly regular (./<config>/<split>.jsonl), so the table is
# generated rather than written out by hand.
_URLS = {
    config: {split: f"./{config}/{split}.jsonl" for split in ("train", "dev", "test")}
    for config in ("original", "f1", "precision", "07")
}

# Shared per-configuration description (empty for now).
_ORIGINAL_DESCRIPTION = ""
57
+
58
+
59
# Dataset builder for CsFEVERv2.
class CsFEVERv2(datasets.GeneratorBasedBuilder):
    """CsFEVERv2.

    Four configurations are exposed: ``original``, ``f1``, ``precision`` and
    ``07``, each reading its own ``train``/``dev``/``test`` jsonl files (see
    ``_URLS``). All examples carry ``id``, ``label``, ``claim`` and
    ``evidence``; the ``original`` configuration additionally exposes
    ``predicted_label`` and ``predicted_score`` fields present in its data.
    """

    VERSION = datasets.Version("1.1.0")

    # One configuration per data variant; all reuse the same (currently empty)
    # description string.
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="original", version=VERSION, description=_ORIGINAL_DESCRIPTION),
        datasets.BuilderConfig(name="f1", version=VERSION, description=_ORIGINAL_DESCRIPTION),
        datasets.BuilderConfig(name="precision", version=VERSION, description=_ORIGINAL_DESCRIPTION),
        datasets.BuilderConfig(name="07", version=VERSION, description=_ORIGINAL_DESCRIPTION),
    ]

    # Loading without an explicit configuration name falls back to "original".
    DEFAULT_CONFIG_NAME = "original"

    def _info(self):
        """Return the DatasetInfo; the feature schema depends on the configuration."""
        # Columns shared by every configuration, in emission order.
        spec = {
            "id": datasets.Value("int32"),
            "label": datasets.Value("string"),
        }
        if self.config.name == "original":
            # Only the "original" variant carries prediction fields.
            spec["predicted_label"] = datasets.Value("string")
            spec["predicted_score"] = datasets.Value("float")
        spec["claim"] = datasets.Value("string")
        spec["evidence"] = datasets.Sequence(datasets.Sequence(datasets.Value("string")))
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(spec),
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Fetch this configuration's jsonl files and map them to the three splits."""
        data_dir = dl_manager.download_and_extract(_URLS[self.config.name])
        # Pairs of (canonical split name, key into _URLS / data_dir).
        split_keys = (
            (datasets.Split.TRAIN, "train"),
            (datasets.Split.VALIDATION, "dev"),
            (datasets.Split.TEST, "test"),
        )
        return [
            datasets.SplitGenerator(
                name=split_name,
                # These kwargs are forwarded to _generate_examples.
                gen_kwargs={"filepath": data_dir[key], "split": key},
            )
            for split_name, key in split_keys
        ]

    # Parameters are unpacked from the `gen_kwargs` built in `_split_generators`.
    def _generate_examples(self, filepath, split):
        """Yield (key, example) tuples, one example per jsonl line.

        The key is the zero-based line index — unique within a split, which is
        all the datasets library requires of it.
        """
        with_predictions = self.config.name == "original"
        with open(filepath, encoding="utf-8") as handle:
            for key, line in enumerate(handle):
                record = json.loads(line)
                example = {
                    "id": record["id"],
                    "label": record["label"],
                }
                if with_predictions:
                    example["predicted_label"] = record["predicted_label"]
                    example["predicted_score"] = record["predicted_score"]
                example["claim"] = record["claim"]
                example["evidence"] = record["evidence"]
                yield key, example
f1/dev.jsonl CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:64055109dfc50bbad6f87c4eece339fcc2abc44674fd0ea8492dc447687c055f
3
- size 29561494
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b62110c2439de9b4ee76f8eebc2307de152957306ee52888089eeb0d16c6b58a
3
+ size 802795
f1/test.jsonl CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:eb3a1fcc5aee5ad52b262d0fe94b2ab94f1b3dc3d45fdb51a3ee9334a00731e6
3
- size 33700683
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f9eeb0b7ee8eb0295fa56f3091eb0789468078316978fab17b430bbaab6bed66
3
+ size 798469
f1/train.jsonl CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:fe2566322e51950aac2938904afa6e00b13cd7f07174d2ca3c892e8f5daf86cc
3
- size 783373678
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4e657785e53c863385a625b222b35cc3fc3a767ee6dca0c1177490a7a009c9d5
3
+ size 12373875
original/dev.jsonl CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:b6100ad3341823b64491d21ad0e6abb4946f626f1ea03d905992bdeed822f607
3
- size 29810444
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:14bd03b32f8f5770dc7f920070dc0744de72775ee24d0eb483f81b5464b393d5
3
+ size 921237
original/test.jsonl CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:dade732ab7d1f0056fb150043b03a4ef8f826a55088f91b5c8120840b0750a0e
3
- size 39600177
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:97a554572fd5b8ce694a399bf95a61e3b737f1eade453407c5eb8ff64557bb45
3
+ size 948717
original/train.jsonl CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:9774492ef1f6e1ec0a977fbd4ba5db6fb5d24bcba2a5d1dc80fa165dce21a0bc
3
- size 1045378945
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:331affdba2bf61b2ed3c9f2e2fb5e6569a272314c07afb7dcbd3bc01004e25c2
3
+ size 18680773
precision/dev.jsonl CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:2f24c755caf020ee7ab0176464d78ebeab15e6dcd6b2b57730bf46202e87fbd8
3
- size 22319649
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8b419fc592130aa2543ad1329d9a5b2daea006585bce93a9b8ae6ecffedbd242
3
+ size 627215
precision/test.jsonl CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:b5126bf9adca16cec8a348ada64a8e5cfd6c9788ee127e4e8528965ccb1fcdfa
3
- size 24416158
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c58b15c570962fbaef430772004efea74fdae08c882d7e9774a6dc2da8c5b92e
3
+ size 629784
precision/train.jsonl CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:0d500452a87e9592c93b255284e48d5ca7cffeba5302475950a2b073ea88a681
3
- size 525615541
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b54aa8ad627a680bf663def72183d25bba95fbb80275d67cd861606e9f6f7ac4
3
+ size 8894942