mlynatom committed on
Commit
f5bfe85
1 Parent(s): 9f3eb61

revised working version, wiki_pages fixed

Browse files
Files changed (2) hide show
  1. csfever_v2.py +8 -9
  2. wiki_pages/wiki_pages.jsonl +2 -2
csfever_v2.py CHANGED
@@ -12,7 +12,7 @@
12
  # See the License for the specific language governing permissions and
13
  # limitations under the License.
14
  # TODO: Address all TODOs and remove all explanatory comments
15
- """TODO: Add a description here."""
16
 
17
 
18
  import csv
@@ -25,7 +25,7 @@ import datasets
25
  # TODO: Add description of the dataset here
26
  # You can copy an official description
27
  _DESCRIPTION = """\
28
- This new dataset is designed to solve this great NLP task and is crafted with a lot of care.
29
  """
30
  #TODO
31
  _CITATION = ""
@@ -35,7 +35,6 @@ _HOMEPAGE = ""
35
  # TODO: Add the licence for the dataset here if you can find it
36
  _LICENSE = ""
37
 
38
- # TODO: Add link to the official dataset URLs here
39
  # The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
40
  # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
41
  _URLS = {
@@ -57,7 +56,7 @@ _URLS = {
57
  _ORIGINAL_DESCRIPTION = ""
58
 
59
 
60
- # TODO: Name of the dataset usually matches the script name with CamelCase instead of snake_case
61
  class CsFEVERv2(datasets.GeneratorBasedBuilder):
62
  """CsFEVERv2"""
63
 
@@ -106,7 +105,7 @@ class CsFEVERv2(datasets.GeneratorBasedBuilder):
106
  DEFAULT_CONFIG_NAME = "original" # It's not mandatory to have a default configuration. Just use one if it makes sense.
107
 
108
  def _info(self):
109
- # TODO: This method specifies the datasets.DatasetInfo object which contains informations and typings for the dataset
110
  if self.config.name == "original": # This is the name of the configuration selected in BUILDER_CONFIGS above
111
  features = datasets.Features(
112
  {
@@ -157,7 +156,7 @@ class CsFEVERv2(datasets.GeneratorBasedBuilder):
157
  )
158
 
159
  def _split_generators(self, dl_manager):
160
- # TODO: This method is tasked with downloading/extracting the data and defining the splits depending on the configuration
161
  # If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name
162
 
163
  # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLS
@@ -166,14 +165,14 @@ class CsFEVERv2(datasets.GeneratorBasedBuilder):
166
  urls = _URLS[self.config.name]
167
  data_dir = dl_manager.download_and_extract(urls)
168
  if self.config.name == "wiki_pages":
169
- return datasets.SplitGenerator(
170
  name="wiki_pages",
171
  # These kwargs will be passed to _generate_examples
172
  gen_kwargs={
173
  "filepath": data_dir,
174
  "split": "wiki_pages",
175
  },
176
- )
177
  else:
178
  return [
179
  datasets.SplitGenerator(
@@ -204,7 +203,7 @@ class CsFEVERv2(datasets.GeneratorBasedBuilder):
204
 
205
  # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
206
  def _generate_examples(self, filepath, split):
207
- # TODO: This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
208
  # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
209
  with open(filepath, encoding="utf-8") as f:
210
  for key, row in enumerate(f):
 
12
  # See the License for the specific language governing permissions and
13
  # limitations under the License.
14
  # TODO: Address all TODOs and remove all explanatory comments
15
+ """CsFEVERv2 dataset"""
16
 
17
 
18
  import csv
 
25
  # TODO: Add description of the dataset here
26
  # You can copy an official description
27
  _DESCRIPTION = """\
28
+ This new dataset is aimed on Czech fact-checking task.
29
  """
30
  #TODO
31
  _CITATION = ""
 
35
  # TODO: Add the licence for the dataset here if you can find it
36
  _LICENSE = ""
37
 
 
38
  # The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
39
  # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
40
  _URLS = {
 
56
  _ORIGINAL_DESCRIPTION = ""
57
 
58
 
59
+ #Name of the dataset usually matches the script name with CamelCase instead of snake_case
60
  class CsFEVERv2(datasets.GeneratorBasedBuilder):
61
  """CsFEVERv2"""
62
 
 
105
  DEFAULT_CONFIG_NAME = "original" # It's not mandatory to have a default configuration. Just use one if it makes sense.
106
 
107
  def _info(self):
108
+ #This method specifies the datasets.DatasetInfo object which contains informations and typings for the dataset
109
  if self.config.name == "original": # This is the name of the configuration selected in BUILDER_CONFIGS above
110
  features = datasets.Features(
111
  {
 
156
  )
157
 
158
  def _split_generators(self, dl_manager):
159
+ # This method is tasked with downloading/extracting the data and defining the splits depending on the configuration
160
  # If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name
161
 
162
  # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLS
 
165
  urls = _URLS[self.config.name]
166
  data_dir = dl_manager.download_and_extract(urls)
167
  if self.config.name == "wiki_pages":
168
+ return [datasets.SplitGenerator(
169
  name="wiki_pages",
170
  # These kwargs will be passed to _generate_examples
171
  gen_kwargs={
172
  "filepath": data_dir,
173
  "split": "wiki_pages",
174
  },
175
+ )]
176
  else:
177
  return [
178
  datasets.SplitGenerator(
 
203
 
204
  # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
205
  def _generate_examples(self, filepath, split):
206
+ # This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
207
  # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
208
  with open(filepath, encoding="utf-8") as f:
209
  for key, row in enumerate(f):
wiki_pages/wiki_pages.jsonl CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:97a554572fd5b8ce694a399bf95a61e3b737f1eade453407c5eb8ff64557bb45
3
- size 948717
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:036940c23045d7e8d04339adae711860e5e1b76741d2de653c0691c4c6d7f10d
3
+ size 1140517712