Languages: English
Multilinguality: monolingual
Size Categories: 1K<n<10K
Language Creators: found
Annotations Creators: no-annotation
Source Datasets: original
system (HF staff) committed on
Commit 988152f · 1 Parent(s): fdebd15

Update files from the datasets library (from 1.13.0)

Release notes: https://github.com/huggingface/datasets/releases/tag/1.13.0

Files changed (2):
  1. README.md (+1, -0)
  2. scitldr.py (+8, -21)
README.md CHANGED
@@ -18,6 +18,7 @@ task_categories:
 task_ids:
 - summarization
 paperswithcode_id: scitldr
+pretty_name: SciTLDR
 ---
 
 # Dataset Card for SciTLDR
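
For reference, pretty_name sets the human-readable title the Hub displays for the dataset card. A minimal sketch of reading the field back, assuming a recent huggingface_hub client (the lookup itself is not part of this commit):

    from huggingface_hub import DatasetCard

    # Fetch the scitldr dataset card and read its YAML front matter;
    # pretty_name is the field this commit adds.
    card = DatasetCard.load("scitldr")
    print(card.data.pretty_name)  # expected: SciTLDR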
scitldr.py CHANGED
@@ -124,42 +124,29 @@ class Scitldr(datasets.GeneratorBasedBuilder):
 
     def _split_generators(self, dl_manager):
         """Returns SplitGenerators."""
-        # TODO: This method is tasked with downloading/extracting the data and defining the splits depending on the configuration
-        # If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name
-
-        # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLs
-        # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
-        # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
         urls = {
-            "train": os.path.join(_URLs[self.config.name], _TRAIN_DATA),
-            "valid": os.path.join(_URLs[self.config.name], _VALID_DATA),
-            "test": os.path.join(_URLs[self.config.name], _TEST_DATA),
+            "train": _URLs[self.config.name] + _TRAIN_DATA,
+            "valid": _URLs[self.config.name] + _VALID_DATA,
+            "test": _URLs[self.config.name] + _TEST_DATA,
         }
-        data_dir = dl_manager.download_and_extract(urls)
+        data_dir = dl_manager.download(urls)
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
-                # These kwargs will be passed to _generate_examples
-                gen_kwargs={"filepath": os.path.join(data_dir["train"]), "split": "train"},
+                gen_kwargs={"filepath": os.path.join(data_dir["train"])},
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.TEST,
-                # These kwargs will be passed to _generate_examples
-                gen_kwargs={"filepath": os.path.join(data_dir["test"]), "split": "test"},
+                gen_kwargs={"filepath": os.path.join(data_dir["test"])},
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.VALIDATION,
-                # These kwargs will be passed to _generate_examples
-                gen_kwargs={"filepath": os.path.join(data_dir["valid"]), "split": "dev"},
+                gen_kwargs={"filepath": os.path.join(data_dir["valid"])},
             ),
         ]
 
-    def _generate_examples(self, filepath, split):
+    def _generate_examples(self, filepath):
         """Yields examples."""
-        # TODO: This method will receive as arguments the `gen_kwargs` defined in the previous `_split_generators` method.
-        # It is in charge of opening the given file and yielding (key, example) tuples from the dataset
-        # The key is not important, it's more here for legacy reason (legacy from tfds)
-
         with open(filepath, encoding="utf-8") as f:
             for id_, row in enumerate(f):
                 data = json.loads(row)
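
Net effect: the script now builds remote URLs by plain string concatenation instead of os.path.join (which can insert a backslash on Windows), and fetches the raw JSONL files with download() rather than download_and_extract(), since there is no archive to unpack; avoiding extraction also helps keep a loading script usable in streaming mode. A minimal usage sketch, where the "Abstract" configuration name is taken from the upstream SciTLDR repository rather than this diff:

    from datasets import load_dataset

    # Conventional loading: the JSONL files are downloaded and cached locally.
    ds = load_dataset("scitldr", "Abstract", split="train")

    # Streaming loading: examples are read lazily over HTTP, which depends on
    # the script downloading plain files instead of extracting archives.
    stream = load_dataset("scitldr", "Abstract", split="train", streaming=True)
    print(next(iter(stream)))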