system (HF staff) committed
Commit 344580d
Parent: ff87a20

Update files from the datasets library (from 1.2.0)


Release notes: https://github.com/huggingface/datasets/releases/tag/1.2.0

Files changed (1):
  1. pg19.py +4 -5
pg19.py CHANGED
@@ -40,9 +40,9 @@ One could use this dataset for benchmarking long-range language models, or use i
 """
 
 _ASSET_ROOT_URL = "https://storage.googleapis.com/deepmind-gutenberg/"
-_STORAGE_API_ROOT_URL = "https://storage.googleapis.com/storage/v1/b/deepmind-gutenberg/o"
+_STORAGE_API_ROOT_URL = "https://storage.googleapis.com/storage/v1/b/deepmind-gutenberg/o/"
 
-_METADATA_URL = os.path.join(_ASSET_ROOT_URL, "metadata.csv")
+_METADATA_URL = _ASSET_ROOT_URL + "metadata.csv"
 
 
 def flat_map(fn, arr):
@@ -116,14 +116,13 @@ class Pg19(datasets.GeneratorBasedBuilder):
                 return json.load(f)
 
         splits = ["train", "validation", "test"]
-        split_paths = map(lambda path: os.path.join(_STORAGE_API_ROOT_URL, path), splits)
+        split_paths = map(lambda path: _STORAGE_API_ROOT_URL + path, splits)
         split_paths = dl_manager.download_custom(dict(zip(splits, split_paths)), download_listdir)
 
         file_urls = list(map(filepath_to_json, split_paths.values()))
 
         complete_file_urls = [
-            list(map(lambda url: os.path.join(_ASSET_ROOT_URL, url), urls))
-            for (split_path, urls) in zip(split_paths, file_urls)
+            list(map(lambda url: _ASSET_ROOT_URL + url, urls)) for (split_path, urls) in zip(split_paths, file_urls)
         ]
         urls_to_download = {(get_filename(url)): url for urls in complete_file_urls for url in urls}
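
Both hunks make the same kind of change: URLs are now built by plain string concatenation instead of os.path.join, and _STORAGE_API_ROOT_URL gains the trailing slash that os.path.join used to supply. A likely motivation (my reading; the commit message does not state it) is that os.path.join uses the host OS path separator, so on Windows it splices a backslash into what should be a URL. A minimal sketch, using posixpath/ntpath to simulate both platforms:

# Sketch only -- not part of the commit. posixpath and ntpath are the
# os.path implementations used on Linux/macOS and on Windows respectively,
# so both behaviors can be reproduced on any machine.
import ntpath
import posixpath

root = "https://storage.googleapis.com/storage/v1/b/deepmind-gutenberg/o"

# On Linux/macOS, os.path.join happens to yield a valid URL...
assert posixpath.join(root, "train") == root + "/train"

# ...but on Windows it inserts a backslash, producing a broken URL.
assert ntpath.join(root, "train") == root + "\\train"

# The commit's approach: put the trailing slash in the constant and
# concatenate, which behaves identically on every platform.
_STORAGE_API_ROOT_URL = root + "/"
assert _STORAGE_API_ROOT_URL + "train" == root + "/train"

The same reasoning applies to _METADATA_URL and to the complete_file_urls comprehension in the second hunk, both of which previously joined URL fragments with os.path.join.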