JeremyAlain committed on
Commit
c9a0838
1 Parent(s): bb815c1

loading script created

Files changed (1)
  1. 123_test.py +10 -5
123_test.py CHANGED
@@ -45,9 +45,9 @@ _LICENSE = "Apache 2.0"
 # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
 
 _URLS = {
-    "data_0": ["https://huggingface.co/datasets/JeremyAlain/123_test/raw/main/data/0/file_{}.jsonl".format(i) for i in range(20)],
-    "data_1": ["https://huggingface.co/datasets/JeremyAlain/123_test/raw/main/data/1/file_{}.jsonl".format(i) for i in range(20)],
-    "data_2": ["https://huggingface.co/datasets/JeremyAlain/123_test/raw/main/data/2/file_{}.jsonl".format(i) for i in range(20)],
+    "data_0": ["https://huggingface.co/datasets/JeremyAlain/123_test/raw/main/data/files_0.jsonl"],
+    "data_1": ["https://huggingface.co/datasets/JeremyAlain/123_test/raw/main/data/files_1.jsonl"],
+    "data_2": ["https://huggingface.co/datasets/JeremyAlain/123_test/raw/main/data/files_2.jsonl"],
 
 }
 logger = datasets.logging.get_logger(__name__)
@@ -113,13 +113,18 @@ class FewshotPretraining(datasets.GeneratorBasedBuilder):
         # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
         urls = _URLS[self.config.name]
 
-        extracted_paths = dl_manager.download_and_extract(urls)
+        local_extracted_path = dl_manager.download_and_extract(urls)[0]
+        all_file_names_for_dataset_pd = pd.read_json(local_extracted_path, lines=True, orient="records")
+        all_file_names_for_dataset = all_file_names_for_dataset_pd.values.tolist()
+        all_file_names_for_dataset = [file_name[0] for file_name in all_file_names_for_dataset]
+
+        all_local_extracted_paths = dl_manager.download_and_extract(all_file_names_for_dataset)
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
                 # These kwargs will be passed to _generate_examples
                 gen_kwargs={
-                    "file_paths": extracted_paths,
+                    "file_paths": all_local_extracted_paths,
                 },
             )
         ]
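For context, the new `_split_generators` performs a two-stage download: it first fetches a small index file (`files_0.jsonl` / `files_1.jsonl` / `files_2.jsonl`, one per config) listing the actual data files, then downloads every file named in that index and passes the local paths to `_generate_examples` via the `file_paths` kwarg. The sketch below shows how a `_generate_examples` counterpart could consume those paths; it is not part of this commit, and the row-to-example mapping is an assumption (each downloaded file is taken to be a JSON-lines file whose rows map directly to dataset examples).

    import pandas as pd

    def _generate_examples(self, file_paths):
        # Hypothetical sketch, not shown in the commit: iterate over the
        # locally cached JSONL files and yield one example per row.
        key = 0
        for path in file_paths:
            data = pd.read_json(path, lines=True, orient="records")
            for _, row in data.iterrows():
                yield key, row.to_dict()
                key += 1

Assuming the builder registers the `_URLS` keys (`data_0`, `data_1`, `data_2`) as its configs, a config could then be loaded with, for example, `datasets.load_dataset("JeremyAlain/123_test", "data_0")`.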