Maurice Weber committed
Commit: f2c5483
1 Parent(s): 2cae54f

fix missing urls

Files changed (1):
  1. RedPajama-Data-V2.py (+126 -152)
RedPajama-Data-V2.py CHANGED
@@ -19,7 +19,6 @@ import json
 
 import datasets
 import traceback
-import os
 import gzip
 from typing import List
 
@@ -33,7 +32,8 @@ RedPajama V2: an Open Dataset for Training Large Language Models
 
 _URL_BASE = 'https://data.together.xyz/redpajama-data-v2/v1.0.0'
 _LANGUAGES = ("en", "de", "fr", "es", "it")
-_LISTINGS_PATTERN = "listings/{language}-{snapshot}-{partition}.txt"
+_MISSING_FILES_PATTERN = "urls/missing-{component}.txt"
+_NUM_SHARDS = 5000
 
 _CC_SNAPSHOT_IDS = (
     "2014-15",
@@ -168,41 +168,39 @@ class RedPajamaV2(datasets.GeneratorBasedBuilder):
         )
 
     def _split_generators_sample(self, dl_manager):
-        # fetch documents
-        sample_listings = dl_manager.download_and_extract(
+        # fetch list of base tags
+        sample_base_tags_fp = dl_manager.download_and_extract(
             "sample/sample_listings.txt"
         )
-        with open(sample_listings, "r") as fd:
-            listings = [line.strip() for line in fd]
+        with open(sample_base_tags_fp, "r") as fd:
+            sample_base_tags = [line.strip() for line in fd]
 
         # fetch documents
+        logger.info(f"Downloading {len(sample_base_tags)} documents files.")
        documents_files = dl_manager.download({
-            "head_middle": [
-                f"sample/documents/{lst}.json.gz" for lst in listings
-            ]
+            base_tag: f"sample/documents/{base_tag}.json.gz"
+            for base_tag in sample_base_tags
        })
 
         # fetch quality signals
+        logger.info(f"Downloading {len(sample_base_tags)} quality signals files.")
        quality_signals_files = dl_manager.download({
-            "head_middle": [
-                f"sample/quality_signals/{lst}.signals.json.gz"
-                for lst in listings
-            ]
+            base_tag: f"sample/quality_signals/{base_tag}.signals.json.gz"
+            for base_tag in sample_base_tags
        })
 
         # fetch ids of duplicates
+        logger.info(f"Downloading {len(sample_base_tags)} duplicates ids files.")
        duplicates_ids_files = dl_manager.download({
-            "head_middle": [
-                f"sample/duplicates/{lst}.duplicates.parquet"
-                for lst in listings
-            ]
+            base_tag: f"sample/duplicates/{base_tag}.duplicates.parquet"
+            for base_tag in sample_base_tags
        })
 
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
                 gen_kwargs={
-                    "listings_ids": {"head_middle": listings},
+                    "base_tags": sample_base_tags,
                     "documents_files": documents_files,
                     "quality_signals_files": quality_signals_files,
                     "duplicates_ids_files": duplicates_ids_files
@@ -215,76 +213,72 @@ class RedPajamaV2(datasets.GeneratorBasedBuilder):
         languages = getattr(self.config, 'languages', _LANGUAGES)
         partition = getattr(self.config, 'partition', 'all')
 
-        partitions = {
-            "all": ["head_middle", "tail"]
-        }.get(partition, [partition])
-
-        # nested structure: partition -> urls
-        listings_files_urls = {}
-        for part in partitions:
-            listings_files_urls[part] = []
-            for snapshot_id in snapshots:
-                for lang in languages:
-                    listings_files_urls[part].append(
-                        _LISTINGS_PATTERN.format(
-                            language=lang,
-                            snapshot=snapshot_id,
-                            partition=part,
-                        )
-                    )
-
-        # fetch listings from hub
-        listings_files = dl_manager.download_and_extract(listings_files_urls)
-
-        # fetch listings
-        listings_ids = {}
-        for part, part_listings_files in listings_files.items():
-            listings_ids[part] = []
-            for listings_file in part_listings_files:
-                with open(listings_file, encoding="utf-8") as f:
-                    listings_ids[part].extend([
-                        line.strip() for line in f
-                    ])
-
-        # build urls pointing to documents, quality signals and duplicate ids
-        document_urls = {}
-        quality_signals_urls = {}
-        duplicates_ids_urls = {}
-        for part, part_listings_ids in listings_ids.items():
-
-            document_urls[part] = []
-            quality_signals_urls[part] = []
-            duplicates_ids_urls[part] = []
-
-            for lst_id in part_listings_ids:
-                document_urls[part].append(
-                    os.path.join(_URL_BASE, f"documents/{lst_id}.json.gz")
-                )
-
-                if part != "head_middle":
-                    continue
-
-                quality_signals_urls[part].append(
-                    os.path.join(
-                        _URL_BASE, f"quality_signals/{lst_id}.signals.json.gz"
-                    )
-                )
-
-                duplicates_ids_urls[part].append(
-                    os.path.join(
-                        _URL_BASE, f"duplicates/{lst_id}.duplicates.parquet"
-                    )
-                )
-
-        documents_files = dl_manager.download(document_urls)
+        if partition == 'all':
+            partitions = ['head', 'middle', 'tail']
+        elif partition == 'head_middle':
+            partitions = ['head', 'middle']
+        elif partition == 'tail':
+            partitions = [partition]
+        else:
+            raise ValueError(f'invalid partition: {partition}')
+
+        # fetch list of missing files (e.g., missing duplicates or corrupted documents and
+        # quality signal files)
+        missing_files_paths = dl_manager.download_and_extract({
+            component: _MISSING_FILES_PATTERN.format(component=component)
+            for component in ("documents", "signals", "duplicates")
+        })
+
+        missing_files = {}
+        for component, missing_file in missing_files_paths.items():
+            with open(missing_file) as f:
+                missing_files[component] = set(line.strip() for line in f)
+
+        # build list of urls to fetch
+        documents_urls = {}
+        quality_signals_urls = {}
+        duplicates_ids_urls = {}
+        base_tags = []
+
+        for lang in languages:
+            for snapshot in snapshots:
+                for part in partitions:
+                    for n in range(_NUM_SHARDS):
+                        base_tag = f"{snapshot}/{n:04d}/{lang}_{part}"
+                        base_tags.append(base_tag)
+
+                        # documents
+                        url = f"{_URL_BASE}/documents/{base_tag}.json.gz"
+                        if url not in missing_files["documents"]:
+                            documents_urls[base_tag] = url
+
+                        # quality signals
+                        url = f"{_URL_BASE}/quality_signals/{base_tag}.signals.json.gz"
+                        if url not in missing_files["signals"]:
+                            quality_signals_urls[base_tag] = url
+
+                        # duplicates
+                        url = f"{_URL_BASE}/duplicates/{base_tag}.duplicates.parquet"
+                        if url not in missing_files["duplicates"]:
+                            duplicates_ids_urls[base_tag] = url
+
+        # download documents files
+        logger.info(f"Downloading {len(documents_urls)} documents files.")
+        documents_files = dl_manager.download(documents_urls)
+
+        # download quality signals files
+        logger.info(f"Downloading {len(quality_signals_urls)} quality signals files.")
         quality_signals_files = dl_manager.download(quality_signals_urls)
+
+        # download duplicates ids files
+        logger.info(f"Downloading {len(duplicates_ids_urls)} duplicates ids files.")
         duplicates_ids_files = dl_manager.download(duplicates_ids_urls)
 
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
                 gen_kwargs={
-                    "listings_ids": listings_ids,
+                    "base_tags": base_tags,
                     "documents_files": documents_files,
                     "quality_signals_files": quality_signals_files,
                     "duplicates_ids_files": duplicates_ids_files
@@ -299,91 +293,71 @@ class RedPajamaV2(datasets.GeneratorBasedBuilder):
         return self._split_generators_full(dl_manager)
 
     def _generate_examples(
-        self, listings_ids, documents_files, quality_signals_files,
+        self, base_tags, documents_files, quality_signals_files,
         duplicates_ids_files
     ):
         key = 0
-        for part in documents_files.keys():
-            part_docs_files = documents_files[part]
-            part_qs_files = quality_signals_files[part]
-            part_listings_ids = listings_ids[part]
-            part_duplicates_ids_files = duplicates_ids_files[part]
-
-            if len(part_qs_files) == 0:
-                for sample in self._handle_tail_partition(
-                    part, part_docs_files, part_listings_ids
-                ):
-                    yield key, sample
-                    key += 1
-                continue
-
-            for sample in self._handle_head_middle_partition(
-                part, part_docs_files, part_qs_files,
-                part_duplicates_ids_files, part_listings_ids
+        for base_tag in base_tags:
+            doc_file = documents_files.get(base_tag)
+            qs_file = quality_signals_files.get(base_tag)
+            dupe_file = duplicates_ids_files.get(base_tag)
+
+            if doc_file is None:
+                continue
+
+            for sample in self.__get_generator(
+                base_tag, doc_file, qs_file, dupe_file
             ):
                 yield key, sample
                 key += 1
 
-    def _handle_tail_partition(self, part, docs_files, listings_ids):
-        for doc_file, listing_id in zip(docs_files, listings_ids):
-            with gzip.open(doc_file, "rt", encoding="utf-8") as df:
-                for row, doc in enumerate(df):
-                    doc_id = f"{listing_id}.json.gz/{row}"
-                    try:
-                        yield self.handle_record(part, doc_id, doc, None, None)
-                    except Exception as e:
-                        print(f'doc_file: {doc_file}')
-                        print(f'row: {row}')
-                        traceback.print_exc()
-                        raise e
-
-    def _handle_head_middle_partition(
-        self, part, docs_files, qs_files, dupes_files, listings_ids,
-    ):
-        assert len(docs_files) == len(qs_files)
-
-        listings_ids = listings_ids[:len(docs_files)]
-
-        for doc_file, qs_file, dupe_file, listings_id in zip(
-            docs_files, qs_files, dupes_files, listings_ids
-        ):
-            # load duplicates
-            try:
-                with open(dupe_file, "rb") as df:
-                    duplicates = set(pq.read_table(
-                        df, columns=["doc_id"], use_pandas_metadata=False
-                    )["doc_id"].to_pylist())
-            except Exception as e:
-                print(f'failed loading duplicate ids from {dupe_file}.')
-                duplicates = set()
-
-            try:
-                with gzip.open(doc_file, "rt", encoding="utf-8") as df:
-                    with gzip.open(qs_file, "rt", encoding="utf-8") as qf:
-                        for row, (doc, qs) in enumerate(zip(df, qf)):
-                            doc_id = f"{listings_id}.json.gz/{row}"
-
-                            if doc_id in duplicates:
-                                is_duplicate = True
-                            else:
-                                is_duplicate = False
-
-                            try:
-                                yield self.handle_record(
-                                    part, doc_id, doc, qs,
-                                    is_duplicate=is_duplicate
-                                )
-                            except Exception as e:
-                                print(f'failed handling row {row} in '
-                                      f'{doc_file} ({qs_file})')
-                                traceback.print_exc()
-                                continue
-            except gzip.BadGzipFile as e:
-                # skip broken gzip files
-                print(f'BadGzipFile: {doc_file, qs_file}')
-                traceback.print_exc()
-                continue
+    def __get_generator(self, base_tag, doc_file, qs_file, dupe_file):
+        if "_tail" in base_tag:
+            yield from self._handle_tail(base_tag, doc_file, qs_file, dupe_file)
+        else:
+            yield from self._handle_head_middle(base_tag, doc_file, qs_file, dupe_file)
+
+    def _handle_tail(self, base_tag, doc_file, qs_file, dupe_file):
+        with gzip.open(doc_file, "rt", encoding="utf-8") as df:
+            for row, doc in enumerate(df):
+                doc_id = f"{base_tag}.json.gz/{row}"
+                try:
+                    yield self.handle_record("tail", doc_id, doc, None, None)
+                except Exception as e:
+                    logger.warning(f'failed handling row {row} in {doc_file}')
+                    traceback.print_exc()
+                    continue
+
+    def _handle_head_middle(
+        self, base_tag, doc_file, qs_file, dupe_file
+    ):
+        if qs_file is None:
+            yield from self._handle_tail(base_tag, doc_file, None, None)
+            return
+
+        # load duplicates
+        try:
+            with open(dupe_file, "rb") as df:
+                duplicates = set(pq.read_table(
+                    df, columns=["doc_id"], use_pandas_metadata=False
+                )["doc_id"].to_pylist())
+        except Exception as e:
+            logger.warning(f'no duplicate ids found for {base_tag}')
+            duplicates = set()
+
+        with gzip.open(doc_file, "rt", encoding="utf-8") as df:
+            with gzip.open(qs_file, "rt", encoding="utf-8") as qf:
+                for row, (doc, qs) in enumerate(zip(df, qf)):
+                    doc_id = f"{base_tag}.json.gz/{row}"
+
+                    try:
+                        yield self.handle_record(
+                            "head_middle", doc_id, doc, qs, is_duplicate=doc_id in duplicates
+                        )
+                    except Exception as e:
+                        logger.warning(f'failed handling row {row} in {doc_file} ({qs_file})')
+                        traceback.print_exc()
+                        continue
 
     @staticmethod
     def handle_record(part, doc_id, doc, qs, is_duplicate=None):
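For orientation, a minimal, standalone sketch of the URL layout that _split_generators_full assembles above; the snapshot id, shard index, language and partition values below are illustrative placeholders, not values fixed by the loader:

# Sketch of the base-tag / URL scheme used by the builder above.
# "2023-06", shard 0, "en" and "head" are illustrative only; the real loader
# iterates over all configured snapshots, languages, partitions and
# _NUM_SHARDS shards, then drops any URL listed in the missing-files lists.
URL_BASE = "https://data.together.xyz/redpajama-data-v2/v1.0.0"

snapshot, shard, lang, part = "2023-06", 0, "en", "head"
base_tag = f"{snapshot}/{shard:04d}/{lang}_{part}"  # -> "2023-06/0000/en_head"

documents_url = f"{URL_BASE}/documents/{base_tag}.json.gz"
quality_signals_url = f"{URL_BASE}/quality_signals/{base_tag}.signals.json.gz"
duplicates_url = f"{URL_BASE}/duplicates/{base_tag}.duplicates.parquet"

print(documents_url)
# https://data.together.xyz/redpajama-data-v2/v1.0.0/documents/2023-06/0000/en_head.json.gz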
 
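The duplicate-id lookup in _handle_head_middle can also be exercised on its own. A minimal sketch, assuming pq is pyarrow.parquet (as in the loader) and using an illustrative local file path:

import pyarrow.parquet as pq

# Illustrative path; in the loader this is the downloaded
# <base_tag>.duplicates.parquet file.
dupe_file = "2023-06/0000/en_head.duplicates.parquet"

with open(dupe_file, "rb") as f:
    duplicates = set(
        pq.read_table(
            f, columns=["doc_id"], use_pandas_metadata=False
        )["doc_id"].to_pylist()
    )

# Document ids follow the "<base_tag>.json.gz/<row>" convention used above.
is_duplicate = "2023-06/0000/en_head.json.gz/0" in duplicates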
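Finally, a hedged usage sketch of the builder as a whole. The repository id, the config names ("sample" and "default") and the snapshots/languages/partition keyword arguments are assumptions that mirror the self.config attributes read in _split_generators_full; check the dataset card for the exact supported values.

from datasets import load_dataset

# Small sample configuration (assumed config name "sample").
ds_sample = load_dataset(
    "togethercomputer/RedPajama-Data-V2",
    name="sample",
    split="train",
)

# Full configuration (assumed config name "default"); the extra keyword
# arguments are forwarded to the builder config that _split_generators_full
# reads via self.config.
ds_full = load_dataset(
    "togethercomputer/RedPajama-Data-V2",
    name="default",
    partition="head_middle",
    snapshots=["2023-06"],
    languages=["en"],
    split="train",
)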