TomTBT committed on
Commit
a56c473
1 Parent(s): 88a4d02

Added download of increments

Browse files
Files changed (1) hide show
  1. pmc_open_access_xml.py +22 -25
pmc_open_access_xml.py CHANGED
@@ -488,15 +488,13 @@ class OpenAccessXML(datasets.GeneratorBasedBuilder):
488
 
489
  for baseline in baselines:
490
  baseline_file_list_url = f"{url}{basename}{baseline}.filelist.csv"
491
- try:
492
- baseline_file_list = dl_manager.download(baseline_file_list_url)
493
- except FileNotFoundError: # non-commercial PMC000xxxxxx baseline does not exist
494
- continue
495
  baseline_archive_url = f"{url}{basename}{baseline}.tar.gz"
496
  try:
 
497
  baseline_archive = dl_manager.download(baseline_archive_url)
498
- except FileNotFoundError:
499
  continue
 
500
  baseline_file_lists.append(baseline_file_list)
501
  baseline_archives.append(baseline_archive)
502
 
@@ -505,21 +503,22 @@ class OpenAccessXML(datasets.GeneratorBasedBuilder):
505
  # Incremental commented because some articles are already in the main parts (updates?)
506
  # Need to find a way to add them to the dataset without duplicating the articles.
507
  # Also adding them would mean that each new day the dataset is loaded, the whole dataset is recreated.
508
- # date_delta = datetime.date.today() - datetime.date.fromisoformat(_BASELINE_DATE)
509
- # incremental_dates = [
510
- # (datetime.date.fromisoformat(_BASELINE_DATE) + datetime.timedelta(days=i + 1)).isoformat()
511
- # for i in range(date_delta.days)
512
- # ]
513
- # incrementals = [f"incr.{date}" for date in incremental_dates]
514
- # incremental_urls = {
515
- # "incremental_file_lists": [
516
- # f"{url}{basename}{incremental}.filelist.csv" for incremental in incrementals
517
- # ],
518
- # "incremental_archives": [f"{url}{basename}{incremental}.tar.gz" for incremental in incrementals],
519
- # }
520
- # paths = dl_manager.download(incremental_urls)
521
- # incremental_paths["incremental_file_lists"].extend(paths["incremental_file_lists"])
522
- # incremental_paths["incremental_archives"].extend(paths["incremental_archives"])
 
523
 
524
  return [
525
  datasets.SplitGenerator(
@@ -528,15 +527,13 @@ class OpenAccessXML(datasets.GeneratorBasedBuilder):
528
  "baseline_file_lists": baseline_file_lists,
529
  "baseline_archives": [dl_manager.iter_archive(archive) for archive in baseline_archives],
530
  "baseline_package_list": baseline_package_list,
531
- # "incremental_file_lists": incremental_paths["incremental_file_lists"],
532
- # "incremental_archives": [
533
- # dl_manager.iter_archive(archive) for archive in incremental_paths["incremental_archives"]
534
- # ],
535
  },
536
  ),
537
  ]
538
 
539
- def _generate_examples(self, baseline_file_lists, baseline_archives, baseline_package_list): #, incremental_file_lists, incremental_archives
540
  # Loading the file that lists the folder of each individual PMC article package (with media and graphics)
541
  oa_package_list = pd.read_csv(baseline_package_list, index_col="Accession ID")
542
  oa_package_list = oa_package_list[["File"]]
 
488
 
489
  for baseline in baselines:
490
  baseline_file_list_url = f"{url}{basename}{baseline}.filelist.csv"
 
 
 
 
491
  baseline_archive_url = f"{url}{basename}{baseline}.tar.gz"
492
  try:
493
+ baseline_file_list = dl_manager.download(baseline_file_list_url)
494
  baseline_archive = dl_manager.download(baseline_archive_url)
495
+ except FileNotFoundError: # non-commercial PMC000xxxxxx baseline does not exist
496
  continue
497
+
498
  baseline_file_lists.append(baseline_file_list)
499
  baseline_archives.append(baseline_archive)
500
 
 
503
  # Incrementals are downloaded below, but some articles may already be in the main parts (updates?)
504
  # Need to find a way to add them to the dataset without duplicating the articles.
505
  # Also adding them would mean that each new day the dataset is loaded, the whole dataset is recreated.
506
+ date_delta = datetime.date.today() - datetime.date.fromisoformat(_BASELINE_DATE)
507
+ incremental_dates = [
508
+ (datetime.date.fromisoformat(_BASELINE_DATE) + datetime.timedelta(days=i + 1)).isoformat()
509
+ for i in range(date_delta.days)
510
+ ]
511
+ incrementals = [f"incr.{date}" for date in incremental_dates]
512
+ for incremental in incrementals:
513
+ incremental_file_list_url = f"{url}{basename}{incremental}.filelist.csv"
514
+ incremental_archive_url = f"{url}{basename}{incremental}.tar.gz"
515
+ try:
516
+ incremental_file_list = dl_manager.download(incremental_file_list_url)
517
+ incremental_archive = dl_manager.download(incremental_archive_url)
518
+ except FileNotFoundError: # Some increments might not exist
519
+ continue
520
+ incremental_paths["incremental_file_lists"].append(incremental_file_list)
521
+ incremental_paths["incremental_archives"].append(incremental_archive)
522
 
523
  return [
524
  datasets.SplitGenerator(
 
527
  "baseline_file_lists": baseline_file_lists,
528
  "baseline_archives": [dl_manager.iter_archive(archive) for archive in baseline_archives],
529
  "baseline_package_list": baseline_package_list,
530
+ "incremental_file_lists": incremental_paths["incremental_file_lists"],
531
+ "incremental_archives": [dl_manager.iter_archive(archive) for archive in incremental_paths["incremental_archives"]],
 
 
532
  },
533
  ),
534
  ]
535
 
536
+ def _generate_examples(self, baseline_file_lists, baseline_archives, baseline_package_list, incremental_file_lists, incremental_archives):
537
  # Loading the file that lists the folder of each individual PMC article package (with media and graphics)
538
  oa_package_list = pd.read_csv(baseline_package_list, index_col="Accession ID")
539
  oa_package_list = oa_package_list[["File"]]