# Modified code from https://huggingface.co/datasets/imageomics/Comparison-Subset-Jiggins/blob/main/scripts/download_jiggins_subset.py
# For downloading Jiggins images from any of the master CSV files
# Generates Checksum file for all images downloaded
# Logs each image download in a JSON file

import requests
import shutil
import json
import pandas as pd
from checksum import get_checksums
from tqdm import tqdm
import os
import argparse


def parse_args():
    """Parse command-line arguments: the source CSV and the output directory."""
    parser = argparse.ArgumentParser()
    # NOTE: the original combined required=True with nargs="?", which allowed a
    # bare "--csv" flag to silently yield None and crash later in pd.read_csv.
    # Dropping nargs="?" makes argparse reject the missing value up front.
    parser.add_argument("--csv", required=True, help="Path to CSV file with urls.")
    parser.add_argument("--output", required=True, help="Main directory to download images into.")
    return parser.parse_args()


def update_log(log_data, index, image, url, response_code):
    """Record one download attempt under `index`.

    Parameters:
        log_data: dict mapping row index -> log entry; mutated and returned.
        index: CSV row index used as the log key.
        image: "<species>/<image_name>" relative path of the target file.
        url: source URL the image was fetched from.
        response_code: HTTP status code of the fetch.

    Returns:
        The updated log_data dict.
    """
    log_data[index] = {
        "Image": image,
        "zenodo_link": url,
        "Response_status": response_code,
    }
    return log_data


def download_images(csv_path, image_folder, log_filepath):
    """Download every image listed in `csv_path` into `image_folder`/<species>/.

    Files that already exist on disk are skipped. One JSON log of all download
    attempts is written to `log_filepath` after the loop finishes.

    The CSV is expected to have columns: Taxonomic_Name, X, Image_name,
    zenodo_link (TODO confirm against the master CSV schema).
    """
    # load csv
    jiggins_data = pd.read_csv(csv_path)
    log_data = {}

    for i in tqdm(range(len(jiggins_data))):
        species = jiggins_data["Taxonomic_Name"][i]
        image_name = str(jiggins_data["X"][i]) + "_" + jiggins_data["Image_name"][i]
        image_path = os.path.join(image_folder, str(species), image_name)

        # download the image from url if not already downloaded
        if not os.path.exists(image_path):
            url = jiggins_data["zenodo_link"][i]
            # Context manager releases the connection even for non-200
            # responses (the original leaked those: `del response` only
            # dropped the name, it never closed the stream).
            with requests.get(url, stream=True) as response:
                # log status
                log_data = update_log(
                    log_data,
                    index=i,
                    image=species + "/" + image_name,
                    url=url,
                    response_code=response.status_code,
                )

                # create the species-appropriate folder if necessary.
                # exist_ok=True avoids the check-then-create race the original
                # had with os.path.exists(...) + exist_ok=False.
                os.makedirs(os.path.dirname(image_path), exist_ok=True)

                # download the image
                if response.status_code == 200:
                    # Decode gzip/deflate transfer encodings when copying from
                    # the raw socket stream, otherwise compressed responses
                    # would be saved still compressed.
                    response.raw.decode_content = True
                    with open(image_path, "wb") as out_file:
                        shutil.copyfileobj(response.raw, out_file)

    # Save the log of image downloads (written once, after the loop).
    with open(log_filepath, "w") as log_file:
        json.dump(log_data, log_file, indent=4)


def main():
    """Entry point: download all images listed in the CSV, then checksum them."""
    # get arguments from commandline
    args = parse_args()
    csv_path = args.csv        # path to our csv with urls to download images from
    image_folder = args.output  # folder where dataset will be downloaded to

    # Derive sibling output paths from the CSV name. os.path.splitext (unlike
    # the original csv_path.split(".")[0]) is safe for paths containing dots
    # elsewhere, e.g. "./data/file.csv".
    csv_stem = os.path.splitext(csv_path)[0]

    # log file location
    log_filepath = csv_stem + "_log.json"

    # download images from urls
    download_images(csv_path, image_folder, log_filepath)

    # generate checksums and save CSV to same folder as CSV used for download
    checksum_path = csv_stem + "_checksums.csv"
    get_checksums(image_folder, checksum_path)

    print(f"Images downloaded from {csv_path} to {image_folder}.")
    print(f"Checksums recorded in {checksum_path} and download log is in {log_filepath}.")


if __name__ == "__main__":
    main()