egrace479 committed on
Commit
5253c8c
1 Parent(s): 7fa480b

Update download script with better logging and retry loop

Browse files
Files changed (1) hide show
  1. scripts/download_jiggins_subset.py +137 -34
scripts/download_jiggins_subset.py CHANGED
@@ -1,7 +1,11 @@
1
- # Modified code from https://huggingface.co/datasets/imageomics/Comparison-Subset-Jiggins/blob/main/scripts/download_jiggins_subset.py
2
- # For downloading Jiggins images from any of the master CSV files
3
- # Generates Checksum file for all images download
4
- # logs image download in json file
 
 
 
 
5
 
6
  import requests
7
  import shutil
@@ -12,9 +16,26 @@ from checksum import get_checksums
12
 
13
  from tqdm import tqdm
14
  import os
 
 
15
  import argparse
16
 
17
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
18
  def parse_args():
19
  parser = argparse.ArgumentParser()
20
  parser.add_argument("--csv", required=True, help="Path to CSV file with urls.", nargs="?")
@@ -23,52 +44,122 @@ def parse_args():
23
  return parser.parse_args()
24
 
25
 
26
- def update_log(log_data, index, image, url, response_code):
27
  # log status
28
  log_entry = {}
29
  log_entry["Image"] = image
30
- log_entry["zenodo_link"] = url
31
- log_entry["Response_status"] = response_code
 
 
 
32
  log_data[index] = log_entry
33
 
34
  return log_data
35
 
36
 
37
- def download_images(csv_path, image_folder, log_filepath):
38
- #load csv
39
- jiggins_data = pd.read_csv(csv_path)
 
 
 
 
 
40
  log_data = {}
 
41
 
42
  for i in tqdm(range(0, len(jiggins_data))) :
 
43
  species = jiggins_data["Taxonomic_Name"][i]
44
  image_name = jiggins_data["X"][i].astype(str) + "_" + jiggins_data["Image_name"][i]
 
45
 
46
- #download the image from url is not already downloaded
 
47
  if os.path.exists(f"{image_folder}/{species}/{image_name}") != True:
48
  #get image from url
49
- url = jiggins_data["zenodo_link"][i]
50
- response = requests.get(url, stream=True)
51
-
52
- # log status
53
- log_data = update_log(log_data,
54
- index = i,
55
- image = species + "/" + image_name,
56
- url = url,
57
- response_code = response.status_code
58
- )
59
-
60
- #create the species appropriate folder if necessary
61
- if os.path.exists(f"{image_folder}/{species}") != True:
62
- os.makedirs(f"{image_folder}/{species}", exist_ok=False)
63
 
64
  #download the image
65
- if response.status_code == 200:
66
- with open(f"{image_folder}/{species}/{image_name}", "wb") as out_file:
67
- shutil.copyfileobj(response.raw, out_file)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
68
  del response
69
 
70
- with open(log_filepath, "w") as log_file:
71
- json.dump(log_data, log_file, indent = 4)
 
 
72
 
73
  return
74
 
@@ -79,20 +170,32 @@ def main():
79
  csv_path = args.csv #path to our csv with urls to download images from
80
  image_folder = args.output #folder where dataset will be downloaded to
81
 
82
- # log file location
83
  log_filepath = csv_path.split(".")[0] + "_log.json"
 
 
 
 
 
 
 
 
 
 
 
 
84
 
85
  #dowload images from urls
86
- download_images(csv_path, image_folder, log_filepath)
87
 
88
  # generate checksums and save CSV to same folder as CSV used for download
89
  checksum_path = csv_path.split(".")[0] + "_checksums.csv"
90
  get_checksums(image_folder, checksum_path)
91
 
92
  print(f"Images downloaded from {csv_path} to {image_folder}.")
93
- print(f"Checksums recorded in {checksum_path} and download log is in {log_filepath}.")
94
 
95
  return
96
 
97
  if __name__ == "__main__":
98
- main()
 
1
+ # Built on Michelle's download script: https://huggingface.co/datasets/imageomics/Comparison-Subset-Jiggins/blob/977a934e1eef18f6b6152da430ac83ba6f7bd30f/download_jiggins_subset.py
2
+ # with modification of David's redo loop: https://github.com/Imageomics/data-fwg/blob/anomaly-data-challenge/HDR-anomaly-data-challenge/notebooks/download_images.ipynb
3
+ # and expanded logging and file checks. Further added checksum calculation for all downloaded images at end.
4
+
5
+ # Script to download Jiggins images from any of the master CSV files.
6
+ # Generates Checksum file for all images downloaded (<master filename>_checksums.csv).
7
+ # Logs image downloads and failures in json files (<master filename>_log.json & <master filename>_error_log.json).
8
+ # Logs record numbers and response codes as strings, not int64.
9
 
10
  import requests
11
  import shutil
 
16
 
17
  from tqdm import tqdm
18
  import os
19
+ import sys
20
+ import time
21
  import argparse
22
 
23
 
24
# Columns the master CSV must provide; checked before any download is attempted.
EXPECTED_COLS = ["CAMID",
                 "X",
                 "Image_name",
                 "file_url",
                 "Taxonomic_Name",
                 "record_number",
                 "Dataset"
                 ]

# HTTP status codes worth retrying: rate-limiting (429) and transient server errors.
REDO_CODE_LIST = [429, 500, 502, 503, 504]

# Reset to appropriate index if download gets interrupted.
STARTING_INDEX = 0
+
39
  def parse_args():
40
  parser = argparse.ArgumentParser()
41
  parser.add_argument("--csv", required=True, help="Path to CSV file with urls.", nargs="?")
 
44
  return parser.parse_args()
45
 
46
 
47
def log_response(log_data, index, image, url, record_number, dataset, cam_id, response_code):
    """Record one download attempt in ``log_data`` keyed by ``index``.

    record_number and response_code are stored as strings because
    int64 values are not JSON-serializable. Returns the updated dict.
    """
    log_data[index] = {
        "Image": image,
        "file_url": url,
        "record_number": str(record_number),
        "dataset": dataset,
        "CAMID": cam_id,
        "Response_status": str(response_code),
    }
    return log_data
59
 
60
 
61
def update_log(log, index, filepath):
    """Append the entry ``log[index]`` to ``filepath`` as one JSON object per line.

    BUGFIX: the original appended ``indent=4`` pretty-printed objects back to
    back, producing a file that no JSON parser can read (neither a single JSON
    document nor JSON Lines). Writing one compact object per line yields valid
    JSON Lines, which stays appendable across restarts and is parseable.
    """
    with open(filepath, "a") as log_file:
        json.dump(log[index], log_file)
        log_file.write("\n")
66
+
67
+
68
def download_images(jiggins_data, image_folder, log_filepath, error_log_filepath):
    """Download every image listed in the master CSV DataFrame.

    Parameters:
    jiggins_data -- DataFrame providing EXPECTED_COLS (file_url, Taxonomic_Name, ...).
    image_folder -- root output folder; images saved under <Taxonomic_Name>/.
    log_filepath -- file receiving one JSON entry per successful download.
    error_log_filepath -- file receiving one JSON entry per failed download.

    Transient failures (REDO_CODE_LIST statuses or connection exceptions) are
    retried up to two times with a short back-off; permanent failures (e.g.
    404) are logged once. Images already on disk are skipped.
    """
    log_data = {}
    log_errors = {}

    for i in tqdm(range(len(jiggins_data))):
        # species will really be <Genus> <species> ssp. <subspecies>, where subspecies indicated
        species = jiggins_data["Taxonomic_Name"][i]
        image_name = jiggins_data["X"][i].astype(str) + "_" + jiggins_data["Image_name"][i]
        record_number = jiggins_data["record_number"][i]
        image_path = f"{image_folder}/{species}/{image_name}"

        # Skip anything already downloaded (image_name is unique: <X>_<Image_name>).
        if os.path.exists(image_path):
            if i > STARTING_INDEX:
                # No need to print if download is restarted due to interruption
                # (set STARTING_INDEX accordingly).
                # BUGFIX: index row i -- the original printed the entire columns.
                print(f"duplicate image: {jiggins_data['X'][i]}, {jiggins_data['Image_name'][i]}, from record {record_number}")
            continue

        # get image from url
        url = jiggins_data["file_url"][i]
        dataset = jiggins_data["Dataset"][i]
        cam_id = jiggins_data["CAMID"][i]

        # download the image, retrying transient failures
        redo = True
        max_redos = 2
        while redo and max_redos > 0:
            try:
                response = requests.get(url, stream=True)
            except Exception as e:
                # Connection-level failure: count it as one retry attempt.
                max_redos -= 1
                if max_redos <= 0:
                    log_errors = log_response(log_errors,
                                              index = i,
                                              image = species + "/" + image_name,
                                              url = url,
                                              record_number = record_number,
                                              dataset = dataset,
                                              cam_id = cam_id,
                                              response_code = str(e))
                    update_log(log = log_errors, index = i, filepath = error_log_filepath)
                # BUGFIX: `response` is unbound after an exception; the original
                # fell through to the status-code check and raised NameError.
                continue

            if response.status_code == 200:
                redo = False
                # log status
                log_data = log_response(log_data,
                                        index = i,
                                        image = species + "/" + image_name,
                                        url = url,
                                        record_number = record_number,
                                        dataset = dataset,
                                        cam_id = cam_id,
                                        response_code = response.status_code)
                update_log(log = log_data, index = i, filepath = log_filepath)

                # Create the species folder if necessary. BUGFIX: exist_ok=True
                # removes the check-then-create race of the original
                # (os.path.exists followed by makedirs(exist_ok=False)).
                os.makedirs(f"{image_folder}/{species}", exist_ok=True)

                # save image to appropriate folder
                with open(image_path, "wb") as out_file:
                    shutil.copyfileobj(response.raw, out_file)

            # check for too many requests / transient server errors
            elif response.status_code in REDO_CODE_LIST:
                max_redos -= 1
                if max_redos <= 0:
                    log_errors = log_response(log_errors,
                                              index = i,
                                              image = species + "/" + image_name,
                                              url = url,
                                              record_number = record_number,
                                              dataset = dataset,
                                              cam_id = cam_id,
                                              response_code = response.status_code)
                    update_log(log = log_errors, index = i, filepath = error_log_filepath)
                else:
                    time.sleep(1)

            else:  # other fail, eg. 404: do not retry
                redo = False
                log_errors = log_response(log_errors,
                                          index = i,
                                          image = species + "/" + image_name,
                                          url = url,
                                          record_number = record_number,
                                          dataset = dataset,
                                          cam_id = cam_id,
                                          response_code = response.status_code)
                update_log(log = log_errors, index = i, filepath = error_log_filepath)

            # Release the streamed connection. BUGFIX: replaces the original's
            # `del response` placed after the loop, which raised NameError when
            # the request itself had failed.
            response.close()

    return
165
 
 
170
  csv_path = args.csv #path to our csv with urls to download images from
171
  image_folder = args.output #folder where dataset will be downloaded to
172
 
173
+ # log file location (folder of source CSV)
174
  log_filepath = csv_path.split(".")[0] + "_log.json"
175
+ error_log_filepath = csv_path.split(".")[0] + "_error_log.json"
176
+
177
+ #load csv
178
+ jiggins_data = pd.read_csv(csv_path, low_memory = False)
179
+
180
+ # Check for required columns
181
+ missing_cols = []
182
+ for col in EXPECTED_COLS:
183
+ if col not in list(jiggins_data.columns):
184
+ missing_cols.append(col)
185
+ if len(missing_cols) > 0:
186
+ sys.exit(f"The CSV is missing column(s): {missing_cols}")
187
 
188
  #download images from urls
189
+ download_images(jiggins_data, image_folder, log_filepath, error_log_filepath)
190
 
191
  # generate checksums and save CSV to same folder as CSV used for download
192
  checksum_path = csv_path.split(".")[0] + "_checksums.csv"
193
  get_checksums(image_folder, checksum_path)
194
 
195
  print(f"Images downloaded from {csv_path} to {image_folder}.")
196
+ print(f"Checksums recorded in {checksum_path} and download logs are in {log_filepath} and {error_log_filepath}.")
197
 
198
  return
199
 
200
  if __name__ == "__main__":
201
+ main()