# KABR / download.py
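# Usage: python download.py
# Downloads the KABR dataset files from the Hugging Face repository into a local
# KABR_files/ directory, reconstructs the split image archives, verifies their
# MD5 sums, and extracts them.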
import os
import requests
import zipfile
import glob
from hashlib import md5
import concurrent.futures
base_url = "https://huggingface.co/datasets/imageomics/KABR/resolve/main/KABR"
"""
To extend the dataset, add additional animals and parts ranges to the list and dictionary below.
"""
animals = ["giraffes", "zebras_grevys", "zebras_plains"]
animal_parts_range = {
    "giraffes": ("aa", "ad"),
    "zebras_grevys": ("aa", "am"),
    "zebras_plains": ("aa", "al"),
}
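# To register a hypothetical new species (illustration only; not part of the released
# dataset) whose images are split into parts "aa" through "ab", extend both structures, e.g.:
#   animals = ["giraffes", "zebras_grevys", "zebras_plains", "impalas"]
#   animal_parts_range["impalas"] = ("aa", "ab")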
dataset_prefix = "dataset/image/"
# Define the static files that are not dependent on the animals list
static_files = [
    "README.txt",
    "annotation/classes.json",
    "annotation/distribution.xlsx",
    "annotation/train.csv",
    "annotation/val.csv",
    "configs/I3D.yaml",
    "configs/SLOWFAST.yaml",
    "configs/X3D.yaml",
    "dataset/image2video.py",
    "dataset/image2visual.py",
]
def generate_part_files(animal, start, end):
    """Expand a two-letter part range, e.g. ("aa", "ad"), into the list of part file paths."""
    start_a, start_b = ord(start[0]), ord(start[1])
    end_a, end_b = ord(end[0]), ord(end[1])
    return [
        f"{dataset_prefix}{animal}_part_{chr(a)}{chr(b)}"
        for a in range(start_a, end_a + 1)
        for b in range(start_b, end_b + 1)
    ]
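# For giraffes, for example, the ("aa", "ad") range expands to:
#   dataset/image/giraffes_part_aa, ..._ab, ..._ac, ..._ad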
# Generate the part files for each animal
part_files = [
    part
    for animal, (start, end) in animal_parts_range.items()
    for part in generate_part_files(animal, start, end)
]
archive_md5_files = [f"{dataset_prefix}{animal}_md5.txt" for animal in animals]
files = static_files + archive_md5_files + part_files
def progress_bar(iteration, total, message, bar_length=50):
    progress = iteration / total
    bar = '=' * int(round(progress * bar_length) - 1)
    spaces = ' ' * (bar_length - len(bar))
    message = f'{message:<100}'
    print(f'[{bar + spaces}] {int(progress * 100)}% {message}', end='\r', flush=True)
    if iteration == total:
        print()
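# Example: progress_bar(5, 10, "downloaded: KABR_files/README.txt") renders roughly:
#   [========================                          ] 50% downloaded: KABR_files/README.txt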
# Directory to save files
save_dir = "KABR_files"
print("Downloading the Kenyan Animal Behavior Recognition (KABR) dataset ...")
total = len(files)
# Loop through each relative file path
for i, file_path in enumerate(files):
    save_path = os.path.join(save_dir, file_path)
    if os.path.exists(save_path):
        print(f"File {save_path} already exists. Skipping download.")
        continue
    # Construct the full URL and create the necessary directories based on the file path
    full_url = f"{base_url}/{file_path}"
    os.makedirs(os.path.join(save_dir, os.path.dirname(file_path)), exist_ok=True)
    # Stream the file to disk in chunks and save it with the preserved file path
    response = requests.get(full_url, stream=True)
    response.raise_for_status()
    with open(save_path, 'wb') as file:
        for chunk in response.iter_content(chunk_size=8 * 1024 * 1024):  # 8MB
            file.write(chunk)
    progress_bar(i + 1, total, f"downloaded: {save_path}")
print("Download of repository contents completed.")
print(f"Concatenating split files into a full archive for {animals} ...")
def concatenate_files(animal):
    """Concatenate the downloaded split parts for one animal into a single zip archive."""
    print(f"Concatenating files for {animal} ...")
    part_files_pattern = f"{save_dir}/dataset/image/{animal}_part_*"
    part_files = sorted(glob.glob(part_files_pattern))
    if part_files:
        with open(f"{save_dir}/dataset/image/{animal}.zip", 'wb') as f_out:
            for f_name in part_files:
                with open(f_name, 'rb') as f_in:
                    # Read and write in chunks to keep memory usage low
                    CHUNK_SIZE = 8 * 1024 * 1024  # 8MB
                    for chunk in iter(lambda: f_in.read(CHUNK_SIZE), b""):
                        f_out.write(chunk)
                # Delete each part file once it has been concatenated
                os.remove(f_name)
        print(f"Archive for {animal} concatenated.")
    else:
        print(f"No part files found for {animal}.")
with concurrent.futures.ThreadPoolExecutor() as executor:
    # list() consumes the map so any exception raised in a worker is re-raised here
    list(executor.map(concatenate_files, animals))
def compute_md5(file_path):
    """Return the MD5 hex digest of a file, read in chunks to limit memory usage."""
    hasher = md5()
    with open(file_path, 'rb') as f:
        CHUNK_SIZE = 8 * 1024 * 1024  # 8MB
        for chunk in iter(lambda: f.read(CHUNK_SIZE), b""):
            hasher.update(chunk)
    return hasher.hexdigest()
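# Each <animal>_md5.txt is expected to contain the archive's hex digest as its first
# whitespace-separated token (md5sum-style output); only that token is compared below.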
def verify_and_extract(animal):
    """Verify the reconstructed archive against its MD5 file, then extract it and clean up."""
    print(f"Confirming data integrity for {animal}.zip ...")
    zip_md5 = compute_md5(f"{save_dir}/dataset/image/{animal}.zip")
    with open(f"{save_dir}/dataset/image/{animal}_md5.txt", 'r') as file:
        expected_md5 = file.read().strip().split()[0]
    if zip_md5 == expected_md5:
        print(f"MD5 sum for {animal}.zip is correct.")
        print(f"Extracting {animal}.zip ...")
        with zipfile.ZipFile(f"{save_dir}/dataset/image/{animal}.zip", 'r') as zip_ref:
            zip_ref.extractall(f"{save_dir}/dataset/image/")
        print(f"{animal}.zip extracted.")
        print(f"Cleaning up for {animal} ...")
        os.remove(f"{save_dir}/dataset/image/{animal}.zip")
        os.remove(f"{save_dir}/dataset/image/{animal}_md5.txt")
    else:
        print(f"MD5 sum for {animal}.zip is incorrect. Expected: {expected_md5}, but got: {zip_md5}.")
        print("There may be data corruption. Please try to download and reconstruct the data again, or reach out to the corresponding authors for assistance.")
with concurrent.futures.ThreadPoolExecutor() as executor:
    list(executor.map(verify_and_extract, animals))
print("Download script finished.")