import os
import requests
import time
import zipfile
import glob
from hashlib import md5
import concurrent.futures
base_url = "https://huggingface.co/datasets/imageomics/KABR/resolve/main/KABR"
"""
To extend the dataset, add additional animals and parts ranges to the list and dictionary below.
"""
animals = ["giraffes", "zebras_grevys", "zebras_plains"]
animal_parts_range = {
    "giraffes": ("aa", "ad"),
    "zebras_grevys": ("aa", "am"),
    "zebras_plains": ("aa", "al"),
}
dataset_prefix = "dataset/image/"
# Define the static files that are not dependent on the animals list
static_files = [
    "README.txt",
    "annotation/classes.json",
    "annotation/distribution.xlsx",
    "annotation/train.csv",
    "annotation/val.csv",
    "configs/I3D.yaml",
    "configs/SLOWFAST.yaml",
    "configs/X3D.yaml",
    "dataset/image2video.py",
    "dataset/image2visual.py",
]
def generate_part_files(animal, start, end):
    start_a, start_b = ord(start[0]), ord(start[1])
    end_a, end_b = ord(end[0]), ord(end[1])
    return [
        f"{dataset_prefix}{animal}_part_{chr(a)}{chr(b)}"
        for a in range(start_a, end_a + 1)
        for b in range(start_b, end_b + 1)
    ]
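# For example, generate_part_files("giraffes", "aa", "ad") returns
# ['dataset/image/giraffes_part_aa', 'dataset/image/giraffes_part_ab',
#  'dataset/image/giraffes_part_ac', 'dataset/image/giraffes_part_ad'].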
# Generate the part files for each animal
part_files = [
    part
    for animal, (start, end) in animal_parts_range.items()
    for part in generate_part_files(animal, start, end)
]
archive_md5_files = [f"{dataset_prefix}{animal}_md5.txt" for animal in animals]
files = static_files + archive_md5_files + part_files
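# `files` now holds every relative path to fetch: the static files, the per-animal MD5
# checksum files, and all of the split archive parts.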
def progress_bar(iteration, total, message, bar_length=50):
    progress = iteration / total
    bar = '=' * int(round(progress * bar_length) - 1)
    spaces = ' ' * (bar_length - len(bar))
    message = f'{message:<100}'
    print(f'[{bar + spaces}] {int(progress * 100)}% {message}', end='\r', flush=True)
    if iteration == total:
        print()
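# Example: progress_bar(25, 50, "downloaded: KABR_files/README.txt") redraws a 50-character
# bar in place at 50%; the final call (iteration == total) ends the line with a newline.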
# Directory to save files
save_dir = "KABR_files"
# Loop through each relative file path
print(f"Downloading the Kenyan Animal Behavior Recognition (KABR) dataset ...")
total = len(files)
for i, file_path in enumerate(files):
    save_path = os.path.join(save_dir, file_path)
    if os.path.exists(save_path):
        print(f"File {save_path} already exists. Skipping download.")
        continue
    # Construct the full URL
    full_url = f"{base_url}/{file_path}"
    # Create the necessary directories based on the file path
    os.makedirs(os.path.join(save_dir, os.path.dirname(file_path)), exist_ok=True)
    # Download the file and save it with the preserved file path, streaming to disk so
    # multi-gigabyte part files are not held in memory; raise_for_status() fails fast on
    # HTTP errors instead of silently saving an error page
    with requests.get(full_url, stream=True) as response:
        response.raise_for_status()
        with open(save_path, 'wb') as file:
            for chunk in response.iter_content(chunk_size=8 * 1024 * 1024):
                file.write(chunk)
    progress_bar(i + 1, total, f"downloaded: {save_path}")
print("Download of repository contents completed.")
print(f"Concatenating split files into a full archive for {animals} ...")
def concatenate_files(animal):
print(f"Concatenating files for {animal} ...")
part_files_pattern = f"{save_dir}/dataset/image/{animal}_part_*"
part_files = sorted(glob.glob(part_files_pattern))
if part_files:
with open(f"{save_dir}/dataset/image/{animal}.zip", 'wb') as f_out:
for f_name in part_files:
with open(f_name, 'rb') as f_in:
# Read and write in chunks
CHUNK_SIZE = 8*1024*1024 # 8MB
for chunk in iter(lambda: f_in.read(CHUNK_SIZE), b""):
f_out.write(chunk)
# Delete part files as they are concatenated
os.remove(f_name)
print(f"Archive for {animal} concatenated.")
else:
print(f"No part files found for {animal}.")
# Concatenate the per-animal parts in parallel; the work is I/O-bound, so threads suffice.
# list() forces iteration so any exception raised inside a worker is re-raised here.
with concurrent.futures.ThreadPoolExecutor() as executor:
    list(executor.map(concatenate_files, animals))
def compute_md5(file_path):
    hasher = md5()
    with open(file_path, 'rb') as f:
        CHUNK_SIZE = 8 * 1024 * 1024  # 8 MB
        for chunk in iter(lambda: f.read(CHUNK_SIZE), b""):
            hasher.update(chunk)
    return hasher.hexdigest()
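# Example: compute_md5(f"{save_dir}/dataset/image/giraffes.zip") returns the hex digest that
# verify_and_extract() compares against the first field of giraffes_md5.txt below.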
def verify_and_extract(animal):
print(f"Confirming data integrity for {animal}.zip ...")
zip_md5 = compute_md5(f"{save_dir}/dataset/image/{animal}.zip")
with open(f"{save_dir}/dataset/image/{animal}_md5.txt", 'r') as file:
expected_md5 = file.read().strip().split()[0]
if zip_md5 == expected_md5:
print(f"MD5 sum for {animal}.zip is correct.")
print(f"Extracting {animal}.zip ...")
with zipfile.ZipFile(f"{save_dir}/dataset/image/{animal}.zip", 'r') as zip_ref:
zip_ref.extractall(f"{save_dir}/dataset/image/")
print(f"{animal}.zip extracted.")
print(f"Cleaning up for {animal} ...")
os.remove(f"{save_dir}/dataset/image/{animal}.zip")
os.remove(f"{save_dir}/dataset/image/{animal}_md5.txt")
else:
print(f"MD5 sum for {animal}.zip is incorrect. Expected: {expected_md5}, but got: {zip_md5}.")
print("There may be data corruption. Please try to download and reconstruct the data again or reach out to the corresponding authors for assistance.")
# Verify checksums and extract the archives in parallel, again surfacing worker exceptions.
with concurrent.futures.ThreadPoolExecutor() as executor:
    list(executor.map(verify_and_extract, animals))
print("Download script finished.")