Files changed (1)
  1. download.py +154 -0
download.py ADDED
import os
import requests
import zipfile
import glob
from hashlib import md5
import concurrent.futures

base_url = "https://huggingface.co/datasets/imageomics/KABR/resolve/main/KABR"

"""
To extend the dataset, add additional animals and parts ranges to the list and dictionary below.
"""

animals = ["giraffes", "zebras_grevys", "zebras_plains"]

animal_parts_range = {
    "giraffes": ("aa", "ad"),
    "zebras_grevys": ("aa", "am"),
    "zebras_plains": ("aa", "al"),
}
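# For example, a hypothetical fourth species split into parts "aa" through "ac"
# would be added as:
#   animals.append("hippos")
#   animal_parts_range["hippos"] = ("aa", "ac")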

dataset_prefix = "dataset/image/"

# Define the static files that are not dependent on the animals list
static_files = [
    "README.txt",
    "annotation/classes.json",
    "annotation/distribution.xlsx",
    "annotation/train.csv",
    "annotation/val.csv",
    "configs/I3D.yaml",
    "configs/SLOWFAST.yaml",
    "configs/X3D.yaml",
    "dataset/image2video.py",
    "dataset/image2visual.py",
]

def generate_part_files(animal, start, end):
    # Expand a two-letter suffix range into the list of split-archive part paths.
    # Note: the nested ranges assume all suffixes share the same first letter,
    # which holds for every range defined above.
    start_a, start_b = ord(start[0]), ord(start[1])
    end_a, end_b = ord(end[0]), ord(end[1])
    return [
        f"{dataset_prefix}{animal}_part_{chr(a)}{chr(b)}"
        for a in range(start_a, end_a + 1)
        for b in range(start_b, end_b + 1)
    ]
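# For reference, generate_part_files("giraffes", "aa", "ad") returns
# ["dataset/image/giraffes_part_aa", "dataset/image/giraffes_part_ab",
#  "dataset/image/giraffes_part_ac", "dataset/image/giraffes_part_ad"]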
47
+
48
+ # Generate the part files for each animal
49
+ part_files = [
50
+ part
51
+ for animal, (start, end) in animal_parts_range.items()
52
+ for part in generate_part_files(animal, start, end)
53
+ ]
54
+
55
+ archive_md5_files = [f"{dataset_prefix}{animal}_md5.txt" for animal in animals]
56
+
57
+ files = static_files + archive_md5_files + part_files
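# With the default lists above this comes to 42 files:
# 10 static files, 3 *_md5.txt files, and 4 + 13 + 12 = 29 archive parts.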

def progress_bar(iteration, total, message, bar_length=50):
    progress = iteration / total
    bar = '=' * int(round(progress * bar_length) - 1)
    spaces = ' ' * (bar_length - len(bar))
    message = f'{message:<100}'
    print(f'[{bar + spaces}] {int(progress * 100)}% {message}', end='\r', flush=True)

    if iteration == total:
        print()

# Directory to save files
save_dir = "KABR_files"

print("Downloading the Kenyan Animal Behavior Recognition (KABR) dataset ...")

total = len(files)
# Loop through each relative file path
for i, file_path in enumerate(files):
    save_path = os.path.join(save_dir, file_path)

    if os.path.exists(save_path):
        print(f"File {save_path} already exists. Skipping download.")
        continue

    # Construct the full URL
    full_url = f"{base_url}/{file_path}"

    # Create the necessary directories based on the file path
    os.makedirs(os.path.join(save_dir, os.path.dirname(file_path)), exist_ok=True)

    # Download the file and save it with the preserved file path
    response = requests.get(full_url)
    response.raise_for_status()  # fail instead of writing an error page to disk
    with open(save_path, 'wb') as file:
        file.write(response.content)

    progress_bar(i + 1, total, f"downloaded: {save_path}")

print("Download of repository contents completed.")
98
+
99
+ print(f"Concatenating split files into a full archive for {animals} ...")
100
+
101
+ def concatenate_files(animal):
102
+ print(f"Concatenating files for {animal} ...")
103
+ part_files_pattern = f"{save_dir}/dataset/image/{animal}_part_*"
104
+ part_files = sorted(glob.glob(part_files_pattern))
105
+ if part_files:
106
+ with open(f"{save_dir}/dataset/image/{animal}.zip", 'wb') as f_out:
107
+ for f_name in part_files:
108
+ with open(f_name, 'rb') as f_in:
109
+ # Read and write in chunks
110
+ CHUNK_SIZE = 8*1024*1024 # 8MB
111
+ for chunk in iter(lambda: f_in.read(CHUNK_SIZE), b""):
112
+ f_out.write(chunk)
113
+ # Delete part files as they are concatenated
114
+ os.remove(f_name)
115
+ print(f"Archive for {animal} concatenated.")
116
+ else:
117
+ print(f"No part files found for {animal}.")
118
+
119
+ with concurrent.futures.ThreadPoolExecutor() as executor:
120
+ executor.map(concatenate_files, animals)
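# Exiting the with-block waits for all concatenation threads to finish before the
# script continues; note that exceptions raised inside concatenate_files would only
# surface when the map() iterator is consumed, which this script does not do.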

def compute_md5(file_path):
    hasher = md5()
    with open(file_path, 'rb') as f:
        CHUNK_SIZE = 8 * 1024 * 1024  # 8MB
        for chunk in iter(lambda: f.read(CHUNK_SIZE), b""):
            hasher.update(chunk)
    return hasher.hexdigest()

def verify_and_extract(animal):
    print(f"Confirming data integrity for {animal}.zip ...")
    zip_md5 = compute_md5(f"{save_dir}/dataset/image/{animal}.zip")

    with open(f"{save_dir}/dataset/image/{animal}_md5.txt", 'r') as file:
        expected_md5 = file.read().strip().split()[0]

    if zip_md5 == expected_md5:
        print(f"MD5 sum for {animal}.zip is correct.")

        print(f"Extracting {animal}.zip ...")
        with zipfile.ZipFile(f"{save_dir}/dataset/image/{animal}.zip", 'r') as zip_ref:
            zip_ref.extractall(f"{save_dir}/dataset/image/")
        print(f"{animal}.zip extracted.")
        print(f"Cleaning up for {animal} ...")
        os.remove(f"{save_dir}/dataset/image/{animal}.zip")
        os.remove(f"{save_dir}/dataset/image/{animal}_md5.txt")
    else:
        print(f"MD5 sum for {animal}.zip is incorrect. Expected: {expected_md5}, but got: {zip_md5}.")
        print("There may be data corruption. Please try to download and reconstruct the data again or reach out to the corresponding authors for assistance.")

with concurrent.futures.ThreadPoolExecutor() as executor:
    executor.map(verify_and_extract, animals)

print("Download script finished.")