#!/bin/bash
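#
# Download the KABR dataset files from Hugging Face, reassemble the split
# image archives, verify their MD5 checksums, and extract them into
# ./KABR_files/.
#
# Usage: bash download.sh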
# Base URL of the Hugging Face repository
BASE_URL="https://huggingface.co/datasets/imageomics/KABR/resolve/main/KABR"
# Array of relative file paths
FILES=(
"README.txt"
"annotation/classes.json"
"annotation/distribution.xlsx"
"annotation/train.csv"
"annotation/val.csv"
"configs/I3D.yaml"
"configs/SLOWFAST.yaml"
"configs/X3D.yaml"
"dataset/image2video.py"
"dataset/image/giraffes_md5.txt"
"dataset/image/giraffes_part_aa"
"dataset/image/giraffes_part_ab"
"dataset/image/giraffes_part_ac"
"dataset/image/giraffes_part_ad"
"dataset/image/giraffes.zip"
"dataset/image/zebras_grevys_md5.txt"
"dataset/image/zebras_grevys_part_aa"
"dataset/image/zebras_grevys_part_ab"
"dataset/image/zebras_grevys_part_ac"
"dataset/image/zebras_grevys_part_ad"
"dataset/image/zebras_grevys_part_ae"
"dataset/image/zebras_grevys_part_af"
"dataset/image/zebras_grevys_part_ag"
"dataset/image/zebras_grevys_part_ah"
"dataset/image/zebras_grevys_part_ai"
"dataset/image/zebras_grevys_part_aj"
"dataset/image/zebras_grevys_part_ak"
"dataset/image/zebras_grevys_part_al"
"dataset/image/zebras_grevys_part_am"
"dataset/image/zebras_plains_md5.txt"
"dataset/image/zebras_plains_part_aa"
"dataset/image/zebras_plains_part_ab"
"dataset/image/zebras_plains_part_ac"
"dataset/image/zebras_plains_part_ad"
"dataset/image/zebras_plains_part_ae"
"dataset/image/zebras_plains_part_af"
"dataset/image/zebras_plains_part_ag"
"dataset/image/zebras_plains_part_ah"
"dataset/image/zebras_plains_part_ai"
"dataset/image/zebras_plains_part_aj"
"dataset/image/zebras_plains_part_ak"
"dataset/image/zebras_plains_part_al"
)
# Loop through each relative file path
for FILE_PATH in "${FILES[@]}"; do
    # Construct the full URL
    FULL_URL="$BASE_URL/$FILE_PATH"
    # Create the necessary directories based on the file path
    mkdir -p "$(dirname "KABR_files/$FILE_PATH")"
    # Download the file and save it under the preserved relative path
    wget -P "KABR_files/$(dirname "$FILE_PATH")" "$FULL_URL"
done
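
# Reassemble, verify, and extract the split image archives.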
ANIMALS=("giraffes" "zebras_grevys" "zebras_plains")
# Loop through each animal name
for ANIMAL in "${ANIMALS[@]}"; do
    # Concatenate the split files into their archive.
    PART_FILES="./KABR_files/dataset/image/${ANIMAL}_part_*"
    if ls $PART_FILES 1> /dev/null 2>&1; then
        cat $PART_FILES > "./KABR_files/dataset/image/${ANIMAL}.zip"
    else
        echo "No part files found for $ANIMAL."
        continue
    fi
    # Calculate the MD5 sum of the reassembled ZIP file
    ZIP_MD5=$(md5sum "./KABR_files/dataset/image/${ANIMAL}.zip" | awk '{ print $1 }')
    # Read the expected MD5 sum from the associated txt file
    EXPECTED_MD5=$(awk '{ print $1 }' "./KABR_files/dataset/image/${ANIMAL}_md5.txt")
    # Compare the calculated MD5 sum with the expected MD5 sum
    if [ "$ZIP_MD5" == "$EXPECTED_MD5" ]; then
        echo "MD5 sum for ${ANIMAL}.zip is correct."
        # Delete the part files, extract the archive, then clean up
        rm $PART_FILES
        unzip -d "./KABR_files/dataset/image/" "./KABR_files/dataset/image/${ANIMAL}.zip"
        rm "./KABR_files/dataset/image/${ANIMAL}.zip"
        rm "./KABR_files/dataset/image/${ANIMAL}_md5.txt"
    else
        echo "MD5 sum for ${ANIMAL}.zip is incorrect. Expected: $EXPECTED_MD5, but got: $ZIP_MD5."
        echo "There may be data corruption. Please try downloading and reconstructing the data again, or reach out to the corresponding authors for assistance."
    fi
done
echo "Download, reconstruction, extraction, and verification completed."