"""
Unpack Cloud4D dataset archives.

This script extracts all .tar.gz archives to reconstruct the original
Cloud4D directory structure.

Usage:
    python unpack.py [--output /path/to/output] [--subset real_world|synthetic]
                     [--filter PATTERN] [--jobs N] [--list]

Examples:
    # Extract everything to ./
    python unpack.py

    # Extract to a specific location
    python unpack.py --output /data/cloud4d

    # Extract only real_world data
    python unpack.py --subset real_world

    # Extract only synthetic data
    python unpack.py --subset synthetic

    # Extract a specific date-hour (real_world)
    python unpack.py --filter 20230705_10

    # Use parallel extraction
    python unpack.py --jobs 4
"""
|
import argparse
import shutil
import subprocess
import sys
import tarfile
from concurrent.futures import ProcessPoolExecutor, as_completed
from pathlib import Path
|
|
def extract_archive(archive_path, output_dir):
    """Extract a tar.gz archive to the output directory.

    Tries the fastest available tool first: pigz (parallel gzip), then the
    system tar, then Python's tarfile module as a last resort.
    """
    archive_path = Path(archive_path)
    output_dir = Path(output_dir)

    # Fastest: pigz decompresses on multiple cores, piped into tar.
    if shutil.which('pigz'):
        try:
            cmd = f'pigz -dc "{archive_path}" | tar -xf - -C "{output_dir}"'
            subprocess.run(cmd, shell=True, check=True)
            return True
        except subprocess.CalledProcessError:
            pass

    # Fallback: single-threaded system tar.
    try:
        cmd = ['tar', '-xzf', str(archive_path), '-C', str(output_dir)]
        subprocess.run(cmd, check=True)
        return True
    except (subprocess.CalledProcessError, FileNotFoundError):
        pass

    # Last resort: pure-Python tarfile (slowest, but always available).
    try:
        with tarfile.open(archive_path, 'r:gz') as tar:
            tar.extractall(output_dir)
        return True
    except Exception as e:
        print(f"Error extracting {archive_path}: {e}")
        return False
|
|
def extract_single(args):
    """Worker function for parallel extraction."""
    archive_path, output_dir, name = args
    try:
        success = extract_archive(archive_path, output_dir)
        if success:
            return (name, 'extracted')
        return (name, 'failed')
    except Exception as e:
        return (name, f'error: {e}')
|
|
def main():
    parser = argparse.ArgumentParser(
        description='Unpack Cloud4D dataset archives',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  python unpack.py                          # Extract all to ./
  python unpack.py --output /data/cloud4d   # Extract to specific location
  python unpack.py --subset real_world      # Extract only real_world
  python unpack.py --filter 20230705        # Extract matching archives
  python unpack.py --jobs 4                 # Parallel extraction
"""
    )
    parser.add_argument('--output', '-o', type=Path, default=Path('./'),
                        help='Output directory (default: ./)')
    parser.add_argument('--subset', choices=['real_world', 'synthetic'],
                        help='Extract only a specific subset')
    parser.add_argument('--filter', type=str,
                        help='Filter archives by name (e.g., "20230705" for a specific date)')
    parser.add_argument('--jobs', '-j', type=int, default=1,
                        help='Number of parallel extraction jobs (default: 1)')
    parser.add_argument('--list', '-l', action='store_true',
                        help='List available archives without extracting')
    args = parser.parse_args()
|
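    # Archives are expected to sit next to this script, under real_world/ and synthetic/.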
script_dir = Path(__file__).parent.resolve() |
|
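    # Discover matching archives, honoring --subset and --filter.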
    archives = []

    real_world_dir = script_dir / 'real_world'
    if real_world_dir.exists() and args.subset in (None, 'real_world'):
        for archive in sorted(real_world_dir.glob('*.tar.gz')):
            if args.filter is None or args.filter in archive.name:
                archives.append(('real_world', archive))

    synthetic_dir = script_dir / 'synthetic'
    if synthetic_dir.exists() and args.subset in (None, 'synthetic'):
        for archive in sorted(synthetic_dir.glob('*.tar.gz')):
            if args.filter is None or args.filter in archive.name:
                archives.append(('synthetic', archive))

    if not archives:
        print("No archives found matching the criteria.")
        print(f"Searched in: {script_dir}")
        sys.exit(1)
|
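    # With --list, report what is available and exit without extracting.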
    if args.list:
        print("Available archives:")
        print()
        current_subset = None
        for subset, archive in archives:
            if subset != current_subset:
                print(f"  {subset}/")
                current_subset = subset
            size_mb = archive.stat().st_size / 1024 / 1024
            print(f"    {archive.name} ({size_mb:.1f} MB)")
        print()
        total_size = sum(a.stat().st_size for _, a in archives) / 1024 / 1024 / 1024
        print(f"Total: {len(archives)} archives, {total_size:.2f} GB")
        return
|
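    # Summarize the extraction plan before starting.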
    output_dir = args.output.resolve()

    print("=" * 70)
    print("Cloud4D Dataset Unpacker")
    print("=" * 70)
    print(f"Output directory: {output_dir}")
    print(f"Archives to extract: {len(archives)}")
    if args.subset:
        print(f"Subset: {args.subset}")
    if args.filter:
        print(f"Filter: {args.filter}")
    print()
|
    # Create output subdirectories only for the subsets being extracted.
    for subset in sorted({s for s, _ in archives}):
        (output_dir / subset).mkdir(parents=True, exist_ok=True)
|
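    # One task per archive: (archive path, destination directory, display name).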
    tasks = []
    for subset, archive in archives:
        target_dir = output_dir / subset
        # Path.stem strips only the final '.gz'; trim the full '.tar.gz' instead.
        name = f"{subset}/{archive.name.removesuffix('.tar.gz')}"
        tasks.append((archive, target_dir, name))
|
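    # Extract sequentially, or with a pool of worker processes when --jobs > 1.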
    print("Extracting archives...")
    results = []

    if args.jobs > 1:
        with ProcessPoolExecutor(max_workers=args.jobs) as executor:
            futures = {executor.submit(extract_single, task): task[2] for task in tasks}
            for future in as_completed(futures):
                name, status = future.result()
                results.append((name, status))
                print(f"  [{status.upper()}] {name}")
    else:
        for task in tasks:
            name, status = extract_single(task)
            results.append((name, status))
            print(f"  [{status.upper()}] {name}")
|
    print()
    print("=" * 70)
    print("EXTRACTION COMPLETE")
    print("=" * 70)

    extracted = sum(1 for _, s in results if s == 'extracted')
    failed = sum(1 for _, s in results if s != 'extracted')
|
    print(f"Successfully extracted: {extracted}")
    if failed:
        print(f"Failed: {failed}")
    print()
    print(f"Dataset extracted to: {output_dir}")
    print()
    print("Directory structure:")
    print(f"  {output_dir}/")
    print("    real_world/")
    print("      20230705_10/")
    print("        perspective_1/")
    print("        perspective_2/")
    print("        perspective_3/")
    print("      ... (more date-hour folders)")
    print("    synthetic/")
    print("      terragen/")
    print("      large_eddy_simulations/")
|
|
if __name__ == '__main__':
    main()