# SignVerse-2M/scripts/runtime_status.py
# Author: Sen Fang
# Last change: "Update pipeline orchestration and optimized processing" (commit fa3502a)
#!/usr/bin/env python3
from __future__ import annotations
import argparse
import csv
import json
import re
import subprocess
from collections import Counter
from pathlib import Path
# File extensions that mark a fully-downloaded video in raw_video/; any other
# file found there is treated as an in-progress temp download (see main()).
VIDEO_EXTS = {'.mp4', '.mkv', '.webm', '.mov'}
# Matches a Slurm array job-id token such as "12345_[1-10%4]":
# group 1 = base job id, group 3 = the index spec inside the brackets.
ARRAY_RANGE_RE = re.compile(r'^(\d+)_(\[(.+)\])$')
# Header columns the processed-metadata CSV must contain before
# read_processed_progress() will tally its rows.
PROCESSED_REQUIRED_COLUMNS = {
    'video_id',
    'download_status',
    'process_status',
    'upload_status',
    'archive_name',
}
def run_command(cmd: list[str]) -> str:
    """Execute *cmd* and return its stripped stdout, or '' if it cannot run."""
    try:
        completed = subprocess.run(cmd, check=False, capture_output=True, text=True)
    except OSError:
        # Binary missing or not executable: report empty output instead of raising.
        return ''
    stdout = completed.stdout or ''
    return stdout.strip()
def count_claims(directory: Path) -> int:
    """Count the '*.claim' marker files directly inside *directory* (0 if absent)."""
    if not directory.exists():
        return 0
    return len(list(directory.glob('*.claim')))
def count_complete(dataset_dir: Path) -> int:
    """Count per-video '.complete' markers under <dataset_dir>/<id>/npz/."""
    if not dataset_dir.exists():
        return 0
    total = 0
    for _marker in dataset_dir.glob('*/npz/.complete'):
        total += 1
    return total
def sum_file_sizes(paths: list[Path]) -> int:
    """Sum the sizes in bytes of the given files; vanished paths contribute 0."""
    size = 0
    for entry in paths:
        try:
            size += entry.stat().st_size
        except FileNotFoundError:
            # File was removed between listing and stat; skip it.
            pass
    return size
def count_uploaded(progress_path: Path) -> tuple[int, int]:
    """Return (archive count, uploaded-folder count) from the progress JSON.

    Any read or parse failure yields (0, 0) rather than raising.
    """
    if not progress_path.exists():
        return 0, 0
    try:
        payload = json.loads(progress_path.read_text())
    except Exception:
        # Corrupt or unreadable progress file: treat as no uploads yet.
        return 0, 0
    n_archives = len(payload.get('archives', {}))
    n_folders = len(payload.get('uploaded_folders', {}))
    return n_archives, n_folders
def expand_task_count(jobid_token: str) -> int:
    """Return the number of array tasks represented by a squeue job-id token.

    squeue's %i prints a pending array job as "<jobid>_[<spec>]" where <spec>
    is a comma-separated list of indices and ranges, optionally with a step
    ("0-15:4") and a concurrency throttle suffix ("%8"). Plain job ids and
    single array tasks ("123", "123_7") count as one task. Malformed pieces
    fall back to a count of 1 rather than raising, and the result is always
    at least 1.
    """
    match = re.fullmatch(r'(\d+)_(\[(.+)\])', jobid_token)
    if not match:
        return 1
    spec = match.group(3).split('%', 1)[0]  # drop the "%limit" throttle, if any
    total = 0
    for part in spec.split(','):
        part = part.strip()
        if not part:
            continue
        if '-' not in part:
            total += 1  # single index
            continue
        lo, hi = part.split('-', 1)
        step = 1
        if ':' in hi:
            # Slurm step syntax, e.g. "0-15:4" -> indices 0, 4, 8, 12.
            # The original code failed int() on "15:4" and undercounted as 1.
            hi, step_text = hi.split(':', 1)
            try:
                step = max(int(step_text), 1)
            except ValueError:
                step = 1
        try:
            total += (int(hi) - int(lo)) // step + 1
        except ValueError:
            total += 1  # unparsable bounds: count the piece as one task
    return max(total, 1)
def queue_status(username: str) -> dict[str, object]:
    """Summarize *username*'s Slurm queue via squeue.

    Returns running dwpose task count, running/pending download task counts,
    and per-(job,state) / per-(job,partition,state) tallies. Everything is
    zero when squeue is unavailable or prints nothing.
    """
    job_counts: Counter[str] = Counter()
    partition_counts: Counter[str] = Counter()
    dwpose_running = 0
    download_running = 0
    download_pending = 0
    raw = run_command(['squeue', '-u', username, '-h', '-o', '%i|%j|%T|%P'])
    lines = raw.splitlines() if raw else []
    for line in lines:
        fields = line.split('|')
        if len(fields) != 4:
            continue  # malformed squeue line; ignore
        token, job_name, state, partition = fields
        tasks = expand_task_count(token)
        job_counts[f'{job_name}|{state}'] += tasks
        partition_counts[f'{job_name}|{partition}|{state}'] += tasks
        if job_name == 'dwpose' and state == 'RUNNING':
            dwpose_running += tasks
        elif job_name == 'download':
            if state == 'RUNNING':
                download_running += tasks
            elif state in ('PENDING', 'CONFIGURING'):
                download_pending += tasks
    return {
        'running_dwpose': dwpose_running,
        'running_download': download_running,
        'pending_download_jobs': download_pending,
        'total_download_jobs': download_running + download_pending,
        'job_state_counts': dict(job_counts),
        'job_partition_state_counts': dict(partition_counts),
    }
def filesystem_avail_bytes(path: Path) -> int:
    """Available bytes on the filesystem holding *path*, parsed from `df -B1`.

    Returns 0 when df fails, emits an unexpected layout, or raises.
    """
    try:
        completed = subprocess.run(
            ['df', '-B1', str(path)], check=False, capture_output=True, text=True
        )
        rows = (completed.stdout or '').splitlines()
        if len(rows) >= 2:
            cols = rows[1].split()
            if len(cols) >= 4:
                # Fourth df column is "Available".
                return int(cols[3])
        return 0
    except Exception:
        # Best-effort probe; any failure reports zero available space.
        return 0
def human_bytes(num: int) -> str:
    """Format a byte count as a human-readable string, e.g. '1.5GB'."""
    magnitude = float(num)
    for suffix in ('B', 'KB', 'MB', 'GB', 'TB', 'PB'):
        if magnitude < 1024.0:
            return f'{magnitude:.1f}{suffix}'
        magnitude /= 1024.0
    # Anything beyond petabytes is reported in exabytes.
    return f'{magnitude:.1f}EB'
def read_source_manifest_count(path: Path) -> int:
    """Count CSV rows whose first cell is non-blank; 0 when the file is absent.

    NOTE(review): a header row with a non-empty first column is counted too —
    presumably the source manifest is headerless; confirm against the file.
    """
    if not path.exists():
        return 0
    with path.open('r', encoding='utf-8-sig', newline='') as handle:
        return sum(
            1
            for row in csv.reader(handle)
            if row and (row[0] or '').strip()
        )
def read_processed_progress(path: Path) -> dict[str, object]:
    """Parse the processed-metadata CSV and tally per-stage row counts.

    Returns a dict carrying csv_exists/csv_ok/csv_error flags plus counters
    for download, process, and upload statuses. Never raises: any problem is
    reported through 'csv_error' with csv_ok left False.
    """
    stats: dict[str, object] = {
        'csv_exists': path.exists(),
        'csv_ok': False,
        'csv_error': '',
        'processed_rows': 0,
        'download_ok_rows': 0,
        'download_skipped_rows': 0,
        'download_running_rows': 0,
        'download_pending_rows': 0,
        'process_ok_rows': 0,
        'process_running_rows': 0,
        'upload_uploaded_rows': 0,
    }
    if not stats['csv_exists']:
        stats['csv_error'] = 'missing'
        return stats
    try:
        with path.open('r', encoding='utf-8-sig', newline='') as handle:
            reader = csv.DictReader(handle)
            header = set(reader.fieldnames or [])
            absent = sorted(PROCESSED_REQUIRED_COLUMNS - header)
            if absent:
                stats['csv_error'] = f'missing_columns:{",".join(absent)}'
                return stats
            n_rows = 0
            for record in reader:
                n_rows += 1
                download = (record.get('download_status') or '').strip()
                process = (record.get('process_status') or '').strip()
                upload = (record.get('upload_status') or '').strip()
                if download == 'ok':
                    stats['download_ok_rows'] += 1
                elif download == 'skipped':
                    stats['download_skipped_rows'] += 1
                elif download == 'running':
                    stats['download_running_rows'] += 1
                else:
                    # Anything unrecognized (including blank) counts as pending.
                    stats['download_pending_rows'] += 1
                if process == 'ok':
                    stats['process_ok_rows'] += 1
                elif process == 'running':
                    stats['process_running_rows'] += 1
                if upload == 'uploaded':
                    stats['upload_uploaded_rows'] += 1
            stats['processed_rows'] = n_rows
        stats['csv_ok'] = True
        return stats
    except Exception as exc:
        stats['csv_error'] = str(exc)
        return stats
def run_sync(runtime_root: Path) -> str:
    """Invoke the sync_processed_csv_from_runtime.py helper for *runtime_root*.

    Returns the helper's stripped stdout ('ok' when empty) on success,
    'missing_sync_script' when the helper is absent, 'error:<exc>' when it
    cannot be launched, and 'failed:<detail>' on a non-zero exit.
    """
    helper = Path('/cache/home/sf895/SignVerse-2M/scripts/sync_processed_csv_from_runtime.py')
    if not helper.exists():
        return 'missing_sync_script'
    args = [
        'python3', str(helper),
        '--source-metadata-csv', str(runtime_root / 'SignVerse-2M-metadata_ori.csv'),
        '--output-metadata-csv', str(runtime_root / 'SignVerse-2M-metadata_processed.csv'),
        '--raw-video-dir', str(runtime_root / 'raw_video'),
        '--raw-caption-dir', str(runtime_root / 'raw_caption'),
        '--raw-metadata-dir', str(runtime_root / 'raw_metadata'),
        '--dataset-dir', str(runtime_root / 'dataset'),
        '--progress-path', str(runtime_root / 'archive_upload_progress.json'),
        '--status-journal-path', str(runtime_root / 'upload_status_journal.jsonl'),
    ]
    try:
        completed = subprocess.run(args, check=False, capture_output=True, text=True)
    except OSError as exc:
        return f'error:{exc}'
    if completed.returncode != 0:
        detail = (completed.stderr or completed.stdout or '').strip()
        return f'failed:{detail}'
    return (completed.stdout or '').strip() or 'ok'
def main() -> None:
    """Collect pipeline status for the runtime tree and print a report.

    Optionally refreshes the processed-metadata CSV first (unless --no-sync),
    then gathers filesystem, CSV, upload, and Slurm-queue metrics and prints
    them either as sorted JSON (--json) or as key=value lines.
    """
    parser = argparse.ArgumentParser(description='Report Sign-DWPose runtime status.')
    parser.add_argument('--runtime-root', default='/home/sf895/SignVerse-2M-runtime')
    parser.add_argument('--username', default='sf895')
    parser.add_argument('--no-sync', action='store_true')
    parser.add_argument('--json', action='store_true')
    args = parser.parse_args()
    runtime_root = Path(args.runtime_root)
    # Well-known locations inside the runtime tree.
    raw_dir = runtime_root / 'raw_video'
    dataset_dir = runtime_root / 'dataset'
    claims_dir = runtime_root / 'slurm' / 'state' / 'claims'
    download_claims_dir = runtime_root / 'slurm' / 'state' / 'download_claims'
    progress_path = runtime_root / 'archive_upload_progress.json'
    source_csv = runtime_root / 'SignVerse-2M-metadata_ori.csv'
    processed_csv = runtime_root / 'SignVerse-2M-metadata_processed.csv'
    # Refresh the processed CSV from runtime state first so the numbers
    # reported below are current.
    sync_result = 'skipped'
    if not args.no_sync:
        sync_result = run_sync(runtime_root)
    # Partition raw_video entries: known video extensions are finished
    # downloads; anything else is treated as an in-flight temp file.
    raw_complete: list[Path] = []
    raw_temp: list[Path] = []
    if raw_dir.exists():
        for path in raw_dir.iterdir():
            if not path.is_file():
                continue
            if path.suffix.lower() in VIDEO_EXTS:
                raw_complete.append(path)
            else:
                raw_temp.append(path)
    raw_size = sum_file_sizes(raw_complete)
    # Total on-disk footprint of the runtime tree (best effort: files may
    # vanish mid-walk, hence the FileNotFoundError guard).
    runtime_size = 0
    if runtime_root.exists():
        for path in runtime_root.rglob('*'):
            try:
                if path.is_file():
                    runtime_size += path.stat().st_size
            except FileNotFoundError:
                continue
    source_rows = read_source_manifest_count(source_csv)
    progress = read_processed_progress(processed_csv)
    payload = {
        'sync_result': sync_result,
        # No leftover temp files means no download is mid-write.
        'download_normal': len(raw_temp) == 0,
        'raw_videos': len(raw_complete),
        'raw_temp_files': len(raw_temp),
        'sent_to_gpu': count_claims(claims_dir),
        'processed_complete': count_complete(dataset_dir),
        'active_downloads': count_claims(download_claims_dir),
        # Placeholders; filled in from the upload progress JSON below.
        'uploaded_archives': 0,
        'uploaded_folders': 0,
        'raw_size_bytes': raw_size,
        'runtime_size_bytes': runtime_size,
        'filesystem_avail_bytes': filesystem_avail_bytes(runtime_root),
        'source_rows': source_rows,
        'csv_exists': progress['csv_exists'],
        'csv_ok': progress['csv_ok'],
        'csv_error': progress['csv_error'],
        'processed_rows': progress['processed_rows'],
        'download_ok_rows': progress['download_ok_rows'],
        'download_skipped_rows': progress['download_skipped_rows'],
        'download_running_rows': progress['download_running_rows'],
        'download_pending_rows': progress['download_pending_rows'],
        'process_ok_rows': progress['process_ok_rows'],
        'process_running_rows': progress['process_running_rows'],
        'upload_uploaded_rows': progress['upload_uploaded_rows'],
    }
    # Merge in upload progress and live Slurm queue counts.
    uploaded_archives, uploaded_folders = count_uploaded(progress_path)
    payload['uploaded_archives'] = uploaded_archives
    payload['uploaded_folders'] = uploaded_folders
    payload.update(queue_status(args.username))
    # Row parity is only meaningful when the processed CSV parsed cleanly.
    payload['csv_row_match'] = (payload['processed_rows'] == payload['source_rows']) if payload['csv_ok'] else False
    if args.json:
        print(json.dumps(payload, ensure_ascii=False, indent=2, sort_keys=True))
        return
    # Plain-text report: one key=value line per metric.
    print(f"sync_result={payload['sync_result']}")
    print(f"download_normal={payload['download_normal']}")
    print(f"raw_videos={payload['raw_videos']}")
    print(f"raw_temp_files={payload['raw_temp_files']}")
    print(f"sent_to_gpu={payload['sent_to_gpu']}")
    print(f"running_dwpose={payload['running_dwpose']}")
    print(f"processed_complete={payload['processed_complete']}")
    print(f"active_downloads={payload['active_downloads']}")
    print(f"running_download_jobs={payload['running_download']}")
    print(f"pending_download_jobs={payload['pending_download_jobs']}")
    print(f"total_download_jobs={payload['total_download_jobs']}")
    print(f"uploaded_archives={payload['uploaded_archives']}")
    print(f"uploaded_folders={payload['uploaded_folders']}")
    print(f"source_rows={payload['source_rows']}")
    print(f"processed_rows={payload['processed_rows']}")
    print(f"csv_ok={payload['csv_ok']}")
    print(f"csv_row_match={payload['csv_row_match']}")
    print(f"csv_error={payload['csv_error']}")
    print(f"download_ok_rows={payload['download_ok_rows']}")
    print(f"download_skipped_rows={payload['download_skipped_rows']}")
    print(f"download_running_rows={payload['download_running_rows']}")
    print(f"download_pending_rows={payload['download_pending_rows']}")
    print(f"process_ok_rows={payload['process_ok_rows']}")
    print(f"process_running_rows={payload['process_running_rows']}")
    print(f"upload_uploaded_rows={payload['upload_uploaded_rows']}")
    print(f"raw_size={human_bytes(payload['raw_size_bytes'])}")
    print(f"runtime_size={human_bytes(payload['runtime_size_bytes'])}")
    print(f"filesystem_avail={human_bytes(payload['filesystem_avail_bytes'])}")


if __name__ == '__main__':
    main()