Sen Fang committed on
Commit · bc864be
Parent(s): 1279c9c

Harden runtime state and exclude uploaded videos
- reproduce_independently.sh +4 -0
- reproduce_independently_slurm.sh +177 -70
- scripts/pipeline01_download_video_fix_caption.py +53 -45
- scripts/pipeline02_extract_dwpose_from_video.py +28 -9
- scripts/pipeline03_upload_to_huggingface.py +286 -102
- scripts/runtime_status.py +155 -18
- scripts/sync_processed_csv_from_runtime.py +13 -2
- slurm/orchestrator_autorestart.slurm +39 -8
- slurm/process_download_array.slurm +8 -0
- slurm/process_dwpose_array.slurm +107 -49
- slurm/process_upload_parallel_array.slurm +99 -0
- slurm/run_reproduce_independently_slurm.slurm +2 -2
- slurm/submit_download_slurm.sh +50 -14
- slurm/submit_dwpose_slurm.sh +223 -58
- slurm/submit_upload_parallel_drain.sh +87 -0
- slurm/submit_upload_parallel_slurm.sh +95 -0
- slurm/watch_submit_dwpose.slurm +2 -2
- utils/dataset_pool.py +74 -0
- utils/raw_video_pool.py +164 -0
- utils/stats_npz.py +133 -3
reproduce_independently.sh
CHANGED
@@ -294,13 +294,17 @@ run_upload_stage() {
  local require_target="${1:-0}"
  local cmd=(python "$PIPELINE03"
    --dataset-dir "$DATASET_DIR"
+    --scratch-dataset-dir "$SCRATCH_DATASET_DIR"
    --raw-video-dir "$RAW_VIDEO_DIR"
+    --scratch-raw-video-dir "$SCRATCH_RAW_VIDEO_DIR"
    --raw-caption-dir "$RAW_CAPTION_DIR"
    --raw-metadata-dir "$RAW_METADATA_DIR"
    --archive-dir "$ARCHIVE_DIR"
    --progress-path "$PROGRESS_JSON"
    --stats-npz "$STATS_NPZ"
+    --status-journal-path "$RUNTIME_ROOT/upload_status_journal.jsonl"
    --repo-id "$REPO_ID"
+    --repo-revision "$REPO_REVISION"
    --target-bytes "$TARGET_BYTES"
    --target-folders "$TARGET_FOLDERS"
  )
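The upload command above now carries a --status-journal-path (an append-only JSONL file under $RUNTIME_ROOT) and a --repo-revision. The journal writer itself lives in utils/stats_npz.py (+133 lines, not shown on this page); the sketch below only illustrates the idea of a best-effort JSONL status journal, and the record shape, the append_status_journal helper name, and the fallback behaviour are assumptions.

# Hypothetical sketch, not the repository's implementation.
import json
import time
from pathlib import Path

def append_status_journal(journal_path: Path, video_id: str, **fields) -> None:
    # One JSON object per line; consumers can replay the journal to rebuild per-video state.
    record = {"video_id": video_id, "ts": time.time(), **fields}
    journal_path.parent.mkdir(parents=True, exist_ok=True)
    with journal_path.open("a", encoding="utf-8") as handle:
        handle.write(json.dumps(record, ensure_ascii=False) + "\n")

def update_video_stats_best_effort(stats_npz: Path, journal_path: Path, video_id: str, **fields) -> None:
    # "Best effort": bookkeeping failures must never break the download/process loop.
    try:
        append_status_journal(journal_path, video_id, **fields)
        # The real helper presumably also folds `fields` into stats.npz here.
    except Exception:
        pass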
reproduce_independently_slurm.sh
CHANGED
@@ -13,12 +13,18 @@ if [[ ! -f "$SOURCE_METADATA_CSV" && -f "$ROOT_DIR/SignVerse-2M-metadata_ori.csv
fi
OUTPUT_METADATA_CSV="${OUTPUT_METADATA_CSV:-$RUNTIME_ROOT/SignVerse-2M-metadata_processed.csv}"
RAW_VIDEO_DIR="${RAW_VIDEO_DIR:-$RUNTIME_ROOT/raw_video}"
+SCRATCH_RAW_VIDEO_DIR="${SCRATCH_RAW_VIDEO_DIR:-/scratch/$USER/SignVerse-2M-runtime/raw_video}"
+SCRATCH_DATASET_DIR="${SCRATCH_DATASET_DIR:-/scratch/$USER/SignVerse-2M-runtime/dataset}"
+HOME_RAW_VIDEO_LIMIT="${HOME_RAW_VIDEO_LIMIT:-180}"
+SCRATCH_RAW_VIDEO_LIMIT="${SCRATCH_RAW_VIDEO_LIMIT:-2800}"
+DEFAULT_TOTAL_RAW_VIDEO_LIMIT="$((HOME_RAW_VIDEO_LIMIT + SCRATCH_RAW_VIDEO_LIMIT))"
RAW_CAPTION_DIR="${RAW_CAPTION_DIR:-$RUNTIME_ROOT/raw_caption}"
RAW_METADATA_DIR="${RAW_METADATA_DIR:-$RUNTIME_ROOT/raw_metadata}"
DATASET_DIR="${DATASET_DIR:-$RUNTIME_ROOT/dataset}"
ARCHIVE_DIR="${ARCHIVE_DIR:-$RUNTIME_ROOT/archives}"
STATS_NPZ="${STATS_NPZ:-$RUNTIME_ROOT/stats.npz}"
PROGRESS_JSON="${PROGRESS_JSON:-$RUNTIME_ROOT/archive_upload_progress.json}"
+RUNTIME_LOG_ROOT="${RUNTIME_LOG_ROOT:-/scratch/$USER/SignVerse-2M-runtime/slurm/logs}"

PIPELINE01="${PIPELINE01:-$ROOT_DIR/scripts/pipeline01_download_video_fix_caption.py}"
PIPELINE02="${PIPELINE02:-$ROOT_DIR/scripts/pipeline02_extract_dwpose_from_video.py}"

@@ -36,13 +42,15 @@ DOWNLOAD_BATCH_SIZE="${DOWNLOAD_BATCH_SIZE:-1}"
DOWNLOAD_WORKERS="${DOWNLOAD_WORKERS:-60}"
USE_SLURM_DOWNLOAD="${USE_SLURM_DOWNLOAD:-1}"
PROCESS_BATCH_SIZE="${PROCESS_BATCH_SIZE:-}"
+VIDEOS_PER_JOB="${VIDEOS_PER_JOB:-5}"
MIN_PROCESS_START_BACKLOG="${MIN_PROCESS_START_BACKLOG:-4}"
PROCESS_PENDING_TIMEOUT_SECONDS="${PROCESS_PENDING_TIMEOUT_SECONDS:-1800}"
-RAW_BACKLOG_LIMIT="${RAW_BACKLOG_LIMIT:-
+RAW_BACKLOG_LIMIT="${RAW_BACKLOG_LIMIT:-$DEFAULT_TOTAL_RAW_VIDEO_LIMIT}"
MAX_RAW_VIDEO_BYTES="${MAX_RAW_VIDEO_BYTES:-0}"
MAX_ITERATIONS="${MAX_ITERATIONS:-0}"
IDLE_SLEEP_SECONDS="${IDLE_SLEEP_SECONDS:-5}"
REPO_ID="${REPO_ID:-SignerX/SignVerse-2M}"
+REPO_REVISION="${REPO_REVISION:-dev}"
COOKIES_FILE="${COOKIES_FILE:-$ROOT_DIR/www.youtube.com_cookies (2).txt}"
COOKIES_FROM_BROWSER="${COOKIES_FROM_BROWSER:-}"
EXTRACTOR_ARGS="${EXTRACTOR_ARGS:-}"

@@ -51,7 +59,7 @@ SLURM_DOWNLOAD_SUBMIT_SCRIPT="${SLURM_DOWNLOAD_SUBMIT_SCRIPT:-$ROOT_DIR/slurm/su
GPU_PARTITIONS="gpu,gpu-redhat,cgpu"
GPU_ACCOUNT="${GPU_ACCOUNT:-}"
ARRAY_PARALLEL="${ARRAY_PARALLEL:-}"
-MAX_BACKLOG_VIDEOS="${MAX_BACKLOG_VIDEOS:-
+MAX_BACKLOG_VIDEOS="${MAX_BACKLOG_VIDEOS:-$DEFAULT_TOTAL_RAW_VIDEO_LIMIT}"
DOWNLOAD_CLAIM_DIR="${DOWNLOAD_CLAIM_DIR:-$STATE_ROOT/slurm/state/download_claims}"
DOWNLOAD_CSV_LOCK_PATH="${DOWNLOAD_CSV_LOCK_PATH:-$RUNTIME_ROOT/SignVerse-2M-metadata_processed.csv.lock}"
DOWNLOAD_PARTITIONS="${DOWNLOAD_PARTITIONS:-main}"

@@ -67,7 +75,7 @@ DOWNLOAD_CLAIM_GRACE_SECONDS="${DOWNLOAD_CLAIM_GRACE_SECONDS:-600}"
DOWNLOAD_PARTIAL_TIMEOUT_SECONDS="${DOWNLOAD_PARTIAL_TIMEOUT_SECONDS:-1800}"
ORCHESTRATOR_PARTITION="${ORCHESTRATOR_PARTITION:-main}"
ORCHESTRATOR_ACCOUNT="${ORCHESTRATOR_ACCOUNT:-}"
-ORCHESTRATOR_TIME="${ORCHESTRATOR_TIME:-
+ORCHESTRATOR_TIME="${ORCHESTRATOR_TIME:-12:00:00}"
ORCHESTRATOR_CPUS_PER_TASK="${ORCHESTRATOR_CPUS_PER_TASK:-4}"
ORCHESTRATOR_MEM="${ORCHESTRATOR_MEM:-8G}"
RUN_LOCAL="${RUN_LOCAL:-0}"

@@ -109,6 +117,7 @@ Options:
  --target-folders N
  --download-batch-size N
  --process-batch-size N
+  --videos-per-job N
  --use-slurm-download {0,1}
  --download-partitions P1[,P2,...]
  --download-account NAME

@@ -121,6 +130,7 @@ Options:
  --max-iterations N
  --idle-sleep-seconds N
  --repo-id REPO
+  --repo-revision BRANCH
  --cookies FILE
  --cookies-from-browser BROWSER
  --extractor-args VALUE

@@ -265,6 +275,10 @@ while [[ $# -gt 0 ]]; do
      PROCESS_BATCH_SIZE="$2"
      shift 2
      ;;
+    --videos-per-job)
+      VIDEOS_PER_JOB="$2"
+      shift 2
+      ;;
    --raw-backlog-limit)
      RAW_BACKLOG_LIMIT="$2"
      shift 2

@@ -285,6 +299,10 @@ while [[ $# -gt 0 ]]; do
      REPO_ID="$2"
      shift 2
      ;;
+    --repo-revision)
+      REPO_REVISION="$2"
+      shift 2
+      ;;
    --cookies)
      COOKIES_FILE="$2"
      shift 2

@@ -353,18 +371,21 @@ if [[ -z "${SLURM_JOB_ID:-}" && "$RUN_LOCAL" != "1" ]]; then
    echo "Missing orchestration wrapper: $wrapper" >&2
    exit 1
  fi
-  export_args="ALL,ROOT_DIR=$ROOT_DIR,RUNTIME_ROOT=$RUNTIME_ROOT,STATE_ROOT=$STATE_ROOT,SOURCE_METADATA_CSV=$SOURCE_METADATA_CSV,OUTPUT_METADATA_CSV=$OUTPUT_METADATA_CSV,RAW_VIDEO_DIR=$RAW_VIDEO_DIR,RAW_CAPTION_DIR=$RAW_CAPTION_DIR,RAW_METADATA_DIR=$RAW_METADATA_DIR,DATASET_DIR=$DATASET_DIR,ARCHIVE_DIR=$ARCHIVE_DIR,STATS_NPZ=$STATS_NPZ,PROGRESS_JSON=$PROGRESS_JSON,STAGE=$STAGE,LIMIT=$LIMIT,FPS=$FPS,WORKERS=$WORKERS,TARGET_BYTES=$TARGET_BYTES,DOWNLOAD_BATCH_SIZE=$DOWNLOAD_BATCH_SIZE,DOWNLOAD_WORKERS=$DOWNLOAD_WORKERS,USE_SLURM_DOWNLOAD=$USE_SLURM_DOWNLOAD,SLURM_DOWNLOAD_SUBMIT_SCRIPT=$SLURM_DOWNLOAD_SUBMIT_SCRIPT,DOWNLOAD_PARTITIONS=$DOWNLOAD_PARTITIONS,DOWNLOAD_ACCOUNT=$DOWNLOAD_ACCOUNT,DOWNLOAD_TIME=$DOWNLOAD_TIME,DOWNLOAD_CPUS_PER_TASK=$DOWNLOAD_CPUS_PER_TASK,DOWNLOAD_MEM=$DOWNLOAD_MEM,DOWNLOAD_ARRAY_PARALLEL=$DOWNLOAD_ARRAY_PARALLEL,DOWNLOAD_MAX_ACTIVE=$DOWNLOAD_MAX_ACTIVE,DOWNLOAD_START_STAGGER_MIN=$DOWNLOAD_START_STAGGER_MIN,DOWNLOAD_START_STAGGER_MAX=$DOWNLOAD_START_STAGGER_MAX,PROCESS_BATCH_SIZE=$PROCESS_BATCH_SIZE,DOWNLOAD_CLAIM_DIR=$DOWNLOAD_CLAIM_DIR,DOWNLOAD_CSV_LOCK_PATH=$DOWNLOAD_CSV_LOCK_PATH,MIN_PROCESS_START_BACKLOG=$MIN_PROCESS_START_BACKLOG,RAW_BACKLOG_LIMIT=$RAW_BACKLOG_LIMIT,MAX_RAW_VIDEO_BYTES=$MAX_RAW_VIDEO_BYTES,MAX_ITERATIONS=$MAX_ITERATIONS,IDLE_SLEEP_SECONDS=$IDLE_SLEEP_SECONDS,REPO_ID=$REPO_ID,COOKIES_FILE=$COOKIES_FILE,COOKIES_FROM_BROWSER=$COOKIES_FROM_BROWSER,EXTRACTOR_ARGS=$EXTRACTOR_ARGS,GPU_PARTITIONS=$GPU_PARTITIONS,GPU_ACCOUNT=$GPU_ACCOUNT,ARRAY_PARALLEL=$ARRAY_PARALLEL,MAX_BACKLOG_VIDEOS=$MAX_BACKLOG_VIDEOS,FORCE_METADATA=$FORCE_METADATA,FORCE_SUBTITLES=$FORCE_SUBTITLES,FORCE_DOWNLOAD=$FORCE_DOWNLOAD,FORCE_PROCESS=$FORCE_PROCESS,SKIP_VIDEO_DOWNLOAD=$SKIP_VIDEO_DOWNLOAD,SKIP_SUBTITLES=$SKIP_SUBTITLES,DRY_RUN_UPLOAD=$DRY_RUN_UPLOAD,RUN_LOCAL=1"
+  export_args="ALL,ROOT_DIR=$ROOT_DIR,RUNTIME_ROOT=$RUNTIME_ROOT,STATE_ROOT=$STATE_ROOT,SOURCE_METADATA_CSV=$SOURCE_METADATA_CSV,OUTPUT_METADATA_CSV=$OUTPUT_METADATA_CSV,RAW_VIDEO_DIR=$RAW_VIDEO_DIR,SCRATCH_RAW_VIDEO_DIR=$SCRATCH_RAW_VIDEO_DIR,HOME_RAW_VIDEO_LIMIT=$HOME_RAW_VIDEO_LIMIT,SCRATCH_RAW_VIDEO_LIMIT=$SCRATCH_RAW_VIDEO_LIMIT,RAW_CAPTION_DIR=$RAW_CAPTION_DIR,RAW_METADATA_DIR=$RAW_METADATA_DIR,DATASET_DIR=$DATASET_DIR,SCRATCH_DATASET_DIR=$SCRATCH_DATASET_DIR,ARCHIVE_DIR=$ARCHIVE_DIR,STATS_NPZ=$STATS_NPZ,PROGRESS_JSON=$PROGRESS_JSON,STAGE=$STAGE,LIMIT=$LIMIT,FPS=$FPS,WORKERS=$WORKERS,TARGET_BYTES=$TARGET_BYTES,DOWNLOAD_BATCH_SIZE=$DOWNLOAD_BATCH_SIZE,DOWNLOAD_WORKERS=$DOWNLOAD_WORKERS,USE_SLURM_DOWNLOAD=$USE_SLURM_DOWNLOAD,SLURM_DOWNLOAD_SUBMIT_SCRIPT=$SLURM_DOWNLOAD_SUBMIT_SCRIPT,DOWNLOAD_PARTITIONS=$DOWNLOAD_PARTITIONS,DOWNLOAD_ACCOUNT=$DOWNLOAD_ACCOUNT,DOWNLOAD_TIME=$DOWNLOAD_TIME,DOWNLOAD_CPUS_PER_TASK=$DOWNLOAD_CPUS_PER_TASK,DOWNLOAD_MEM=$DOWNLOAD_MEM,DOWNLOAD_ARRAY_PARALLEL=$DOWNLOAD_ARRAY_PARALLEL,DOWNLOAD_MAX_ACTIVE=$DOWNLOAD_MAX_ACTIVE,DOWNLOAD_START_STAGGER_MIN=$DOWNLOAD_START_STAGGER_MIN,DOWNLOAD_START_STAGGER_MAX=$DOWNLOAD_START_STAGGER_MAX,PROCESS_BATCH_SIZE=$PROCESS_BATCH_SIZE,VIDEOS_PER_JOB=$VIDEOS_PER_JOB,DOWNLOAD_CLAIM_DIR=$DOWNLOAD_CLAIM_DIR,DOWNLOAD_CSV_LOCK_PATH=$DOWNLOAD_CSV_LOCK_PATH,MIN_PROCESS_START_BACKLOG=$MIN_PROCESS_START_BACKLOG,RAW_BACKLOG_LIMIT=$RAW_BACKLOG_LIMIT,MAX_RAW_VIDEO_BYTES=$MAX_RAW_VIDEO_BYTES,MAX_ITERATIONS=$MAX_ITERATIONS,IDLE_SLEEP_SECONDS=$IDLE_SLEEP_SECONDS,REPO_ID=$REPO_ID,COOKIES_FILE=$COOKIES_FILE,COOKIES_FROM_BROWSER=$COOKIES_FROM_BROWSER,EXTRACTOR_ARGS=$EXTRACTOR_ARGS,GPU_PARTITIONS=$GPU_PARTITIONS,GPU_ACCOUNT=$GPU_ACCOUNT,ARRAY_PARALLEL=$ARRAY_PARALLEL,MAX_BACKLOG_VIDEOS=$MAX_BACKLOG_VIDEOS,FORCE_METADATA=$FORCE_METADATA,FORCE_SUBTITLES=$FORCE_SUBTITLES,FORCE_DOWNLOAD=$FORCE_DOWNLOAD,FORCE_PROCESS=$FORCE_PROCESS,SKIP_VIDEO_DOWNLOAD=$SKIP_VIDEO_DOWNLOAD,SKIP_SUBTITLES=$SKIP_SUBTITLES,DRY_RUN_UPLOAD=$DRY_RUN_UPLOAD,RUN_LOCAL=1"
  if [[ ${#VIDEO_IDS[@]} -gt 0 ]]; then
    export VIDEO_IDS_JOINED
    VIDEO_IDS_JOINED="${VIDEO_IDS[*]}"
    export_args+=",VIDEO_IDS_JOINED=$VIDEO_IDS_JOINED"
  fi
+  mkdir -p "$RUNTIME_LOG_ROOT"
  cmd=(sbatch
    --partition "$ORCHESTRATOR_PARTITION"
    --time "$ORCHESTRATOR_TIME"
    --cpus-per-task "$ORCHESTRATOR_CPUS_PER_TASK"
    --mem "$ORCHESTRATOR_MEM"
-    --
+    --output "$RUNTIME_LOG_ROOT/sign-dwpose-orch_%j.out"
+    --error "$RUNTIME_LOG_ROOT/sign-dwpose-orch_%j.err"
+    --export "$export_args,RUNTIME_LOG_ROOT=$RUNTIME_LOG_ROOT"
  )
  if [[ -n "$ORCHESTRATOR_ACCOUNT" ]]; then
    cmd+=(--account "$ORCHESTRATOR_ACCOUNT")

@@ -379,7 +400,7 @@ if [[ -n "${VIDEO_IDS_JOINED:-}" && ${#VIDEO_IDS[@]} -eq 0 ]]; then
  IFS=' ' read -r -a VIDEO_IDS <<< "$VIDEO_IDS_JOINED"
fi

-mkdir -p "$RAW_VIDEO_DIR" "$RAW_CAPTION_DIR" "$RAW_METADATA_DIR" "$DATASET_DIR"
+mkdir -p "$RAW_VIDEO_DIR" "$SCRATCH_RAW_VIDEO_DIR" "$RAW_CAPTION_DIR" "$RAW_METADATA_DIR" "$DATASET_DIR" "$SCRATCH_DATASET_DIR"
if [[ ! -x "$SLURM_PROCESS_SUBMIT_SCRIPT" ]]; then
  echo "Missing Slurm submit script: $SLURM_PROCESS_SUBMIT_SCRIPT" >&2
  exit 1

@@ -402,7 +423,10 @@ run_download_stage() {
    --time "$DOWNLOAD_TIME"
    --cpus-per-task "$DOWNLOAD_CPUS_PER_TASK"
    --mem "$DOWNLOAD_MEM"
-    --max-backlog-videos "$RAW_BACKLOG_LIMIT"
+    --max-backlog-videos "$RAW_BACKLOG_LIMIT" \
+    --scratch-raw-video-dir "$SCRATCH_RAW_VIDEO_DIR" \
+    --home-raw-video-limit "$HOME_RAW_VIDEO_LIMIT" \
+    --scratch-raw-video-limit "$SCRATCH_RAW_VIDEO_LIMIT"
    --workers "$DOWNLOAD_WORKERS"
    --max-active-downloads "$DOWNLOAD_MAX_ACTIVE"
    --claim-dir "$DOWNLOAD_CLAIM_DIR"

@@ -456,11 +480,14 @@ run_download_stage() {
  local cmd=(python "$PIPELINE01"
    --source-metadata-csv "$SOURCE_METADATA_CSV"
    --output-metadata-csv "$OUTPUT_METADATA_CSV"
-    --raw-video-dir "$RAW_VIDEO_DIR"
+    --raw-video-dir "$RAW_VIDEO_DIR" \
+    --scratch-raw-video-dir "$SCRATCH_RAW_VIDEO_DIR"
    --raw-caption-dir "$RAW_CAPTION_DIR"
    --raw-metadata-dir "$RAW_METADATA_DIR"
    --dataset-dir "$DATASET_DIR"
+    --scratch-dataset-dir "$SCRATCH_DATASET_DIR"
    --stats-npz "$STATS_NPZ"
+    --status-journal-path "$RUNTIME_ROOT/upload_status_journal.jsonl"
    --claim-dir "$DOWNLOAD_CLAIM_DIR"
    --csv-lock-path "$DOWNLOAD_CSV_LOCK_PATH"
  )

@@ -527,6 +554,9 @@ run_process_stage() {
  if [[ -n "$ARRAY_PARALLEL" ]]; then
    cmd+=(--array-parallel "$ARRAY_PARALLEL")
  fi
+  if [[ -n "$VIDEOS_PER_JOB" ]]; then
+    cmd+=(--videos-per-job "$VIDEOS_PER_JOB")
+  fi
  if [[ -n "$GPU_ACCOUNT" ]]; then
    cmd+=(--account "$GPU_ACCOUNT")
  fi

@@ -548,9 +578,11 @@ run_sync_csv_stage() {
    --source-metadata-csv "$SOURCE_METADATA_CSV" \
    --output-metadata-csv "$OUTPUT_METADATA_CSV" \
    --raw-video-dir "$RAW_VIDEO_DIR" \
+    --scratch-raw-video-dir "$SCRATCH_RAW_VIDEO_DIR" \
    --raw-caption-dir "$RAW_CAPTION_DIR" \
    --raw-metadata-dir "$RAW_METADATA_DIR" \
    --dataset-dir "$DATASET_DIR" \
+    --scratch-dataset-dir "$SCRATCH_DATASET_DIR" \
    --progress-path "$PROGRESS_JSON" \
    --status-journal-path "$RUNTIME_ROOT/upload_status_journal.jsonl"
}

@@ -559,13 +591,17 @@ run_upload_stage() {
  local require_target="${1:-0}"
  local cmd=(python "$PIPELINE03"
    --dataset-dir "$DATASET_DIR"
+    --scratch-dataset-dir "$SCRATCH_DATASET_DIR"
    --raw-video-dir "$RAW_VIDEO_DIR"
+    --scratch-raw-video-dir "$SCRATCH_RAW_VIDEO_DIR"
    --raw-caption-dir "$RAW_CAPTION_DIR"
    --raw-metadata-dir "$RAW_METADATA_DIR"
    --archive-dir "$ARCHIVE_DIR"
    --progress-path "$PROGRESS_JSON"
    --stats-npz "$STATS_NPZ"
+    --status-journal-path "$RUNTIME_ROOT/upload_status_journal.jsonl"
    --repo-id "$REPO_ID"
+    --repo-revision "$REPO_REVISION"
    --target-bytes "$TARGET_BYTES"
    --target-folders "$TARGET_FOLDERS"
  )

@@ -583,18 +619,22 @@ run_upload_stage() {
prune_processed_raw_videos() {
  python - <<PY
from pathlib import Path
+raw_dirs = [Path("$RAW_VIDEO_DIR"), Path("$SCRATCH_RAW_VIDEO_DIR")]
+dataset_dirs = [Path("$DATASET_DIR"), Path("$SCRATCH_DATASET_DIR")]
video_extensions = {".mp4", ".mkv", ".webm", ".mov"}
removed = 0
+complete = set()
+for current_dataset_dir in dataset_dirs:
+    if current_dataset_dir.exists():
+        complete.update({p.parent.parent.name for p in current_dataset_dir.glob("*/npz/.complete")})
+for raw_dir in raw_dirs:
+    if raw_dir.exists():
+        for video_path in raw_dir.iterdir():
+            if not video_path.is_file() or video_path.suffix.lower() not in video_extensions:
+                continue
+            if video_path.stem in complete:
+                video_path.unlink(missing_ok=True)
+                removed += 1
print(removed)
PY
}

@@ -619,13 +659,20 @@ PY
count_pending_process() {
  python - <<PY
from pathlib import Path
+raw_dirs = [Path("$RAW_VIDEO_DIR"), Path("$SCRATCH_RAW_VIDEO_DIR")]
video_extensions = {".mp4", ".mkv", ".webm", ".mov"}
pending = 0
+seen = set()
+for raw_dir in raw_dirs:
+    if not raw_dir.exists():
+        continue
    for video_path in raw_dir.iterdir():
-        if video_path.is_file()
+        if not video_path.is_file() or video_path.suffix.lower() not in video_extensions:
+            continue
+        if video_path.stem in seen:
+            continue
+        seen.add(video_path.stem)
+        pending += 1
print(pending)
PY
}

@@ -806,7 +853,7 @@ cleanup_stale_download_partials() {
import time
from pathlib import Path
claim_dir = Path("$DOWNLOAD_CLAIM_DIR")
+raw_dirs = [Path("$RAW_VIDEO_DIR"), Path("$SCRATCH_RAW_VIDEO_DIR")]
timeout = int("$DOWNLOAD_PARTIAL_TIMEOUT_SECONDS")
now = time.time()
active_ids = set()

@@ -817,7 +864,9 @@ try:
except Exception:
    pass
removed = 0
+for raw_dir in raw_dirs:
+    if not raw_dir.exists():
+        continue
    for path in raw_dir.iterdir():
        if not path.is_file():
            continue

@@ -864,26 +913,45 @@ PY

count_active_process_claims() {
  python - <<PY
+import re
import subprocess
from datetime import datetime, timedelta
from pathlib import Path
+
claim_dir = Path("$STATE_ROOT/slurm/state/claims")
claim_dir.mkdir(parents=True, exist_ok=True)
timeout_seconds = int("$PROCESS_PENDING_TIMEOUT_SECONDS")
+videos_per_job = int("${VIDEOS_PER_JOB:-1}")
now = datetime.now()
job_states = {}
base_states = {}
+array_re = re.compile(r"^(?P<base>\d+)_\[(?P<start>\d+)-(?P<end>\d+)(?:%(?P<limit>\d+))?\]$")
+
+def expand_job_token(job_token: str):
+    job_token = job_token.strip()
+    match = array_re.match(job_token)
+    if not match:
+        return [job_token]
+    base = match.group("base")
+    start = int(match.group("start"))
+    end = int(match.group("end"))
+    return [f"{base}_{index}" for index in range(start, end + 1)]
+
try:
    result = subprocess.run(["squeue", "-h", "-u", "$USER", "-o", "%i|%T"], check=True, capture_output=True, text=True)
    for line in result.stdout.splitlines():
        if not line.strip() or "|" not in line:
            continue
+        job_token, state = line.split("|", 1)
-        job_key = job_key.strip()
        state = state.strip().upper()
+        if videos_per_job > 1:
+            for job_key in expand_job_token(job_token):
+                job_states[job_key] = state
+                base_states[job_key.split("_", 1)[0]] = state
+        else:
+            job_key = job_token.strip()
+            job_states[job_key] = state
+            base_states[job_key.split("_", 1)[0]] = state
except Exception:
    job_states = {}
    base_states = {}

@@ -912,6 +980,17 @@ for claim_path in claim_dir.glob("*.claim"):
    else:
        state = base_states.get(job_id)
    if not state:
+        submitted_at = meta.get("submitted_at", "")
+        fresh_missing = False
+        if submitted_at:
+            try:
+                submitted_dt = datetime.strptime(submitted_at, "%Y-%m-%d %H:%M:%S")
+                fresh_missing = (now - submitted_dt) <= timedelta(seconds=min(timeout_seconds, 600))
+            except Exception:
+                fresh_missing = False
+        if fresh_missing:
+            count += 1
+            continue
        claim_path.unlink(missing_ok=True)
        continue
    submitted_at = meta.get("submitted_at", "")

@@ -923,7 +1002,8 @@ for claim_path in claim_dir.glob("*.claim"):
    except Exception:
        stale_pending = False
    if stale_pending:
+        cancel_target = job_key or (f"{job_id}_{task_id}" if task_id else job_id)
+        subprocess.run(["scancel", cancel_target], check=False)
        claim_path.unlink(missing_ok=True)
        continue
    count += 1

@@ -933,6 +1013,7 @@ PY

cleanup_stale_process_jobs() {
  python - <<PY
+import re
import subprocess
from datetime import datetime, timedelta
from pathlib import Path

@@ -940,6 +1021,7 @@ from pathlib import Path
claim_dir = Path("$STATE_ROOT/slurm/state/claims")
claim_dir.mkdir(parents=True, exist_ok=True)
timeout_seconds = int("$PROCESS_PENDING_TIMEOUT_SECONDS")
+videos_per_job = int("${VIDEOS_PER_JOB:-1}")
now = datetime.now()

bad_reason_tokens = (

@@ -953,6 +1035,18 @@ bad_reason_tokens = (
timed_pending_reasons = ("Priority", "Resources", "QOS")

squeue_rows = {}
+array_re = re.compile(r"^(?P<base>\d+)_\[(?P<start>\d+)-(?P<end>\d+)(?:%(?P<limit>\d+))?\]$")
+
+def expand_job_token(job_token: str):
+    job_token = job_token.strip()
+    match = array_re.match(job_token)
+    if not match:
+        return [job_token]
+    base = match.group("base")
+    start = int(match.group("start"))
+    end = int(match.group("end"))
+    return [f"{base}_{index}" for index in range(start, end + 1)]
+
try:
    proc = subprocess.run(
        ["squeue", "-h", "-u", "$USER", "-n", "dwpose", "-o", "%i|%T|%R"],

@@ -963,8 +1057,14 @@ try:
    for line in proc.stdout.splitlines():
        if not line.strip() or "|" not in line:
            continue
+        job_token, state, reason = line.split("|", 2)
+        state = state.strip().upper()
+        reason = reason.strip()
+        if videos_per_job > 1:
+            for job_key in expand_job_token(job_token):
+                squeue_rows[job_key] = (state, reason)
+        else:
+            squeue_rows[job_token.strip()] = (state, reason)
except Exception:
    squeue_rows = {}

@@ -991,6 +1091,15 @@ for claim_path in claim_dir.glob("*.claim"):
        continue
    row = squeue_rows.get(job_key)
    if not row:
+        fresh_missing = False
+        if submitted_at:
+            try:
+                submitted_dt = datetime.strptime(submitted_at, "%Y-%m-%d %H:%M:%S")
+                fresh_missing = (now - submitted_dt) <= timedelta(seconds=min(timeout_seconds, 600))
+            except Exception:
+                fresh_missing = False
+        if fresh_missing:
+            continue
        claim_path.unlink(missing_ok=True)
        removed_claims += 1
        continue

@@ -1008,8 +1117,9 @@ for claim_path in claim_dir.glob("*.claim"):
    except Exception:
        pass
    if should_cancel:
+        cancel_target = job_key or (f"{job_id}_{task_id}" if task_id else job_id)
+        subprocess.run(["scancel", cancel_target], check=False)
+        cancelled_jobs.add(cancel_target)
        claim_path.unlink(missing_ok=True)
        removed_claims += 1

@@ -1073,51 +1183,43 @@ PY
}

count_complete_pending_upload() {
-  python - <<PY
+  PYTHONPATH="$ROOT_DIR${PYTHONPATH:+:$PYTHONPATH}" python - <<PY
import json
from pathlib import Path
+from utils.dataset_pool import list_unuploaded_folder_paths
+
dataset_dir = Path("$DATASET_DIR")
+scratch_dataset_dir = Path("$SCRATCH_DATASET_DIR")
progress_path = Path("$PROGRESS_JSON")
-uploaded =
+uploaded = {}
if progress_path.exists():
-    uploaded =
-if dataset_dir.exists():
-    for folder_path in dataset_dir.iterdir():
-        if not folder_path.is_dir():
-            continue
-        if folder_path.name in uploaded:
-            continue
-        if (folder_path / "npz" / ".complete").exists():
-            count += 1
-print(count)
+    uploaded = json.loads(progress_path.read_text()).get("uploaded_folders", {})
+print(len(list_unuploaded_folder_paths(dataset_dir, scratch_dataset_dir, uploaded)))
PY
}

bytes_complete_pending_upload() {
-  python - <<PY
+  PYTHONPATH="$ROOT_DIR${PYTHONPATH:+:$PYTHONPATH}" python - <<PY
import json
from pathlib import Path
+from utils.dataset_pool import list_unuploaded_folder_paths
+
dataset_dir = Path("$DATASET_DIR")
+scratch_dataset_dir = Path("$SCRATCH_DATASET_DIR")
progress_path = Path("$PROGRESS_JSON")
-uploaded =
+uploaded = {}
if progress_path.exists():
-    uploaded =
+    uploaded = json.loads(progress_path.read_text()).get("uploaded_folders", {})
total = 0
-for
-    if
-        if path.is_file():
-            try:
-                total += path.stat().st_size
-            except OSError:
-                pass
+for _video_id, folder_path in list_unuploaded_folder_paths(dataset_dir, scratch_dataset_dir, uploaded):
+    for path in folder_path.rglob("*"):
+        if path.is_file():
+            try:
+                stat_result = path.stat()
+            except FileNotFoundError:
+                continue
+            allocated = getattr(stat_result, "st_blocks", 0) * 512
+            total += allocated if allocated > 0 else stat_result.st_size
print(total)
PY
}

@@ -1144,7 +1246,7 @@ download_loop() {
    [[ "$pending_download" =~ ^[0-9]+$ ]] || pending_download=0
    pending_process="$(count_pending_process 2>/dev/null || true)"
    [[ "$pending_process" =~ ^[0-9]+$ ]] || pending_process=0
-    raw_video_bytes="$(dir_size_bytes "$RAW_VIDEO_DIR" 2>/dev/null ||
+    raw_video_bytes="$(( $(dir_size_bytes "$RAW_VIDEO_DIR" 2>/dev/null || echo 0) + $(dir_size_bytes "$SCRATCH_RAW_VIDEO_DIR" 2>/dev/null || echo 0) ))"
    [[ "$raw_video_bytes" =~ ^[0-9]+$ ]] || raw_video_bytes=0
    echo "[download] iteration=$iteration pending_download=$pending_download raw_backlog=$pending_process raw_video_bytes=$raw_video_bytes pruned_raw_videos=$pruned stale_download_claims=$stale_download_claims stale_download_partials=$stale_download_partials stale_process_jobs=$stale_process_jobs stale_process_claims=$stale_process_claims stale_orphan_process_jobs=$stale_orphan_process_jobs"

@@ -1288,12 +1390,17 @@ upload_loop() {
  while true; do
    iteration=$((iteration + 1))
    local pruned
-    pruned="$(prune_processed_raw_videos)"
+    pruned="$(prune_processed_raw_videos 2>/dev/null || true)"
+    [[ "$pruned" =~ ^[0-9]+$ ]] || pruned=0
    local pending_download pending_process complete_pending_upload complete_pending_upload_bytes
-    pending_download="$(count_pending_downloads)"
+    pending_download="$(count_pending_downloads 2>/dev/null || true)"
+    [[ "$pending_download" =~ ^[0-9]+$ ]] || pending_download=0
+    pending_process="$(count_pending_process 2>/dev/null || true)"
+    [[ "$pending_process" =~ ^[0-9]+$ ]] || pending_process=0
+    complete_pending_upload="$(count_complete_pending_upload 2>/dev/null || true)"
+    [[ "$complete_pending_upload" =~ ^[0-9]+$ ]] || complete_pending_upload=0
+    complete_pending_upload_bytes="$(bytes_complete_pending_upload 2>/dev/null || true)"
+    [[ "$complete_pending_upload_bytes" =~ ^[0-9]+$ ]] || complete_pending_upload_bytes=0
    echo "[upload] iteration=$iteration pending_download=$pending_download raw_backlog=$pending_process complete_pending_upload=$complete_pending_upload complete_pending_upload_bytes=$complete_pending_upload_bytes pruned_raw_videos=$pruned"

    if [[ "$MAX_ITERATIONS" -gt 0 && "$iteration" -gt "$MAX_ITERATIONS" ]]; then
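squeue reports a still-pending array job as a single token such as 123456_[0-9%8], while the process claims are keyed per task as <jobid>_<taskid>. The expand_job_token helper added in the hunks above (copied verbatim below) bridges the two; the job IDs in this self-contained demo are made up.

import re

array_re = re.compile(r"^(?P<base>\d+)_\[(?P<start>\d+)-(?P<end>\d+)(?:%(?P<limit>\d+))?\]$")

def expand_job_token(job_token: str):
    job_token = job_token.strip()
    match = array_re.match(job_token)
    if not match:
        return [job_token]
    base = match.group("base")
    start = int(match.group("start"))
    end = int(match.group("end"))
    return [f"{base}_{index}" for index in range(start, end + 1)]

print(expand_job_token("123456_[0-4%2]"))  # ['123456_0', '123456_1', '123456_2', '123456_3', '123456_4']
print(expand_job_token("123456_3"))        # ['123456_3'] (running or non-array tokens pass through unchanged)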
scripts/pipeline01_download_video_fix_caption.py
CHANGED
@@ -21,7 +21,13 @@ REPO_ROOT = Path(__file__).resolve().parents[1]
if str(REPO_ROOT) not in sys.path:
    sys.path.insert(0, str(REPO_ROOT))

-from utils.stats_npz import
+from utils.stats_npz import update_video_stats_best_effort
+from utils.raw_video_pool import (
+    choose_download_target,
+    cleanup_partial_downloads as cleanup_partial_downloads_pool,
+    find_video_file as find_video_file_pool,
+    release_download_reservation,
+)


DEFAULT_SOURCE_METADATA_CSV = REPO_ROOT / "SignVerse-2M-metadata_ori.csv"

@@ -31,6 +37,7 @@ DEFAULT_RAW_CAPTION_DIR = REPO_ROOT / "raw_caption"
DEFAULT_RAW_METADATA_DIR = REPO_ROOT / "raw_metadata"
DEFAULT_DATASET_DIR = REPO_ROOT / "dataset"
DEFAULT_STATS_NPZ = REPO_ROOT / "stats.npz"
+DEFAULT_STATUS_JOURNAL_PATH = REPO_ROOT / "upload_status_journal.jsonl"
DEFAULT_YT_DLP_EXTRACTOR_ARGS = "youtube:player_client=web_safari,web"
COOKIE_DOMAINS = ("youtube.com", "google.com", "googlevideo.com", "ytimg.com")
TIMESTAMP_LINE_RE = re.compile(

@@ -66,10 +73,14 @@ def parse_args() -> argparse.Namespace:
    parser.add_argument("--source-metadata-csv", type=Path, default=DEFAULT_SOURCE_METADATA_CSV)
    parser.add_argument("--output-metadata-csv", type=Path, default=DEFAULT_OUTPUT_METADATA_CSV)
    parser.add_argument("--raw-video-dir", type=Path, default=DEFAULT_RAW_VIDEO_DIR)
+    parser.add_argument("--scratch-raw-video-dir", type=Path, default=None)
+    parser.add_argument("--home-raw-video-limit", type=int, default=180)
+    parser.add_argument("--scratch-raw-video-limit", type=int, default=2800)
    parser.add_argument("--raw-caption-dir", type=Path, default=DEFAULT_RAW_CAPTION_DIR)
    parser.add_argument("--raw-metadata-dir", type=Path, default=DEFAULT_RAW_METADATA_DIR)
    parser.add_argument("--dataset-dir", type=Path, default=DEFAULT_DATASET_DIR)
    parser.add_argument("--stats-npz", type=Path, default=DEFAULT_STATS_NPZ)
+    parser.add_argument("--status-journal-path", type=Path, default=DEFAULT_STATUS_JOURNAL_PATH)
    parser.add_argument("--limit", type=int, default=None)
    parser.add_argument("--video-ids", nargs="*", default=None)
    parser.add_argument("--force-metadata", action="store_true")

@@ -470,55 +481,52 @@ def subtitle_dir_for_video(dataset_dir: Path, video_id: str) -> Path:
    return dataset_dir / video_id / "captions"


-def find_video_file(raw_video_dir: Path, video_id: str) -> Path | None:
-    for path in raw_video_dir.glob(f"{video_id}.*"):
-        if path.suffix in {".mp4", ".mkv", ".webm", ".mov"}:
-            candidates.append(path)
-    return sorted(candidates)[0] if candidates else None
+def find_video_file(raw_video_dir: Path, video_id: str, scratch_raw_video_dir: Path | None = None) -> Path | None:
+    return find_video_file_pool(video_id, raw_video_dir, scratch_raw_video_dir)


-def iter_partial_download_files(raw_video_dir: Path, video_id: str) -> Iterable[Path]:
-    for path in raw_video_dir.glob(f"{video_id}*"):
-        if not path.is_file():
-            continue
-        suffixes = set(path.suffixes)
-        if ".part" in suffixes or ".ytdl" in suffixes or path.suffix in {".part", ".ytdl"}:
-            resolved = path.resolve()
-            if resolved not in seen:
-                seen.add(resolved)
-                yield path
+def iter_partial_download_files(raw_video_dir: Path, video_id: str, scratch_raw_video_dir: Path | None = None) -> Iterable[Path]:
+    yield from cleanup_partial_downloads_pool.__globals__['iter_partial_download_files'](video_id, raw_video_dir, scratch_raw_video_dir)


-def cleanup_partial_downloads(raw_video_dir: Path, video_id: str) -> None:
-    partial_path.unlink(missing_ok=True)
+def cleanup_partial_downloads(raw_video_dir: Path, video_id: str, scratch_raw_video_dir: Path | None = None) -> None:
+    cleanup_partial_downloads_pool(video_id, raw_video_dir, scratch_raw_video_dir)


def download_video(video_id: str, raw_video_dir: Path, args: argparse.Namespace) -> Tuple[str, str]:
-            "--format",
-            "worstvideo*+worstaudio/worst",
-            "--format-sort",
-            "+res,+size,+br,+fps",
-            "--merge-output-format",
-            "mp4",
-            youtube_url(video_id),
-        ]
+    target_raw_video_dir, reservation_path = choose_download_target(
+        raw_video_dir,
+        args.scratch_raw_video_dir,
+        args.home_raw_video_limit,
+        args.scratch_raw_video_limit,
+        args.claim_dir,
+        video_id,
    )
+    target_raw_video_dir.mkdir(parents=True, exist_ok=True)
+    try:
+        cleanup_partial_downloads(raw_video_dir, video_id, args.scratch_raw_video_dir)
+        command = build_yt_dlp_base_command(args)
+        command.extend(
+            [
+                "--output",
+                str(target_raw_video_dir / "%(id)s.%(ext)s"),
+                "--format",
+                "worstvideo*+worstaudio/worst",
+                "--format-sort",
+                "+res,+size,+br,+fps",
+                "--merge-output-format",
+                "mp4",
+                youtube_url(video_id),
+            ]
+        )
+        result = run_command(command)
+        video_path = find_video_file(raw_video_dir, video_id, args.scratch_raw_video_dir)
+        cleanup_partial_downloads(raw_video_dir, video_id, args.scratch_raw_video_dir)
+        if result.returncode != 0 and not video_path:
+            raise RuntimeError(result.stderr.strip() or result.stdout.strip() or "video download failed")
+        return repo_relative_or_absolute(video_path) if video_path else "", ""
+    finally:
+        release_download_reservation(reservation_path)


def subtitle_file_language(path: Path, video_id: str) -> str:

@@ -853,7 +861,7 @@ def main() -> None:
                failure_count, should_skip = record_row_failure(row, stats_record, metadata_error, args.max_failures_before_skip)
                stats_record["last_error"] = row["error"]
                persist_row_update(args, video_id, row, fieldnames)
+                update_video_stats_best_effort(args.stats_npz, args.status_journal_path, video_id, **stats_record)
                print(f"  metadata failed: {metadata_error}")
                if should_skip:
                    print(f"  skipping after {failure_count} failures")

@@ -908,7 +916,7 @@ def main() -> None:
        row["raw_caption_dir"] = ""

        try:
-            existing_video = find_video_file(args.raw_video_dir, video_id)
+            existing_video = find_video_file(args.raw_video_dir, video_id, args.scratch_raw_video_dir)
            if args.skip_video_download:
                row["download_status"] = "skipped"
                row["raw_video_path"] = repo_relative_or_absolute(existing_video) if existing_video else ""

@@ -947,7 +955,7 @@ def main() -> None:
            stats_record["last_error"] = row["error"]
            stats_record["updated_at"] = row["processed_at"]
            persist_row_update(args, video_id, row, fieldnames)
+            update_video_stats_best_effort(args.stats_npz, args.status_journal_path, video_id, **stats_record)

            if row["download_status"] == "failed":
                print(f"  video download failed: {download_error}")
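download_video now asks utils/raw_video_pool.choose_download_target for a target directory plus a reservation, and always calls release_download_reservation in the finally block. The module itself (+164 lines) is not part of this page, so the sketch below is only an assumption about how such a chooser could work: count videos plus outstanding reservation markers per pool, prefer the home pool until --home-raw-video-limit is reached, then spill to scratch. Only the call signature is taken from the diff above; the reservation-file convention is invented for illustration.

from pathlib import Path

VIDEO_EXTENSIONS = {".mp4", ".mkv", ".webm", ".mov"}

def _occupancy(raw_dir: Path, claim_dir: Path, pool: str) -> int:
    # Finished or in-flight videos in this pool, plus reservations other jobs are holding.
    videos = 0
    if raw_dir.exists():
        videos = sum(1 for p in raw_dir.iterdir() if p.is_file() and p.suffix.lower() in VIDEO_EXTENSIONS)
    reservations = sum(1 for _ in claim_dir.glob(f"*.{pool}.reserve"))
    return videos + reservations

def choose_download_target(raw_video_dir, scratch_raw_video_dir, home_limit, scratch_limit, claim_dir, video_id):
    # scratch_limit could cap the scratch pool the same way; this sketch ignores it.
    claim_dir = Path(claim_dir)
    claim_dir.mkdir(parents=True, exist_ok=True)
    if scratch_raw_video_dir is None or _occupancy(Path(raw_video_dir), claim_dir, "home") < home_limit:
        target, pool = Path(raw_video_dir), "home"
    else:
        target, pool = Path(scratch_raw_video_dir), "scratch"
    reservation_path = claim_dir / f"{video_id}.{pool}.reserve"
    reservation_path.touch()  # hold the slot while yt-dlp runs
    return target, reservation_path

def release_download_reservation(reservation_path) -> None:
    if reservation_path is not None:
        Path(reservation_path).unlink(missing_ok=True)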
scripts/pipeline02_extract_dwpose_from_video.py
CHANGED
|
@@ -29,12 +29,15 @@ REPO_ROOT = Path(__file__).resolve().parents[1]
|
|
| 29 |
if str(REPO_ROOT) not in sys.path:
|
| 30 |
sys.path.insert(0, str(REPO_ROOT))
|
| 31 |
|
| 32 |
-
from utils.stats_npz import load_stats,
|
|
|
|
|
|
|
| 33 |
|
| 34 |
|
| 35 |
DEFAULT_RAW_VIDEO_DIR = REPO_ROOT / "raw_video"
|
| 36 |
DEFAULT_DATASET_DIR = REPO_ROOT / "dataset"
|
| 37 |
DEFAULT_STATS_NPZ = REPO_ROOT / "stats.npz"
|
|
|
|
| 38 |
VIDEO_EXTENSIONS = {".mp4", ".mkv", ".webm", ".mov"}
|
| 39 |
COMPLETE_MARKER_NAME = ".complete"
|
| 40 |
|
|
@@ -93,7 +96,9 @@ def parse_args() -> argparse.Namespace:
|
|
| 93 |
description="Extract DWpose NPZ files from raw videos."
|
| 94 |
)
|
| 95 |
parser.add_argument("--raw-video-dir", type=Path, default=DEFAULT_RAW_VIDEO_DIR)
|
|
|
|
| 96 |
parser.add_argument("--dataset-dir", type=Path, default=DEFAULT_DATASET_DIR)
|
|
|
|
| 97 |
parser.add_argument("--fps", type=int, default=24)
|
| 98 |
parser.add_argument("--limit", type=int, default=None)
|
| 99 |
parser.add_argument("--workers", type=int, default=None)
|
|
@@ -102,6 +107,7 @@ def parse_args() -> argparse.Namespace:
|
|
| 102 |
parser.add_argument("--delete-source-on-success", action="store_true")
|
| 103 |
parser.add_argument("--tmp-root", type=Path, default=Path("/tmp"))
|
| 104 |
parser.add_argument("--stats-npz", type=Path, default=DEFAULT_STATS_NPZ)
|
|
|
|
| 105 |
parser.add_argument(
|
| 106 |
"--single-poses-npz",
|
| 107 |
dest="single_poses_npz",
|
|
@@ -185,18 +191,21 @@ def parse_args() -> argparse.Namespace:
|
|
| 185 |
|
| 186 |
def select_video_paths(args: argparse.Namespace) -> List[Path]:
|
| 187 |
args.raw_video_dir.mkdir(parents=True, exist_ok=True)
|
|
|
|
|
|
|
| 188 |
args.dataset_dir.mkdir(parents=True, exist_ok=True)
|
|
|
|
|
|
|
| 189 |
video_id_filter = set(args.video_ids or [])
|
| 190 |
stats = load_stats(args.stats_npz)
|
| 191 |
|
| 192 |
selected = []
|
| 193 |
-
for path in sorted(args.raw_video_dir.
|
| 194 |
-
if not path.is_file() or path.suffix.lower() not in VIDEO_EXTENSIONS:
|
| 195 |
-
continue
|
| 196 |
video_id = path.stem
|
| 197 |
if video_id_filter and video_id not in video_id_filter:
|
| 198 |
continue
|
| 199 |
-
|
|
|
|
| 200 |
complete_marker = npz_dir / COMPLETE_MARKER_NAME
|
| 201 |
if (
|
| 202 |
not args.force
|
|
@@ -566,6 +575,9 @@ def optimized_process_frame_batch(
|
|
| 566 |
def process_video(
|
| 567 |
video_path: Path,
|
| 568 |
dataset_dir: Path,
|
|
|
|
|
|
|
|
|
|
| 569 |
fps: int,
|
| 570 |
detector: DWposeDetector,
|
| 571 |
tmp_root: Path,
|
|
@@ -578,7 +590,8 @@ def process_video(
|
|
| 578 |
optimized_frame_stride: int,
|
| 579 |
) -> None:
|
| 580 |
video_id = video_path.stem
|
| 581 |
-
|
|
|
|
| 582 |
complete_marker = output_npz_dir / COMPLETE_MARKER_NAME
|
| 583 |
poses_npz_path = output_npz_dir / "poses.npz"
|
| 584 |
if output_npz_dir.exists() and complete_marker.exists() and not force:
|
|
@@ -732,8 +745,9 @@ def worker(rank: int, worker_count: int, video_paths: Sequence[Path], args: argp
|
|
| 732 |
if index % worker_count != rank:
|
| 733 |
continue
|
| 734 |
try:
|
| 735 |
-
|
| 736 |
args.stats_npz,
|
|
|
|
| 737 |
video_path.stem,
|
| 738 |
process_status="running",
|
| 739 |
last_error="",
|
|
@@ -742,6 +756,9 @@ def worker(rank: int, worker_count: int, video_paths: Sequence[Path], args: argp
|
|
| 742 |
process_video(
|
| 743 |
video_path=video_path,
|
| 744 |
dataset_dir=args.dataset_dir,
|
|
|
|
|
|
|
|
|
|
| 745 |
fps=args.fps,
|
| 746 |
detector=detector,
|
| 747 |
tmp_root=args.tmp_root,
|
|
@@ -753,8 +770,9 @@ def worker(rank: int, worker_count: int, video_paths: Sequence[Path], args: argp
|
|
| 753 |
optimized_detect_resolution=args.optimized_detect_resolution,
|
| 754 |
optimized_frame_stride=args.optimized_frame_stride,
|
| 755 |
)
|
| 756 |
-
|
| 757 |
args.stats_npz,
|
|
|
|
| 758 |
video_path.stem,
|
| 759 |
process_status="ok",
|
| 760 |
last_error="",
|
|
@@ -764,8 +782,9 @@ def worker(rank: int, worker_count: int, video_paths: Sequence[Path], args: argp
|
|
| 764 |
video_path.unlink()
|
| 765 |
print(f"Worker {rank}: deleted source video {video_path.name}")
|
| 766 |
except Exception as exc:
|
| 767 |
-
|
| 768 |
args.stats_npz,
|
|
|
|
| 769 |
video_path.stem,
|
| 770 |
process_status="failed",
|
| 771 |
last_error=str(exc),
|
|
|
|
| 29 |
if str(REPO_ROOT) not in sys.path:
|
| 30 |
sys.path.insert(0, str(REPO_ROOT))
|
| 31 |
|
| 32 |
+
from utils.stats_npz import load_stats, update_video_stats_best_effort
|
| 33 |
+
from utils.raw_video_pool import iter_raw_video_files
|
| 34 |
+
from utils.dataset_pool import dataset_dir_for_video, find_dataset_video_dir
|
| 35 |
|
| 36 |
|
| 37 |
DEFAULT_RAW_VIDEO_DIR = REPO_ROOT / "raw_video"
|
| 38 |
DEFAULT_DATASET_DIR = REPO_ROOT / "dataset"
|
| 39 |
DEFAULT_STATS_NPZ = REPO_ROOT / "stats.npz"
|
| 40 |
+
DEFAULT_STATUS_JOURNAL_PATH = REPO_ROOT / "upload_status_journal.jsonl"
|
| 41 |
VIDEO_EXTENSIONS = {".mp4", ".mkv", ".webm", ".mov"}
|
| 42 |
COMPLETE_MARKER_NAME = ".complete"
|
| 43 |
|
|
|
|
| 96 |
description="Extract DWpose NPZ files from raw videos."
|
| 97 |
)
|
| 98 |
parser.add_argument("--raw-video-dir", type=Path, default=DEFAULT_RAW_VIDEO_DIR)
|
| 99 |
+
parser.add_argument("--scratch-raw-video-dir", type=Path, default=None)
|
| 100 |
parser.add_argument("--dataset-dir", type=Path, default=DEFAULT_DATASET_DIR)
|
| 101 |
+
parser.add_argument("--scratch-dataset-dir", type=Path, default=None)
|
| 102 |
parser.add_argument("--fps", type=int, default=24)
|
| 103 |
parser.add_argument("--limit", type=int, default=None)
|
| 104 |
parser.add_argument("--workers", type=int, default=None)
|
|
|
|
| 107 |
parser.add_argument("--delete-source-on-success", action="store_true")
|
| 108 |
parser.add_argument("--tmp-root", type=Path, default=Path("/tmp"))
|
| 109 |
parser.add_argument("--stats-npz", type=Path, default=DEFAULT_STATS_NPZ)
|
| 110 |
+
parser.add_argument("--status-journal-path", type=Path, default=DEFAULT_STATUS_JOURNAL_PATH)
|
| 111 |
parser.add_argument(
|
| 112 |
"--single-poses-npz",
|
| 113 |
dest="single_poses_npz",
|
|
|
|
| 191 |
|
| 192 |
def select_video_paths(args: argparse.Namespace) -> List[Path]:
|
| 193 |
args.raw_video_dir.mkdir(parents=True, exist_ok=True)
|
| 194 |
+
if args.scratch_raw_video_dir is not None:
|
| 195 |
+
args.scratch_raw_video_dir.mkdir(parents=True, exist_ok=True)
|
| 196 |
args.dataset_dir.mkdir(parents=True, exist_ok=True)
|
| 197 |
+
if args.scratch_dataset_dir is not None:
|
| 198 |
+
args.scratch_dataset_dir.mkdir(parents=True, exist_ok=True)
|
| 199 |
video_id_filter = set(args.video_ids or [])
|
| 200 |
stats = load_stats(args.stats_npz)
|
| 201 |
|
| 202 |
selected = []
|
| 203 |
+
for path in sorted(iter_raw_video_files(args.raw_video_dir, args.scratch_raw_video_dir), key=lambda p: (p.stem, str(p))):
|
| 204 |
video_id = path.stem
|
| 205 |
if video_id_filter and video_id not in video_id_filter:
|
| 206 |
continue
|
| 207 |
+
dataset_root = find_dataset_video_dir(video_id, args.dataset_dir, args.scratch_dataset_dir)
|
| 208 |
+
npz_dir = dataset_root / video_id / "npz"
|
| 209 |
complete_marker = npz_dir / COMPLETE_MARKER_NAME
|
| 210 |
if (
|
| 211 |
not args.force
|
|
|
|
| 575 |
def process_video(
|
| 576 |
video_path: Path,
|
| 577 |
dataset_dir: Path,
|
| 578 |
+
scratch_dataset_dir: Path | None,
|
| 579 |
+
raw_video_dir: Path,
|
| 580 |
+
scratch_raw_video_dir: Path | None,
|
| 581 |
fps: int,
|
| 582 |
detector: DWposeDetector,
|
| 583 |
tmp_root: Path,
|
|
|
|
| 590 |
optimized_frame_stride: int,
|
| 591 |
) -> None:
|
| 592 |
video_id = video_path.stem
|
| 593 |
+
output_dataset_dir = dataset_dir_for_video(video_path, raw_video_dir, scratch_raw_video_dir, dataset_dir, scratch_dataset_dir)
|
| 594 |
+
output_npz_dir = output_dataset_dir / video_id / "npz"
|
| 595 |
complete_marker = output_npz_dir / COMPLETE_MARKER_NAME
|
| 596 |
poses_npz_path = output_npz_dir / "poses.npz"
|
| 597 |
if output_npz_dir.exists() and complete_marker.exists() and not force:
|
|
|
|
| 745 |
if index % worker_count != rank:
|
| 746 |
continue
|
| 747 |
try:
|
| 748 |
+
update_video_stats_best_effort(
|
| 749 |
args.stats_npz,
|
| 750 |
+
args.status_journal_path,
|
| 751 |
video_path.stem,
|
| 752 |
process_status="running",
|
| 753 |
last_error="",
|
|
|
|
| 756 |
process_video(
|
| 757 |
video_path=video_path,
|
| 758 |
dataset_dir=args.dataset_dir,
|
| 759 |
+
scratch_dataset_dir=args.scratch_dataset_dir,
|
| 760 |
+
raw_video_dir=args.raw_video_dir,
|
| 761 |
+
scratch_raw_video_dir=args.scratch_raw_video_dir,
|
| 762 |
fps=args.fps,
|
| 763 |
detector=detector,
|
| 764 |
tmp_root=args.tmp_root,
|
|
|
|
| 770 |
optimized_detect_resolution=args.optimized_detect_resolution,
|
| 771 |
optimized_frame_stride=args.optimized_frame_stride,
|
| 772 |
)
|
| 773 |
+
update_video_stats_best_effort(
|
| 774 |
args.stats_npz,
|
| 775 |
+
args.status_journal_path,
|
| 776 |
video_path.stem,
|
| 777 |
process_status="ok",
|
| 778 |
last_error="",
|
|
|
|
| 782 |
video_path.unlink()
|
| 783 |
print(f"Worker {rank}: deleted source video {video_path.name}")
|
| 784 |
except Exception as exc:
|
| 785 |
+
update_video_stats_best_effort(
|
| 786 |
args.stats_npz,
|
| 787 |
+
args.status_journal_path,
|
| 788 |
video_path.stem,
|
| 789 |
process_status="failed",
|
| 790 |
last_error=str(exc),
|
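Note on the pool helpers used above: pipeline02 now reads raw videos from both the primary pool and an optional scratch pool, and writes NPZ output into whichever dataset pool matches the source video. The helpers it calls (iter_raw_video_files, dataset_dir_for_video) live in utils/raw_video_pool.py and utils/dataset_pool.py, which are not reproduced in this excerpt; the sketch below only illustrates the behaviour the call sites assume, and the bodies are assumptions rather than the committed implementation.

from pathlib import Path
from typing import Iterator, Optional

VIDEO_EXTENSIONS = {".mp4", ".mkv", ".webm", ".mov"}

def iter_raw_video_files(raw_video_dir: Path, scratch_raw_video_dir: Optional[Path]) -> Iterator[Path]:
    # Yield candidate videos from the primary pool first, then the scratch pool if configured.
    for pool in (raw_video_dir, scratch_raw_video_dir):
        if pool is None or not pool.exists():
            continue
        for path in pool.iterdir():
            if path.is_file() and path.suffix.lower() in VIDEO_EXTENSIONS:
                yield path

def dataset_dir_for_video(video_path: Path, raw_video_dir: Path, scratch_raw_video_dir: Optional[Path],
                          dataset_dir: Path, scratch_dataset_dir: Optional[Path]) -> Path:
    # Keep output on the same storage tier as the source: scratch videos -> scratch dataset pool.
    if scratch_raw_video_dir is not None and scratch_dataset_dir is not None \
            and video_path.parent == scratch_raw_video_dir:
        return scratch_dataset_dir
    return dataset_dir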
scripts/pipeline03_upload_to_huggingface.py
CHANGED
|
@@ -1,8 +1,12 @@
|
|
| 1 |
#!/usr/bin/env python3
|
| 2 |
|
| 3 |
import argparse
|
|
|
|
|
|
|
|
|
|
| 4 |
import json
|
| 5 |
import os
|
|
|
|
| 6 |
import shutil
|
| 7 |
import subprocess
|
| 8 |
import sys
|
|
@@ -20,7 +24,8 @@ REPO_ROOT = Path(__file__).resolve().parents[1]
|
|
| 20 |
if str(REPO_ROOT) not in sys.path:
|
| 21 |
sys.path.insert(0, str(REPO_ROOT))
|
| 22 |
|
| 23 |
-
from utils.stats_npz import update_many_video_stats_with_retry
|
|
|
|
| 24 |
|
| 25 |
|
| 26 |
DEFAULT_DATASET_DIR = REPO_ROOT / "dataset"
|
|
@@ -43,17 +48,26 @@ def parse_args() -> argparse.Namespace:
|
|
| 43 |
description="Archive NPZ folders into 14GB tar files and upload them to Hugging Face."
|
| 44 |
)
|
| 45 |
parser.add_argument("--dataset-dir", type=Path, default=DEFAULT_DATASET_DIR)
|
|
|
|
| 46 |
parser.add_argument("--raw-video-dir", type=Path, default=DEFAULT_RAW_VIDEO_DIR)
|
|
|
|
| 47 |
parser.add_argument("--raw-caption-dir", type=Path, default=DEFAULT_RAW_CAPTION_DIR)
|
| 48 |
parser.add_argument("--raw-metadata-dir", type=Path, default=DEFAULT_RAW_METADATA_DIR)
|
| 49 |
parser.add_argument("--archive-dir", type=Path, default=DEFAULT_ARCHIVE_DIR)
|
| 50 |
parser.add_argument("--progress-path", type=Path, default=DEFAULT_PROGRESS_PATH)
|
| 51 |
parser.add_argument("--stats-npz", type=Path, default=DEFAULT_STATS_NPZ)
|
| 52 |
parser.add_argument("--status-journal-path", type=Path, default=DEFAULT_STATUS_JOURNAL_PATH)
|
|
|
|
| 53 |
parser.add_argument("--repo-id", default="SignerX/SignVerse-2M")
|
| 54 |
parser.add_argument("--repo-type", default="dataset")
|
|
|
|
| 55 |
parser.add_argument("--target-bytes", type=int, default=DEFAULT_TARGET_BYTES)
|
| 56 |
parser.add_argument("--target-folders", type=int, default=DEFAULT_TARGET_FOLDERS)
|
|
| 57 |
parser.add_argument("--require-target-bytes", action="store_true", default=True)
|
| 58 |
parser.add_argument("--allow-small-final-batch", action="store_true")
|
| 59 |
parser.add_argument("--dry-run", action="store_true")
|
|
@@ -78,6 +92,22 @@ def resolve_token(cli_token: str | None) -> str | None:
|
|
| 78 |
|
| 79 |
|
| 80 |
|
|
|
|
| 81 |
def append_status_journal(journal_path: Path, video_ids: Sequence[str], **updates: str) -> None:
|
| 82 |
journal_path.parent.mkdir(parents=True, exist_ok=True)
|
| 83 |
payload = {
|
|
@@ -85,11 +115,15 @@ def append_status_journal(journal_path: Path, video_ids: Sequence[str], **update
|
|
| 85 |
"updates": {k: ("" if v is None else str(v)) for k, v in updates.items()},
|
| 86 |
"recorded_at": time.strftime("%Y-%m-%d %H:%M:%S"),
|
| 87 |
}
|
| 88 |
-
with journal_path.open("a", encoding="utf-8") as handle:
|
| 89 |
-
handle.write(json.dumps(payload, ensure_ascii=False) + "\n")
|
|
|
|
| 90 |
|
| 91 |
|
| 92 |
-
def update_many_video_stats_best_effort(stats_path: Path, journal_path: Path, video_ids: Sequence[str], **updates: str) -> None:
|
|
|
|
|
|
|
|
|
|
| 93 |
try:
|
| 94 |
update_many_video_stats_with_retry(stats_path, video_ids, **updates)
|
| 95 |
except Exception as exc:
|
|
@@ -98,17 +132,62 @@ def update_many_video_stats_best_effort(stats_path: Path, journal_path: Path, vi
|
|
| 98 |
append_status_journal(journal_path, video_ids, **payload)
|
| 99 |
print(f"Warning: stats.npz update deferred to journal due to: {exc}")
|
| 100 |
|
| 101 |
-
def load_progress(progress_path: Path) -> Dict[str, object]:
|
| 102 |
-
|
| 103 |
-
|
| 104 |
-
|
|
|
|
|
|
|
|
|
| 105 |
return {"archives": {}, "uploaded_folders": {}}
|
| 106 |
|
| 107 |
|
| 108 |
def save_progress(progress_path: Path, progress: Dict[str, object]) -> None:
|
| 109 |
progress_path.parent.mkdir(parents=True, exist_ok=True)
|
| 110 |
-
with progress_path.open("w", encoding="utf-8") as handle:
|
|
|
|
| 111 |
json.dump(progress, handle, ensure_ascii=False, indent=2)
|
|
|
|
|
|
|
|
|
|
| 112 |
|
| 113 |
|
| 114 |
def folder_size_bytes(folder_path: Path) -> int:
|
|
@@ -125,20 +204,9 @@ def folder_size_bytes(folder_path: Path) -> int:
|
|
| 125 |
return total
|
| 126 |
|
| 127 |
|
| 128 |
-
def list_unuploaded_folder_paths(dataset_dir: Path, progress: Dict[str, object]) -> List[Tuple[str, Path]]:
|
| 129 |
uploaded_folders = progress.get("uploaded_folders", {})
|
| 130 |
-
|
| 131 |
-
for folder_path in sorted(dataset_dir.iterdir()):
|
| 132 |
-
if not folder_path.is_dir():
|
| 133 |
-
continue
|
| 134 |
-
folder_name = folder_path.name
|
| 135 |
-
if folder_name in uploaded_folders:
|
| 136 |
-
continue
|
| 137 |
-
npz_dir = folder_path / "npz"
|
| 138 |
-
if not (npz_dir / COMPLETE_MARKER_NAME).exists():
|
| 139 |
-
continue
|
| 140 |
-
folders.append((folder_name, folder_path))
|
| 141 |
-
return folders
|
| 142 |
|
| 143 |
|
| 144 |
def enrich_folder_sizes(folders: Sequence[Tuple[str, Path]]) -> List[Tuple[str, Path, int]]:
|
|
@@ -188,23 +256,24 @@ def preferred_temp_archive_dir() -> Path:
|
|
| 188 |
path.mkdir(parents=True, exist_ok=True)
|
| 189 |
return path
|
| 190 |
|
| 191 |
-
def create_tar_archive(archive_path: Path,
|
| 192 |
archive_path.parent.mkdir(parents=True, exist_ok=True)
|
| 193 |
with tarfile.open(archive_path, mode="w") as tar:
|
| 194 |
-
for folder_name in
|
| 195 |
-
tar.add(
|
| 196 |
|
| 197 |
|
| 198 |
-
def upload_archive(api: HfApi, repo_id: str, repo_type: str, archive_path: Path) -> None:
|
| 199 |
api.upload_file(
|
| 200 |
path_or_fileobj=str(archive_path),
|
| 201 |
path_in_repo=f"dataset/{archive_path.name}",
|
| 202 |
repo_id=repo_id,
|
| 203 |
repo_type=repo_type,
|
|
|
|
| 204 |
)
|
| 205 |
|
| 206 |
|
| 207 |
-
def upload_runtime_state_files(api: HfApi | None, repo_id: str, repo_type: str, progress_path: Path, journal_path: Path) -> None:
|
| 208 |
if api is None:
|
| 209 |
return
|
| 210 |
api.upload_file(
|
|
@@ -212,6 +281,7 @@ def upload_runtime_state_files(api: HfApi | None, repo_id: str, repo_type: str,
|
|
| 212 |
path_in_repo="runtime_state/archive_upload_progress.json",
|
| 213 |
repo_id=repo_id,
|
| 214 |
repo_type=repo_type,
|
|
|
|
| 215 |
)
|
| 216 |
if journal_path.exists():
|
| 217 |
api.upload_file(
|
|
@@ -219,13 +289,32 @@ def upload_runtime_state_files(api: HfApi | None, repo_id: str, repo_type: str,
|
|
| 219 |
path_in_repo="runtime_state/upload_status_journal.jsonl",
|
| 220 |
repo_id=repo_id,
|
| 221 |
repo_type=repo_type,
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 222 |
)
|
| 223 |
|
| 224 |
|
| 225 |
-
def upload_archive_streaming(api: HfApi, repo_id: str, repo_type: str,
|
| 226 |
if api is None:
|
| 227 |
raise RuntimeError('api-stream upload requires a Hugging Face token')
|
| 228 |
-
command = ['tar', '-cf', '-'
|
|
|
|
|
|
|
| 229 |
process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
|
| 230 |
assert process.stdout is not None
|
| 231 |
stderr_chunks = []
|
|
@@ -244,6 +333,7 @@ def upload_archive_streaming(api: HfApi, repo_id: str, repo_type: str, dataset_d
|
|
| 244 |
path_in_repo=f"dataset/{archive_name}",
|
| 245 |
repo_id=repo_id,
|
| 246 |
repo_type=repo_type,
|
|
|
|
| 247 |
)
|
| 248 |
finally:
|
| 249 |
try:
|
|
@@ -273,7 +363,7 @@ def run_git(command: Sequence[str], cwd: Path) -> str:
|
|
| 273 |
return (result.stdout or result.stderr or "").strip()
|
| 274 |
|
| 275 |
|
| 276 |
-
def ensure_git_upload_repo(clone_dir: Path, repo_id: str, repo_type: str) -> Path:
|
| 277 |
remote_url = repo_git_url(repo_id, repo_type)
|
| 278 |
clone_dir.parent.mkdir(parents=True, exist_ok=True)
|
| 279 |
if not (clone_dir / '.git').exists():
|
|
@@ -283,7 +373,12 @@ def ensure_git_upload_repo(clone_dir: Path, repo_id: str, repo_type: str) -> Pat
|
|
| 283 |
if configured != remote_url:
|
| 284 |
raise RuntimeError(f"Git upload clone remote mismatch: {configured} != {remote_url}")
|
| 285 |
run_git(["git", "fetch", "origin"], clone_dir)
|
| 286 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 287 |
run_git(["git", "lfs", "install", "--local"], clone_dir)
|
| 288 |
try:
|
| 289 |
run_git(["git", "config", "user.name"], clone_dir)
|
|
@@ -296,13 +391,13 @@ def ensure_git_upload_repo(clone_dir: Path, repo_id: str, repo_type: str) -> Pat
|
|
| 296 |
return clone_dir
|
| 297 |
|
| 298 |
|
| 299 |
-
def list_repo_files_via_git(clone_dir: Path, repo_id: str, repo_type: str) -> List[str]:
|
| 300 |
-
repo_dir = ensure_git_upload_repo(clone_dir, repo_id, repo_type)
|
| 301 |
return [path.name for path in repo_dir.iterdir() if path.is_file()]
|
| 302 |
|
| 303 |
|
| 304 |
-
def upload_archive_via_git(clone_dir: Path, repo_id: str, repo_type: str, archive_path: Path) -> None:
|
| 305 |
-
repo_dir = ensure_git_upload_repo(clone_dir, repo_id, repo_type)
|
| 306 |
target_path = repo_dir / archive_path.name
|
| 307 |
shutil.copy2(archive_path, target_path)
|
| 308 |
run_git(["git", "add", archive_path.name], repo_dir)
|
|
@@ -312,50 +407,60 @@ def upload_archive_via_git(clone_dir: Path, repo_id: str, repo_type: str, archiv
|
|
| 312 |
if diff_result.returncode != 1:
|
| 313 |
raise RuntimeError(f"git diff --cached failed for {archive_path.name}")
|
| 314 |
run_git(["git", "commit", "-m", f"Add {archive_path.name}"], repo_dir)
|
| 315 |
-
run_git(["git", "push", "origin",
|
| 316 |
|
| 317 |
|
| 318 |
def cleanup_local_assets(
|
| 319 |
video_ids: Sequence[str],
|
| 320 |
dataset_dir: Path,
|
|
|
|
| 321 |
raw_video_dir: Path,
|
|
|
|
| 322 |
raw_caption_dir: Path,
|
| 323 |
raw_metadata_dir: Path,
|
| 324 |
) -> None:
|
| 325 |
for video_id in video_ids:
|
| 326 |
-
dataset_video_dir = dataset_dir / video_id
|
| 327 |
if dataset_video_dir.exists():
|
| 328 |
shutil.rmtree(dataset_video_dir, ignore_errors=True)
|
| 329 |
-
for path in raw_video_dir.glob(f"{video_id}.*"):
|
| 330 |
-
if path.is_file():
|
| 331 |
-
path.unlink()
|
|
|
|
|
|
|
|
|
|
| 332 |
caption_dir = raw_caption_dir / video_id
|
| 333 |
if caption_dir.exists():
|
| 334 |
shutil.rmtree(caption_dir, ignore_errors=True)
|
| 335 |
metadata_path = raw_metadata_dir / f"{video_id}.json"
|
| 336 |
if metadata_path.exists():
|
| 337 |
-
metadata_path.unlink()
|
| 338 |
|
| 339 |
|
| 340 |
def prune_uploaded_runtime_residue(
|
| 341 |
progress: Dict[str, object],
|
| 342 |
dataset_dir: Path,
|
|
|
|
| 343 |
raw_video_dir: Path,
|
|
|
|
| 344 |
raw_caption_dir: Path,
|
| 345 |
raw_metadata_dir: Path,
|
| 346 |
) -> None:
|
| 347 |
uploaded = set(progress.get("uploaded_folders", {}))
|
| 348 |
for video_id in uploaded:
|
| 349 |
-
for path in raw_video_dir.glob(f"{video_id}.*"):
|
| 350 |
-
if path.is_file():
|
| 351 |
-
path.unlink()
|
|
|
|
|
|
|
|
|
|
| 352 |
caption_dir = raw_caption_dir / video_id
|
| 353 |
if caption_dir.exists():
|
| 354 |
shutil.rmtree(caption_dir, ignore_errors=True)
|
| 355 |
metadata_path = raw_metadata_dir / f"{video_id}.json"
|
| 356 |
if metadata_path.exists():
|
| 357 |
-
metadata_path.unlink()
|
| 358 |
-
dataset_video_dir = dataset_dir / video_id
|
| 359 |
if dataset_video_dir.exists() and not (dataset_video_dir / "npz" / COMPLETE_MARKER_NAME).exists():
|
| 360 |
shutil.rmtree(dataset_video_dir, ignore_errors=True)
|
| 361 |
|
|
@@ -371,7 +476,19 @@ def format_size(num_bytes: int) -> str:
|
|
| 371 |
|
| 372 |
def main() -> None:
|
| 373 |
args = parse_args()
|
| 374 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 375 |
progress = load_progress(args.progress_path)
|
| 376 |
print(
|
| 377 |
f"[pipeline03] loaded progress archives={len(progress.get('archives', {}))} "
|
|
@@ -382,6 +499,8 @@ def main() -> None:
|
|
| 382 |
print(f"[pipeline03] token_present={bool(resolved_token)}", flush=True)
|
| 383 |
api = HfApi(token=resolved_token) if args.upload_mode in {"api", "api-stream"} else None
|
| 384 |
args.dataset_dir.mkdir(parents=True, exist_ok=True)
|
|
|
|
|
|
|
| 385 |
|
| 386 |
try:
|
| 387 |
if args.upload_mode in {"api", "api-stream"}:
|
|
@@ -389,46 +508,61 @@ def main() -> None:
|
|
| 389 |
repo_files = []
|
| 390 |
else:
|
| 391 |
print("[pipeline03] listing repo files via git", flush=True)
|
| 392 |
-
repo_files = list_repo_files_via_git(args.git_clone_dir, args.repo_id, args.repo_type)
|
| 393 |
except Exception as exc:
|
| 394 |
print(f"[pipeline03] repo file listing failed: {exc}", flush=True)
|
| 395 |
traceback.print_exc()
|
| 396 |
repo_files = []
|
| 397 |
|
| 398 |
while True:
|
| 399 |
-
|
| 400 |
-
progress
|
| 401 |
-
|
| 402 |
-
|
| 403 |
-
|
| 404 |
-
|
| 405 |
-
|
| 406 |
-
|
| 407 |
-
|
| 408 |
-
|
| 409 |
-
|
| 410 |
-
|
| 411 |
-
|
| 412 |
-
|
| 413 |
-
|
| 414 |
-
batch = enrich_folder_sizes(selected_folder_paths)
|
| 415 |
-
batch_names = [name for name, _, _ in batch]
|
| 416 |
-
batch_bytes = sum(folder_bytes for _, _, folder_bytes in batch)
|
| 417 |
-
print(f"[pipeline03] folder threshold reached; selecting first {len(batch_names)} folders without global size scan", flush=True)
|
| 418 |
-
else:
|
| 419 |
-
remaining_folders = enrich_folder_sizes(remaining_folder_paths)
|
| 420 |
-
remaining_bytes = total_batchable_bytes(remaining_folders)
|
| 421 |
-
require_target_bytes = args.require_target_bytes and not args.allow_small_final_batch
|
| 422 |
-
if require_target_bytes and remaining_bytes < args.target_bytes:
|
| 423 |
-
print(
|
| 424 |
-
f"Skip upload: only {format_size(remaining_bytes)} across {remaining_count} completed NPZ folders available, below targets {format_size(args.target_bytes)} or {args.target_folders} folders."
|
| 425 |
-
)
|
| 426 |
break
|
| 427 |
-
|
| 428 |
-
|
| 429 |
-
|
| 430 |
-
|
| 431 |
-
|
|
|
|
|
|
| 432 |
archive_root = args.archive_dir if args.upload_mode == "git-ssh" else preferred_temp_archive_dir()
|
| 433 |
archive_path = archive_root / archive_name
|
| 434 |
|
|
@@ -440,6 +574,10 @@ def main() -> None:
|
|
| 440 |
break
|
| 441 |
|
| 442 |
args.archive_dir.mkdir(parents=True, exist_ok=True)
|
|
|
|
|
|
|
|
|
|
|
|
|
| 443 |
append_status_journal(
|
| 444 |
args.status_journal_path,
|
| 445 |
batch_names,
|
|
@@ -450,14 +588,23 @@ def main() -> None:
|
|
| 450 |
)
|
| 451 |
try:
|
| 452 |
if args.upload_mode == "api-stream":
|
| 453 |
-
upload_archive_streaming(api, args.repo_id, args.repo_type,
|
| 454 |
else:
|
| 455 |
-
create_tar_archive(archive_path,
|
| 456 |
if args.upload_mode == "api":
|
| 457 |
-
upload_archive(api, args.repo_id, args.repo_type, archive_path)
|
| 458 |
else:
|
| 459 |
-
upload_archive_via_git(args.git_clone_dir, args.repo_id, args.repo_type, archive_path)
|
| 460 |
except Exception as exc:
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 461 |
update_many_video_stats_best_effort(
|
| 462 |
args.stats_npz,
|
| 463 |
args.status_journal_path,
|
|
@@ -465,28 +612,57 @@ def main() -> None:
|
|
| 465 |
upload_status="failed",
|
| 466 |
local_cleanup_status="pending",
|
| 467 |
archive_name=archive_name,
|
|
|
|
| 468 |
last_error=str(exc),
|
| 469 |
updated_at=time.strftime("%Y-%m-%d %H:%M:%S"),
|
| 470 |
)
|
|
|
|
|
|
|
|
|
|
|
|
|
| 471 |
if archive_path.exists():
|
| 472 |
archive_path.unlink(missing_ok=True)
|
| 473 |
raise
|
| 474 |
|
| 475 |
-
|
| 476 |
-
|
| 477 |
-
"
|
| 478 |
-
|
| 479 |
-
|
| 480 |
-
|
| 481 |
-
|
| 482 |
-
|
|
|
|
|
|
|
|
| 483 |
|
| 484 |
cleanup_error = ""
|
| 485 |
try:
|
| 486 |
cleanup_local_assets(
|
| 487 |
batch_names,
|
| 488 |
args.dataset_dir,
|
|
|
|
| 489 |
args.raw_video_dir,
|
|
|
|
| 490 |
args.raw_caption_dir,
|
| 491 |
args.raw_metadata_dir,
|
| 492 |
)
|
|
@@ -494,7 +670,6 @@ def main() -> None:
|
|
| 494 |
archive_path.unlink(missing_ok=True)
|
| 495 |
except Exception as exc:
|
| 496 |
cleanup_error = str(exc)
|
| 497 |
-
repo_files.append(f"dataset/{archive_name}")
|
| 498 |
update_many_video_stats_best_effort(
|
| 499 |
args.stats_npz,
|
| 500 |
args.status_journal_path,
|
|
@@ -502,20 +677,29 @@ def main() -> None:
|
|
| 502 |
upload_status="uploaded",
|
| 503 |
local_cleanup_status="deleted" if not cleanup_error else "failed",
|
| 504 |
archive_name=archive_name,
|
|
|
|
| 505 |
last_error=cleanup_error,
|
| 506 |
updated_at=time.strftime("%Y-%m-%d %H:%M:%S"),
|
| 507 |
)
|
| 508 |
-
|
| 509 |
-
|
| 510 |
-
|
| 511 |
-
|
| 512 |
-
|
| 513 |
-
|
| 514 |
-
args.
|
| 515 |
-
|
|
|
|
| 516 |
if cleanup_error:
|
| 517 |
raise RuntimeError(f"Uploaded {archive_name} but local cleanup failed: {cleanup_error}")
|
| 518 |
-
print(f"Uploaded {archive_name} and cleaned raw assets for {len(batch_names)} videos.")
|
| 519 |
|
| 520 |
|
| 521 |
if __name__ == "__main__":
|
|
|
|
| 1 |
#!/usr/bin/env python3
|
| 2 |
|
| 3 |
import argparse
|
| 4 |
+
import contextlib
|
| 5 |
+
import fcntl
|
| 6 |
+
import hashlib
|
| 7 |
import json
|
| 8 |
import os
|
| 9 |
+
import random
|
| 10 |
import shutil
|
| 11 |
import subprocess
|
| 12 |
import sys
|
|
|
|
| 24 |
if str(REPO_ROOT) not in sys.path:
|
| 25 |
sys.path.insert(0, str(REPO_ROOT))
|
| 26 |
|
| 27 |
+
from utils.stats_npz import apply_status_journal_to_stats, update_many_video_stats_with_retry
|
| 28 |
+
from utils.dataset_pool import find_dataset_video_dir, list_unuploaded_folder_paths as list_unuploaded_folder_paths_pool
|
| 29 |
|
| 30 |
|
| 31 |
DEFAULT_DATASET_DIR = REPO_ROOT / "dataset"
|
|
|
|
| 48 |
description="Archive NPZ folders into 14GB tar files and upload them to Hugging Face."
|
| 49 |
)
|
| 50 |
parser.add_argument("--dataset-dir", type=Path, default=DEFAULT_DATASET_DIR)
|
| 51 |
+
parser.add_argument("--scratch-dataset-dir", type=Path, default=None)
|
| 52 |
parser.add_argument("--raw-video-dir", type=Path, default=DEFAULT_RAW_VIDEO_DIR)
|
| 53 |
+
parser.add_argument("--scratch-raw-video-dir", type=Path, default=None)
|
| 54 |
parser.add_argument("--raw-caption-dir", type=Path, default=DEFAULT_RAW_CAPTION_DIR)
|
| 55 |
parser.add_argument("--raw-metadata-dir", type=Path, default=DEFAULT_RAW_METADATA_DIR)
|
| 56 |
parser.add_argument("--archive-dir", type=Path, default=DEFAULT_ARCHIVE_DIR)
|
| 57 |
parser.add_argument("--progress-path", type=Path, default=DEFAULT_PROGRESS_PATH)
|
| 58 |
parser.add_argument("--stats-npz", type=Path, default=DEFAULT_STATS_NPZ)
|
| 59 |
parser.add_argument("--status-journal-path", type=Path, default=DEFAULT_STATUS_JOURNAL_PATH)
|
| 60 |
+
parser.add_argument("--processed-csv-path", type=Path, default=None)
|
| 61 |
parser.add_argument("--repo-id", default="SignerX/SignVerse-2M")
|
| 62 |
parser.add_argument("--repo-type", default="dataset")
|
| 63 |
+
parser.add_argument("--repo-revision", default=os.environ.get("HF_REPO_REVISION", "main"))
|
| 64 |
parser.add_argument("--target-bytes", type=int, default=DEFAULT_TARGET_BYTES)
|
| 65 |
parser.add_argument("--target-folders", type=int, default=DEFAULT_TARGET_FOLDERS)
|
| 66 |
+
parser.add_argument("--parallel-shards", type=int, default=1)
|
| 67 |
+
parser.add_argument("--shard-index", type=int, default=0)
|
| 68 |
+
parser.add_argument("--start-stagger-min", type=int, default=0)
|
| 69 |
+
parser.add_argument("--start-stagger-max", type=int, default=0)
|
| 70 |
+
parser.add_argument("--skip-stats-write", action="store_true")
|
| 71 |
parser.add_argument("--require-target-bytes", action="store_true", default=True)
|
| 72 |
parser.add_argument("--allow-small-final-batch", action="store_true")
|
| 73 |
parser.add_argument("--dry-run", action="store_true")
|
|
|
|
| 92 |
|
| 93 |
|
| 94 |
|
| 95 |
+
def journal_lock_path(journal_path: Path) -> Path:
|
| 96 |
+
return journal_path.with_suffix(journal_path.suffix + '.lock')
|
| 97 |
+
|
| 98 |
+
|
| 99 |
+
@contextlib.contextmanager
|
| 100 |
+
def locked_journal(journal_path: Path):
|
| 101 |
+
lock_path = journal_lock_path(journal_path)
|
| 102 |
+
lock_path.parent.mkdir(parents=True, exist_ok=True)
|
| 103 |
+
with lock_path.open('a+', encoding='utf-8') as handle:
|
| 104 |
+
fcntl.flock(handle.fileno(), fcntl.LOCK_EX)
|
| 105 |
+
try:
|
| 106 |
+
yield
|
| 107 |
+
finally:
|
| 108 |
+
fcntl.flock(handle.fileno(), fcntl.LOCK_UN)
|
| 109 |
+
|
| 110 |
+
|
| 111 |
def append_status_journal(journal_path: Path, video_ids: Sequence[str], **updates: str) -> None:
|
| 112 |
journal_path.parent.mkdir(parents=True, exist_ok=True)
|
| 113 |
payload = {
|
|
|
|
| 115 |
"updates": {k: ("" if v is None else str(v)) for k, v in updates.items()},
|
| 116 |
"recorded_at": time.strftime("%Y-%m-%d %H:%M:%S"),
|
| 117 |
}
|
| 118 |
+
with locked_journal(journal_path):
|
| 119 |
+
with journal_path.open("a", encoding="utf-8") as handle:
|
| 120 |
+
handle.write(json.dumps(payload, ensure_ascii=False) + "\n")
|
| 121 |
|
| 122 |
|
| 123 |
+
def update_many_video_stats_best_effort(stats_path: Path, journal_path: Path, video_ids: Sequence[str], skip_stats_write: bool = False, **updates: str) -> None:
|
| 124 |
+
if skip_stats_write:
|
| 125 |
+
append_status_journal(journal_path, video_ids, **updates)
|
| 126 |
+
return
|
| 127 |
try:
|
| 128 |
update_many_video_stats_with_retry(stats_path, video_ids, **updates)
|
| 129 |
except Exception as exc:
|
|
|
|
| 132 |
append_status_journal(journal_path, video_ids, **payload)
|
| 133 |
print(f"Warning: stats.npz update deferred to journal due to: {exc}")
|
| 134 |
|
| 135 |
+
def progress_lock_path(progress_path: Path) -> Path:
|
| 136 |
+
return progress_path.with_suffix(progress_path.suffix + '.lock')
|
| 137 |
+
|
| 138 |
+
|
| 139 |
+
@contextlib.contextmanager
|
| 140 |
+
def locked_progress(progress_path: Path):
|
| 141 |
+
lock_path = progress_lock_path(progress_path)
|
| 142 |
+
lock_path.parent.mkdir(parents=True, exist_ok=True)
|
| 143 |
+
with lock_path.open('a+', encoding='utf-8') as handle:
|
| 144 |
+
fcntl.flock(handle.fileno(), fcntl.LOCK_EX)
|
| 145 |
+
try:
|
| 146 |
+
yield
|
| 147 |
+
finally:
|
| 148 |
+
fcntl.flock(handle.fileno(), fcntl.LOCK_UN)
|
| 149 |
+
|
| 150 |
+
|
| 151 |
+
def shard_for_video_id(video_id: str, shard_count: int) -> int:
|
| 152 |
+
if shard_count <= 1:
|
| 153 |
+
return 0
|
| 154 |
+
digest = hashlib.sha1(video_id.encode('utf-8')).hexdigest()
|
| 155 |
+
return int(digest[:8], 16) % shard_count
|
| 156 |
+
|
| 157 |
+
|
| 158 |
+
def filter_folders_for_shard(folders: Sequence[Tuple[str, Path]], shard_count: int, shard_index: int) -> List[Tuple[str, Path]]:
|
| 159 |
+
if shard_count <= 1:
|
| 160 |
+
return list(folders)
|
| 161 |
+
return [(video_id, folder_path) for video_id, folder_path in folders if shard_for_video_id(video_id, shard_count) == shard_index]
|
| 162 |
+
|
| 163 |
+
|
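The two helpers above give each parallel upload job a stable, coordination-free partition of the work: shard membership depends only on the video ID, so every shard selects a disjoint set of folders no matter when or where it runs. A standalone check of that property (the video IDs below are hypothetical) looks like this:

import hashlib

def shard_for_video_id(video_id: str, shard_count: int) -> int:
    # Same hashing rule as the script above: stable sha1 prefix modulo shard count.
    if shard_count <= 1:
        return 0
    digest = hashlib.sha1(video_id.encode("utf-8")).hexdigest()
    return int(digest[:8], 16) % shard_count

video_ids = [f"vid_{i:06d}" for i in range(1000)]  # hypothetical IDs
shards = [shard_for_video_id(v, 4) for v in video_ids]
assert set(shards) <= {0, 1, 2, 3}
assert shards == [shard_for_video_id(v, 4) for v in video_ids]  # deterministic across runs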
| 164 |
+
def load_progress(progress_path: Path, retries: int = 8, retry_delay: float = 0.2) -> Dict[str, object]:
|
| 165 |
+
if not progress_path.exists():
|
| 166 |
+
return {"archives": {}, "uploaded_folders": {}}
|
| 167 |
+
last_error = None
|
| 168 |
+
for _ in range(retries):
|
| 169 |
+
try:
|
| 170 |
+
with progress_path.open("r", encoding="utf-8") as handle:
|
| 171 |
+
data = handle.read()
|
| 172 |
+
if not data.strip():
|
| 173 |
+
raise json.JSONDecodeError("empty progress file", data, 0)
|
| 174 |
+
return json.loads(data)
|
| 175 |
+
except (json.JSONDecodeError, OSError) as exc:
|
| 176 |
+
last_error = exc
|
| 177 |
+
time.sleep(retry_delay)
|
| 178 |
+
if last_error is not None:
|
| 179 |
+
raise last_error
|
| 180 |
return {"archives": {}, "uploaded_folders": {}}
|
| 181 |
|
| 182 |
|
| 183 |
def save_progress(progress_path: Path, progress: Dict[str, object]) -> None:
|
| 184 |
progress_path.parent.mkdir(parents=True, exist_ok=True)
|
| 185 |
+
tmp_path = progress_path.parent / f'.{progress_path.name}.{os.getpid()}.tmp'
|
| 186 |
+
with tmp_path.open("w", encoding="utf-8") as handle:
|
| 187 |
json.dump(progress, handle, ensure_ascii=False, indent=2)
|
| 188 |
+
handle.flush()
|
| 189 |
+
os.fsync(handle.fileno())
|
| 190 |
+
os.replace(tmp_path, progress_path)
|
| 191 |
|
| 192 |
|
| 193 |
def folder_size_bytes(folder_path: Path) -> int:
|
|
|
|
| 204 |
return total
|
| 205 |
|
| 206 |
|
| 207 |
+
def list_unuploaded_folder_paths(dataset_dir: Path, scratch_dataset_dir: Path | None, progress: Dict[str, object]) -> List[Tuple[str, Path]]:
|
| 208 |
uploaded_folders = progress.get("uploaded_folders", {})
|
| 209 |
+
return list_unuploaded_folder_paths_pool(dataset_dir, scratch_dataset_dir, uploaded_folders)
|
|
|
| 210 |
|
| 211 |
|
| 212 |
def enrich_folder_sizes(folders: Sequence[Tuple[str, Path]]) -> List[Tuple[str, Path, int]]:
|
|
|
|
| 256 |
path.mkdir(parents=True, exist_ok=True)
|
| 257 |
return path
|
| 258 |
|
| 259 |
+
def create_tar_archive(archive_path: Path, folder_paths: Sequence[Tuple[str, Path]]) -> None:
|
| 260 |
archive_path.parent.mkdir(parents=True, exist_ok=True)
|
| 261 |
with tarfile.open(archive_path, mode="w") as tar:
|
| 262 |
+
for folder_name, folder_path in folder_paths:
|
| 263 |
+
tar.add(folder_path, arcname=folder_name, recursive=True)
|
| 264 |
|
| 265 |
|
| 266 |
+
def upload_archive(api: HfApi, repo_id: str, repo_type: str, repo_revision: str, archive_path: Path) -> None:
|
| 267 |
api.upload_file(
|
| 268 |
path_or_fileobj=str(archive_path),
|
| 269 |
path_in_repo=f"dataset/{archive_path.name}",
|
| 270 |
repo_id=repo_id,
|
| 271 |
repo_type=repo_type,
|
| 272 |
+
revision=repo_revision,
|
| 273 |
)
|
| 274 |
|
| 275 |
|
| 276 |
+
def upload_runtime_state_files(api: HfApi | None, repo_id: str, repo_type: str, repo_revision: str, progress_path: Path, journal_path: Path, stats_path: Path | None = None, processed_csv_path: Path | None = None) -> None:
|
| 277 |
if api is None:
|
| 278 |
return
|
| 279 |
api.upload_file(
|
|
|
|
| 281 |
path_in_repo="runtime_state/archive_upload_progress.json",
|
| 282 |
repo_id=repo_id,
|
| 283 |
repo_type=repo_type,
|
| 284 |
+
revision=repo_revision,
|
| 285 |
)
|
| 286 |
if journal_path.exists():
|
| 287 |
api.upload_file(
|
|
|
|
| 289 |
path_in_repo="runtime_state/upload_status_journal.jsonl",
|
| 290 |
repo_id=repo_id,
|
| 291 |
repo_type=repo_type,
|
| 292 |
+
revision=repo_revision,
|
| 293 |
+
)
|
| 294 |
+
if stats_path is not None and stats_path.exists():
|
| 295 |
+
api.upload_file(
|
| 296 |
+
path_or_fileobj=str(stats_path),
|
| 297 |
+
path_in_repo="runtime_state/stats.npz",
|
| 298 |
+
repo_id=repo_id,
|
| 299 |
+
repo_type=repo_type,
|
| 300 |
+
revision=repo_revision,
|
| 301 |
+
)
|
| 302 |
+
if processed_csv_path is not None and processed_csv_path.exists():
|
| 303 |
+
api.upload_file(
|
| 304 |
+
path_or_fileobj=str(processed_csv_path),
|
| 305 |
+
path_in_repo="runtime_state/SignVerse-2M-metadata_processed.csv",
|
| 306 |
+
repo_id=repo_id,
|
| 307 |
+
repo_type=repo_type,
|
| 308 |
+
revision=repo_revision,
|
| 309 |
)
|
| 310 |
|
| 311 |
|
| 312 |
+
def upload_archive_streaming(api: HfApi, repo_id: str, repo_type: str, repo_revision: str, folder_paths: Sequence[Tuple[str, Path]], archive_name: str) -> None:
|
| 313 |
if api is None:
|
| 314 |
raise RuntimeError('api-stream upload requires a Hugging Face token')
|
| 315 |
+
command = ['tar', '-cf', '-']
|
| 316 |
+
for folder_name, folder_path in folder_paths:
|
| 317 |
+
command.extend(['-C', str(folder_path.parent), folder_name])
|
| 318 |
process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
|
| 319 |
assert process.stdout is not None
|
| 320 |
stderr_chunks = []
|
|
|
|
| 333 |
path_in_repo=f"dataset/{archive_name}",
|
| 334 |
repo_id=repo_id,
|
| 335 |
repo_type=repo_type,
|
| 336 |
+
revision=repo_revision,
|
| 337 |
)
|
| 338 |
finally:
|
| 339 |
try:
|
|
|
|
| 363 |
return (result.stdout or result.stderr or "").strip()
|
| 364 |
|
| 365 |
|
| 366 |
+
def ensure_git_upload_repo(clone_dir: Path, repo_id: str, repo_type: str, repo_revision: str) -> Path:
|
| 367 |
remote_url = repo_git_url(repo_id, repo_type)
|
| 368 |
clone_dir.parent.mkdir(parents=True, exist_ok=True)
|
| 369 |
if not (clone_dir / '.git').exists():
|
|
|
|
| 373 |
if configured != remote_url:
|
| 374 |
raise RuntimeError(f"Git upload clone remote mismatch: {configured} != {remote_url}")
|
| 375 |
run_git(["git", "fetch", "origin"], clone_dir)
|
| 376 |
+
remote_branch = f"origin/{repo_revision}"
|
| 377 |
+
existing = subprocess.run(["git", "show-ref", "--verify", f"refs/remotes/{remote_branch}"], cwd=str(clone_dir))
|
| 378 |
+
if existing.returncode == 0:
|
| 379 |
+
run_git(["git", "checkout", "-B", repo_revision, remote_branch], clone_dir)
|
| 380 |
+
else:
|
| 381 |
+
run_git(["git", "checkout", "-B", repo_revision, "origin/main"], clone_dir)
|
| 382 |
run_git(["git", "lfs", "install", "--local"], clone_dir)
|
| 383 |
try:
|
| 384 |
run_git(["git", "config", "user.name"], clone_dir)
|
|
|
|
| 391 |
return clone_dir
|
| 392 |
|
| 393 |
|
| 394 |
+
def list_repo_files_via_git(clone_dir: Path, repo_id: str, repo_type: str, repo_revision: str) -> List[str]:
|
| 395 |
+
repo_dir = ensure_git_upload_repo(clone_dir, repo_id, repo_type, repo_revision)
|
| 396 |
return [path.name for path in repo_dir.iterdir() if path.is_file()]
|
| 397 |
|
| 398 |
|
| 399 |
+
def upload_archive_via_git(clone_dir: Path, repo_id: str, repo_type: str, repo_revision: str, archive_path: Path) -> None:
|
| 400 |
+
repo_dir = ensure_git_upload_repo(clone_dir, repo_id, repo_type, repo_revision)
|
| 401 |
target_path = repo_dir / archive_path.name
|
| 402 |
shutil.copy2(archive_path, target_path)
|
| 403 |
run_git(["git", "add", archive_path.name], repo_dir)
|
|
|
|
| 407 |
if diff_result.returncode != 1:
|
| 408 |
raise RuntimeError(f"git diff --cached failed for {archive_path.name}")
|
| 409 |
run_git(["git", "commit", "-m", f"Add {archive_path.name}"], repo_dir)
|
| 410 |
+
run_git(["git", "push", "origin", repo_revision], repo_dir)
|
| 411 |
|
| 412 |
|
| 413 |
def cleanup_local_assets(
|
| 414 |
video_ids: Sequence[str],
|
| 415 |
dataset_dir: Path,
|
| 416 |
+
scratch_dataset_dir: Path | None,
|
| 417 |
raw_video_dir: Path,
|
| 418 |
+
scratch_raw_video_dir: Path | None,
|
| 419 |
raw_caption_dir: Path,
|
| 420 |
raw_metadata_dir: Path,
|
| 421 |
) -> None:
|
| 422 |
for video_id in video_ids:
|
| 423 |
+
dataset_video_dir = find_dataset_video_dir(video_id, dataset_dir, scratch_dataset_dir)
|
| 424 |
if dataset_video_dir.exists():
|
| 425 |
shutil.rmtree(dataset_video_dir, ignore_errors=True)
|
| 426 |
+
for raw_dir in [raw_video_dir, scratch_raw_video_dir]:
|
| 427 |
+
if raw_dir is None:
|
| 428 |
+
continue
|
| 429 |
+
for path in raw_dir.glob(f"{video_id}.*"):
|
| 430 |
+
if path.is_file():
|
| 431 |
+
path.unlink()
|
| 432 |
caption_dir = raw_caption_dir / video_id
|
| 433 |
if caption_dir.exists():
|
| 434 |
shutil.rmtree(caption_dir, ignore_errors=True)
|
| 435 |
metadata_path = raw_metadata_dir / f"{video_id}.json"
|
| 436 |
if metadata_path.exists():
|
| 437 |
+
metadata_path.unlink(missing_ok=True)
|
| 438 |
|
| 439 |
|
| 440 |
def prune_uploaded_runtime_residue(
|
| 441 |
progress: Dict[str, object],
|
| 442 |
dataset_dir: Path,
|
| 443 |
+
scratch_dataset_dir: Path | None,
|
| 444 |
raw_video_dir: Path,
|
| 445 |
+
scratch_raw_video_dir: Path | None,
|
| 446 |
raw_caption_dir: Path,
|
| 447 |
raw_metadata_dir: Path,
|
| 448 |
) -> None:
|
| 449 |
uploaded = set(progress.get("uploaded_folders", {}))
|
| 450 |
for video_id in uploaded:
|
| 451 |
+
for raw_dir in [raw_video_dir, scratch_raw_video_dir]:
|
| 452 |
+
if raw_dir is None:
|
| 453 |
+
continue
|
| 454 |
+
for path in raw_dir.glob(f"{video_id}.*"):
|
| 455 |
+
if path.is_file():
|
| 456 |
+
path.unlink(missing_ok=True)
|
| 457 |
caption_dir = raw_caption_dir / video_id
|
| 458 |
if caption_dir.exists():
|
| 459 |
shutil.rmtree(caption_dir, ignore_errors=True)
|
| 460 |
metadata_path = raw_metadata_dir / f"{video_id}.json"
|
| 461 |
if metadata_path.exists():
|
| 462 |
+
metadata_path.unlink(missing_ok=True)
|
| 463 |
+
dataset_video_dir = find_dataset_video_dir(video_id, dataset_dir, scratch_dataset_dir)
|
| 464 |
if dataset_video_dir.exists() and not (dataset_video_dir / "npz" / COMPLETE_MARKER_NAME).exists():
|
| 465 |
shutil.rmtree(dataset_video_dir, ignore_errors=True)
|
| 466 |
|
|
|
|
| 476 |
|
| 477 |
def main() -> None:
|
| 478 |
args = parse_args()
|
| 479 |
+
if args.parallel_shards < 1:
|
| 480 |
+
raise ValueError('--parallel-shards must be >= 1')
|
| 481 |
+
if not (0 <= args.shard_index < args.parallel_shards):
|
| 482 |
+
raise ValueError('--shard-index must satisfy 0 <= shard-index < parallel-shards')
|
| 483 |
+
if args.start_stagger_max < args.start_stagger_min:
|
| 484 |
+
raise ValueError('--start-stagger-max must be >= --start-stagger-min')
|
| 485 |
+
if args.start_stagger_max > 0:
|
| 486 |
+
delay = random.randint(args.start_stagger_min, args.start_stagger_max)
|
| 487 |
+
if delay > 0:
|
| 488 |
+
print(f"[pipeline03] stagger sleep {delay}s shard={args.shard_index}/{args.parallel_shards}", flush=True)
|
| 489 |
+
time.sleep(delay)
|
| 490 |
+
print(f"[pipeline03] start upload_mode={args.upload_mode} repo_id={args.repo_id} shard={args.shard_index}/{args.parallel_shards}", flush=True)
|
| 491 |
+
skip_stats_write = args.skip_stats_write or args.parallel_shards > 1
|
| 492 |
progress = load_progress(args.progress_path)
|
| 493 |
print(
|
| 494 |
f"[pipeline03] loaded progress archives={len(progress.get('archives', {}))} "
|
|
|
|
| 499 |
print(f"[pipeline03] token_present={bool(resolved_token)}", flush=True)
|
| 500 |
api = HfApi(token=resolved_token) if args.upload_mode in {"api", "api-stream"} else None
|
| 501 |
args.dataset_dir.mkdir(parents=True, exist_ok=True)
|
| 502 |
+
if args.scratch_dataset_dir is not None:
|
| 503 |
+
args.scratch_dataset_dir.mkdir(parents=True, exist_ok=True)
|
| 504 |
|
| 505 |
try:
|
| 506 |
if args.upload_mode in {"api", "api-stream"}:
|
|
|
|
| 508 |
repo_files = []
|
| 509 |
else:
|
| 510 |
print("[pipeline03] listing repo files via git", flush=True)
|
| 511 |
+
repo_files = list_repo_files_via_git(args.git_clone_dir, args.repo_id, args.repo_type, args.repo_revision)
|
| 512 |
except Exception as exc:
|
| 513 |
print(f"[pipeline03] repo file listing failed: {exc}", flush=True)
|
| 514 |
traceback.print_exc()
|
| 515 |
repo_files = []
|
| 516 |
|
| 517 |
while True:
|
| 518 |
+
with locked_progress(args.progress_path):
|
| 519 |
+
progress = load_progress(args.progress_path)
|
| 520 |
+
prune_uploaded_runtime_residue(
|
| 521 |
+
progress,
|
| 522 |
+
args.dataset_dir,
|
| 523 |
+
args.scratch_dataset_dir,
|
| 524 |
+
args.raw_video_dir,
|
| 525 |
+
args.scratch_raw_video_dir,
|
| 526 |
+
args.raw_caption_dir,
|
| 527 |
+
args.raw_metadata_dir,
|
| 528 |
+
)
|
| 529 |
+
remaining_folder_paths = list_unuploaded_folder_paths(args.dataset_dir, args.scratch_dataset_dir, progress)
|
| 530 |
+
remaining_folder_paths = filter_folders_for_shard(remaining_folder_paths, args.parallel_shards, args.shard_index)
|
| 531 |
+
if not remaining_folder_paths:
|
| 532 |
+
print(f"No unuploaded dataset folders remain for shard {args.shard_index}/{args.parallel_shards}.")
|
|
|
| 533 |
break
|
| 534 |
+
remaining_count = len(remaining_folder_paths)
|
| 535 |
+
print(f"[pipeline03] remaining completed folders available={remaining_count} shard={args.shard_index}/{args.parallel_shards}", flush=True)
|
| 536 |
+
if remaining_count >= args.target_folders:
|
| 537 |
+
selected_folder_paths = remaining_folder_paths[: args.target_folders]
|
| 538 |
+
batch = enrich_folder_sizes(selected_folder_paths)
|
| 539 |
+
batch_names = [name for name, _, _ in batch]
|
| 540 |
+
batch_bytes = sum(folder_bytes for _, _, folder_bytes in batch)
|
| 541 |
+
print(f"[pipeline03] folder threshold reached; selecting first {len(batch_names)} folders without global size scan", flush=True)
|
| 542 |
+
else:
|
| 543 |
+
remaining_folders = enrich_folder_sizes(remaining_folder_paths)
|
| 544 |
+
remaining_bytes = total_batchable_bytes(remaining_folders)
|
| 545 |
+
require_target_bytes = args.require_target_bytes and not args.allow_small_final_batch
|
| 546 |
+
if require_target_bytes and remaining_bytes < args.target_bytes:
|
| 547 |
+
print(
|
| 548 |
+
f"Skip upload: only {format_size(remaining_bytes)} across {remaining_count} completed NPZ folders available, below targets {format_size(args.target_bytes)} or {args.target_folders} folders."
|
| 549 |
+
)
|
| 550 |
+
break
|
| 551 |
+
batch = build_batch(remaining_folders, args.target_bytes)
|
| 552 |
+
batch_names = [name for name, _, _ in batch]
|
| 553 |
+
batch_bytes = sum(folder_bytes for _, _, folder_bytes in batch)
|
| 554 |
+
archive_index = next_archive_index(progress, repo_files)
|
| 555 |
+
archive_name = f"Sign_DWPose_NPZ_{archive_index:06d}.tar"
|
| 556 |
+
progress["archives"][archive_name] = {
|
| 557 |
+
"folders": batch_names,
|
| 558 |
+
"size_bytes": batch_bytes,
|
| 559 |
+
"status": "uploading",
|
| 560 |
+
"reserved_at": time.strftime("%Y-%m-%d %H:%M:%S"),
|
| 561 |
+
"shard_index": args.shard_index,
|
| 562 |
+
"parallel_shards": args.parallel_shards,
|
| 563 |
+
}
|
| 564 |
+
save_progress(args.progress_path, progress)
|
| 565 |
+
|
| 566 |
archive_root = args.archive_dir if args.upload_mode == "git-ssh" else preferred_temp_archive_dir()
|
| 567 |
archive_path = archive_root / archive_name
|
| 568 |
|
|
|
|
| 574 |
break
|
| 575 |
|
| 576 |
args.archive_dir.mkdir(parents=True, exist_ok=True)
|
| 577 |
+
try:
|
| 578 |
+
apply_status_journal_to_stats(args.stats_npz, args.status_journal_path)
|
| 579 |
+
except Exception as exc:
|
| 580 |
+
print(f"Warning: pre-upload status compaction skipped due to: {exc}")
|
| 581 |
append_status_journal(
|
| 582 |
args.status_journal_path,
|
| 583 |
batch_names,
|
|
|
|
| 588 |
)
|
| 589 |
try:
|
| 590 |
if args.upload_mode == "api-stream":
|
| 591 |
+
upload_archive_streaming(api, args.repo_id, args.repo_type, [(name, path) for name, path, _ in batch], archive_name)
|
| 592 |
else:
|
| 593 |
+
create_tar_archive(archive_path, [(name, path) for name, path, _ in batch])
|
| 594 |
if args.upload_mode == "api":
|
| 595 |
+
upload_archive(api, args.repo_id, args.repo_type, args.repo_revision, archive_path)
|
| 596 |
else:
|
| 597 |
+
upload_archive_via_git(args.git_clone_dir, args.repo_id, args.repo_type, args.repo_revision, archive_path)
|
| 598 |
except Exception as exc:
|
| 599 |
+
with locked_progress(args.progress_path):
|
| 600 |
+
progress = load_progress(args.progress_path)
|
| 601 |
+
archive_meta = progress.get("archives", {}).get(archive_name, {})
|
| 602 |
+
if isinstance(archive_meta, dict):
|
| 603 |
+
archive_meta["status"] = "failed"
|
| 604 |
+
archive_meta["last_error"] = str(exc)
|
| 605 |
+
archive_meta["failed_at"] = time.strftime("%Y-%m-%d %H:%M:%S")
|
| 606 |
+
progress["archives"][archive_name] = archive_meta
|
| 607 |
+
save_progress(args.progress_path, progress)
|
| 608 |
update_many_video_stats_best_effort(
|
| 609 |
args.stats_npz,
|
| 610 |
args.status_journal_path,
|
|
|
|
| 612 |
upload_status="failed",
|
| 613 |
local_cleanup_status="pending",
|
| 614 |
archive_name=archive_name,
|
| 615 |
+
skip_stats_write=skip_stats_write,
|
| 616 |
last_error=str(exc),
|
| 617 |
updated_at=time.strftime("%Y-%m-%d %H:%M:%S"),
|
| 618 |
)
|
| 619 |
+
try:
|
| 620 |
+
apply_status_journal_to_stats(args.stats_npz, args.status_journal_path)
|
| 621 |
+
except Exception as compact_exc:
|
| 622 |
+
print(f"Warning: failed-upload compaction skipped due to: {compact_exc}")
|
| 623 |
if archive_path.exists():
|
| 624 |
archive_path.unlink(missing_ok=True)
|
| 625 |
raise
|
| 626 |
|
| 627 |
+
with locked_progress(args.progress_path):
|
| 628 |
+
progress = load_progress(args.progress_path)
|
| 629 |
+
progress["archives"][archive_name] = {
|
| 630 |
+
"folders": batch_names,
|
| 631 |
+
"size_bytes": batch_bytes,
|
| 632 |
+
"uploaded_at": time.strftime("%Y-%m-%d %H:%M:%S"),
|
| 633 |
+
"status": "uploaded",
|
| 634 |
+
"shard_index": args.shard_index,
|
| 635 |
+
"parallel_shards": args.parallel_shards,
|
| 636 |
+
}
|
| 637 |
+
for folder_name in batch_names:
|
| 638 |
+
progress["uploaded_folders"][folder_name] = archive_name
|
| 639 |
+
save_progress(args.progress_path, progress)
|
| 640 |
+
|
| 641 |
+
repo_files.append(f"dataset/{archive_name}")
|
| 642 |
+
update_many_video_stats_best_effort(
|
| 643 |
+
args.stats_npz,
|
| 644 |
+
args.status_journal_path,
|
| 645 |
+
batch_names,
|
| 646 |
+
upload_status="uploaded",
|
| 647 |
+
local_cleanup_status="pending",
|
| 648 |
+
archive_name=archive_name,
|
| 649 |
+
skip_stats_write=skip_stats_write,
|
| 650 |
+
last_error="",
|
| 651 |
+
updated_at=time.strftime("%Y-%m-%d %H:%M:%S"),
|
| 652 |
+
)
|
| 653 |
+
try:
|
| 654 |
+
apply_status_journal_to_stats(args.stats_npz, args.status_journal_path)
|
| 655 |
+
except Exception as exc:
|
| 656 |
+
print(f"Warning: post-upload status compaction skipped due to: {exc}")
|
| 657 |
|
| 658 |
cleanup_error = ""
|
| 659 |
try:
|
| 660 |
cleanup_local_assets(
|
| 661 |
batch_names,
|
| 662 |
args.dataset_dir,
|
| 663 |
+
args.scratch_dataset_dir,
|
| 664 |
args.raw_video_dir,
|
| 665 |
+
args.scratch_raw_video_dir,
|
| 666 |
args.raw_caption_dir,
|
| 667 |
args.raw_metadata_dir,
|
| 668 |
)
|
|
|
|
| 670 |
archive_path.unlink(missing_ok=True)
|
| 671 |
except Exception as exc:
|
| 672 |
cleanup_error = str(exc)
|
|
|
|
| 673 |
update_many_video_stats_best_effort(
|
| 674 |
args.stats_npz,
|
| 675 |
args.status_journal_path,
|
|
|
|
| 677 |
upload_status="uploaded",
|
| 678 |
local_cleanup_status="deleted" if not cleanup_error else "failed",
|
| 679 |
archive_name=archive_name,
|
| 680 |
+
skip_stats_write=skip_stats_write,
|
| 681 |
last_error=cleanup_error,
|
| 682 |
updated_at=time.strftime("%Y-%m-%d %H:%M:%S"),
|
| 683 |
)
|
| 684 |
+
try:
|
| 685 |
+
apply_status_journal_to_stats(args.stats_npz, args.status_journal_path)
|
| 686 |
+
except Exception as exc:
|
| 687 |
+
print(f"Warning: post-cleanup status compaction skipped due to: {exc}")
|
| 688 |
+
upload_runtime_state_files(api, args.repo_id, args.repo_type, args.repo_revision, args.progress_path, args.status_journal_path, args.stats_npz, args.processed_csv_path if hasattr(args, "processed_csv_path") else None)
|
| 689 |
+
with locked_progress(args.progress_path):
|
| 690 |
+
progress = load_progress(args.progress_path)
|
| 691 |
+
prune_uploaded_runtime_residue(
|
| 692 |
+
progress,
|
| 693 |
+
args.dataset_dir,
|
| 694 |
+
args.scratch_dataset_dir,
|
| 695 |
+
args.raw_video_dir,
|
| 696 |
+
args.scratch_raw_video_dir,
|
| 697 |
+
args.raw_caption_dir,
|
| 698 |
+
args.raw_metadata_dir,
|
| 699 |
+
)
|
| 700 |
if cleanup_error:
|
| 701 |
raise RuntimeError(f"Uploaded {archive_name} but local cleanup failed: {cleanup_error}")
|
| 702 |
+
print(f"Uploaded {archive_name} and cleaned raw assets for {len(batch_names)} videos. shard={args.shard_index}/{args.parallel_shards}")
|
| 703 |
|
| 704 |
|
| 705 |
if __name__ == "__main__":
|
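Because several shard jobs now read and rewrite archive_upload_progress.json concurrently, pipeline03 pairs an fcntl advisory lock with a write-to-temp-then-os.replace() save. The same idiom, stripped of the script-specific fields and with generic helper names (locked and save_atomic are illustrative, not the committed API), is sketched below:

import contextlib
import fcntl
import json
import os
from pathlib import Path

@contextlib.contextmanager
def locked(lock_path: Path):
    # POSIX advisory lock: serialises readers/writers of the shared state file.
    lock_path.parent.mkdir(parents=True, exist_ok=True)
    with lock_path.open("a+") as handle:
        fcntl.flock(handle.fileno(), fcntl.LOCK_EX)
        try:
            yield
        finally:
            fcntl.flock(handle.fileno(), fcntl.LOCK_UN)

def save_atomic(path: Path, payload: dict) -> None:
    # Write to a temp file, fsync, then rename; a concurrent reader never sees a half-written JSON.
    tmp = path.parent / f".{path.name}.{os.getpid()}.tmp"
    with tmp.open("w", encoding="utf-8") as handle:
        json.dump(payload, handle, ensure_ascii=False, indent=2)
        handle.flush()
        os.fsync(handle.fileno())
    os.replace(tmp, path)  # atomic on POSIX filesystems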
scripts/runtime_status.py
CHANGED
|
@@ -4,13 +4,16 @@ from __future__ import annotations
|
|
| 4 |
import argparse
|
| 5 |
import csv
|
| 6 |
import json
|
|
|
|
| 7 |
import re
|
| 8 |
import subprocess
|
| 9 |
from collections import Counter
|
| 10 |
from pathlib import Path
|
| 11 |
|
|
|
|
|
|
|
| 12 |
VIDEO_EXTS = {'.mp4', '.mkv', '.webm', '.mov'}
|
| 13 |
-
ARRAY_RANGE_RE = re.compile(r'^(\d+)_
|
| 14 |
PROCESSED_REQUIRED_COLUMNS = {
|
| 15 |
'video_id',
|
| 16 |
'download_status',
|
|
@@ -18,6 +21,8 @@ PROCESSED_REQUIRED_COLUMNS = {
|
|
| 18 |
'upload_status',
|
| 19 |
'archive_name',
|
| 20 |
}
|
|
|
|
|
|
|
| 21 |
|
| 22 |
|
| 23 |
def run_command(cmd: list[str]) -> str:
|
|
@@ -34,10 +39,38 @@ def count_claims(directory: Path) -> int:
|
|
| 34 |
return sum(1 for _ in directory.glob('*.claim'))
|
| 35 |
|
| 36 |
|
| 37 |
-
def count_complete(dataset_dir: Path) -> int:
|
| 38 |
-
|
| 39 |
-
|
| 40 |
-
|
|
|
|
| 41 |
|
| 42 |
|
| 43 |
def sum_file_sizes(paths: list[Path]) -> int:
|
|
@@ -66,7 +99,7 @@ def expand_task_count(jobid_token: str) -> int:
|
|
| 66 |
m = ARRAY_RANGE_RE.match(jobid_token)
|
| 67 |
if not m:
|
| 68 |
return 1
|
| 69 |
-
body = m.group(
|
| 70 |
if '%' in body:
|
| 71 |
body = body.split('%', 1)[0]
|
| 72 |
total = 0
|
|
@@ -89,9 +122,11 @@ def queue_status(username: str) -> dict[str, object]:
|
|
| 89 |
output = run_command(['squeue', '-u', username, '-h', '-o', '%i|%j|%T|%P'])
|
| 90 |
job_counts: Counter[str] = Counter()
|
| 91 |
partition_counts: Counter[str] = Counter()
|
|
|
|
| 92 |
running_dwpose = 0
|
| 93 |
running_download = 0
|
| 94 |
pending_download = 0
|
|
|
|
| 95 |
if output:
|
| 96 |
for line in output.splitlines():
|
| 97 |
parts = line.split('|')
|
|
@@ -101,23 +136,80 @@ def queue_status(username: str) -> dict[str, object]:
|
|
| 101 |
count = expand_task_count(jobid_token)
|
| 102 |
job_counts[f'{job}|{state}'] += count
|
| 103 |
partition_counts[f'{job}|{partition}|{state}'] += count
|
|
|
|
|
|
|
| 104 |
if job == 'dwpose' and state == 'RUNNING':
|
| 105 |
running_dwpose += count
|
| 106 |
if job == 'download' and state == 'RUNNING':
|
| 107 |
running_download += count
|
| 108 |
if job == 'download' and state in {'PENDING', 'CONFIGURING'}:
|
| 109 |
pending_download += count
|
|
|
|
|
|
|
| 110 |
total_download = running_download + pending_download
|
| 111 |
return {
|
| 112 |
'running_dwpose': running_dwpose,
|
| 113 |
'running_download': running_download,
|
|
|
|
| 114 |
'pending_download_jobs': pending_download,
|
| 115 |
'total_download_jobs': total_download,
|
| 116 |
'job_state_counts': dict(job_counts),
|
| 117 |
'job_partition_state_counts': dict(partition_counts),
|
|
|
|
| 118 |
}
|
| 119 |
|
| 120 |
|
|
|
|
|
| 121 |
def filesystem_avail_bytes(path: Path) -> int:
|
| 122 |
try:
|
| 123 |
proc = subprocess.run(['df', '-B1', str(path)], check=False, capture_output=True, text=True)
|
|
@@ -217,9 +309,11 @@ def run_sync(runtime_root: Path) -> str:
|
|
| 217 |
'--source-metadata-csv', str(runtime_root / 'SignVerse-2M-metadata_ori.csv'),
|
| 218 |
'--output-metadata-csv', str(runtime_root / 'SignVerse-2M-metadata_processed.csv'),
|
| 219 |
'--raw-video-dir', str(runtime_root / 'raw_video'),
|
|
|
|
| 220 |
'--raw-caption-dir', str(runtime_root / 'raw_caption'),
|
| 221 |
'--raw-metadata-dir', str(runtime_root / 'raw_metadata'),
|
| 222 |
'--dataset-dir', str(runtime_root / 'dataset'),
|
|
|
|
| 223 |
'--progress-path', str(runtime_root / 'archive_upload_progress.json'),
|
| 224 |
'--status-journal-path', str(runtime_root / 'upload_status_journal.jsonl'),
|
| 225 |
]
|
|
@@ -234,16 +328,22 @@ def run_sync(runtime_root: Path) -> str:
|
|
| 234 |
|
| 235 |
|
| 236 |
def main() -> None:
|
| 237 |
-
parser = argparse.ArgumentParser(description='Report
|
| 238 |
parser.add_argument('--runtime-root', default='/home/sf895/SignVerse-2M-runtime')
|
| 239 |
parser.add_argument('--username', default='sf895')
|
| 240 |
parser.add_argument('--no-sync', action='store_true')
|
| 241 |
parser.add_argument('--json', action='store_true')
|
|
|
|
|
|
|
|
|
|
| 242 |
args = parser.parse_args()
|
| 243 |
|
| 244 |
runtime_root = Path(args.runtime_root)
|
|
|
|
| 245 |
raw_dir = runtime_root / 'raw_video'
|
|
|
|
| 246 |
dataset_dir = runtime_root / 'dataset'
|
|
|
|
| 247 |
claims_dir = runtime_root / 'slurm' / 'state' / 'claims'
|
| 248 |
download_claims_dir = runtime_root / 'slurm' / 'state' / 'download_claims'
|
| 249 |
progress_path = runtime_root / 'archive_upload_progress.json'
|
|
@@ -254,29 +354,32 @@ def main() -> None:
|
|
| 254 |
if not args.no_sync:
|
| 255 |
sync_result = run_sync(runtime_root)
|
| 256 |
|
| 257 |
-
raw_complete: list[Path] = []
|
| 258 |
raw_temp: list[Path] = []
|
| 259 |
-
|
| 260 |
-
|
|
|
|
|
|
|
| 261 |
if not path.is_file():
|
| 262 |
continue
|
| 263 |
if path.suffix.lower() in VIDEO_EXTS:
|
| 264 |
-
raw_complete.append(path)
|
| 265 |
else:
|
| 266 |
raw_temp.append(path)
|
| 267 |
|
| 268 |
-
raw_size = sum_file_sizes(raw_complete)
|
| 269 |
runtime_size = 0
|
| 270 |
if runtime_root.exists():
|
| 271 |
-
|
|
|
|
| 272 |
try:
|
| 273 |
-
|
| 274 |
-
|
| 275 |
-
|
| 276 |
-
continue
|
| 277 |
|
| 278 |
source_rows = read_source_manifest_count(source_csv)
|
| 279 |
progress = read_processed_progress(processed_csv)
|
|
|
|
| 280 |
|
| 281 |
payload = {
|
| 282 |
'sync_result': sync_result,
|
|
@@ -284,7 +387,7 @@ def main() -> None:
|
|
| 284 |
'raw_videos': len(raw_complete),
|
| 285 |
'raw_temp_files': len(raw_temp),
|
| 286 |
'sent_to_gpu': count_claims(claims_dir),
|
| 287 |
-
'processed_complete': count_complete(dataset_dir),
|
| 288 |
'active_downloads': count_claims(download_claims_dir),
|
| 289 |
'uploaded_archives': 0,
|
| 290 |
'uploaded_folders': 0,
|
|
@@ -308,6 +411,22 @@ def main() -> None:
|
|
| 308 |
payload['uploaded_archives'] = uploaded_archives
|
| 309 |
payload['uploaded_folders'] = uploaded_folders
|
| 310 |
payload.update(queue_status(args.username))
|
|
|
|
| 311 |
payload['csv_row_match'] = (payload['processed_rows'] == payload['source_rows']) if payload['csv_ok'] else False
|
| 312 |
|
| 313 |
if args.json:
|
|
@@ -320,6 +439,16 @@ def main() -> None:
|
|
| 320 |
print(f"raw_temp_files={payload['raw_temp_files']}")
|
| 321 |
print(f"sent_to_gpu={payload['sent_to_gpu']}")
|
| 322 |
print(f"running_dwpose={payload['running_dwpose']}")
|
|
|
|
|
| 323 |
print(f"processed_complete={payload['processed_complete']}")
|
| 324 |
print(f"active_downloads={payload['active_downloads']}")
|
| 325 |
print(f"running_download_jobs={payload['running_download']}")
|
|
@@ -339,6 +468,14 @@ def main() -> None:
|
|
| 339 |
print(f"process_ok_rows={payload['process_ok_rows']}")
|
| 340 |
print(f"process_running_rows={payload['process_running_rows']}")
|
| 341 |
print(f"upload_uploaded_rows={payload['upload_uploaded_rows']}")
|
|
|
|
|
| 342 |
print(f"raw_size={human_bytes(payload['raw_size_bytes'])}")
|
| 343 |
print(f"runtime_size={human_bytes(payload['runtime_size_bytes'])}")
|
| 344 |
print(f"filesystem_avail={human_bytes(payload['filesystem_avail_bytes'])}")
|
|
|
|
| 4 |
import argparse
|
| 5 |
import csv
|
| 6 |
import json
|
| 7 |
+
import os
|
| 8 |
import re
|
| 9 |
import subprocess
|
| 10 |
from collections import Counter
|
| 11 |
from pathlib import Path
|
| 12 |
|
| 13 |
+
from utils.dataset_pool import count_complete
|
| 14 |
+
|
| 15 |
VIDEO_EXTS = {'.mp4', '.mkv', '.webm', '.mov'}
|
| 16 |
+
ARRAY_RANGE_RE = re.compile(r'^(\d+)_\[(.+)\]$')
|
| 17 |
PROCESSED_REQUIRED_COLUMNS = {
|
| 18 |
'video_id',
|
| 19 |
'download_status',
|
|
|
|
| 21 |
'upload_status',
|
| 22 |
'archive_name',
|
| 23 |
}
|
| 24 |
+
GPU_PARTITIONS = ['gpu', 'gpu-redhat', 'cgpu']
|
| 25 |
+
DEFAULT_VIDEOS_PER_DWPOSE_JOB = 20
|
| 26 |
|
| 27 |
|
| 28 |
def run_command(cmd: list[str]) -> str:
|
|
|
|
| 39 |
return sum(1 for _ in directory.glob('*.claim'))
|
| 40 |
|
| 41 |
|
| 42 |
+
def aggregate_claims_by_job_key(directory: Path) -> dict[str, int]:
|
| 43 |
+
counts: Counter[str] = Counter()
|
| 44 |
+
if not directory.exists():
|
| 45 |
+
return {}
|
| 46 |
+
for path in directory.glob('*.claim'):
|
| 47 |
+
try:
|
| 48 |
+
lines = path.read_text(encoding='utf-8').splitlines()
|
| 49 |
+
except OSError:
|
| 50 |
+
continue
|
| 51 |
+
job_key = ''
|
| 52 |
+
for line in lines:
|
| 53 |
+
if line.startswith('job_key='):
|
| 54 |
+
job_key = line.split('=', 1)[1].strip()
|
| 55 |
+
break
|
| 56 |
+
if job_key:
|
| 57 |
+
counts[job_key] += 1
|
| 58 |
+
return dict(counts)
|
| 59 |
+
|
| 60 |
+
|
| 61 |
+
def read_videos_per_dwpose_job(root_dir: Path) -> int:
|
| 62 |
+
worker = root_dir / 'slurm' / 'process_dwpose_array.slurm'
|
| 63 |
+
if not worker.exists():
|
| 64 |
+
return DEFAULT_VIDEOS_PER_DWPOSE_JOB
|
| 65 |
+
try:
|
| 66 |
+
for line in worker.read_text().splitlines():
|
| 67 |
+
if line.startswith('VIDEOS_PER_JOB='):
|
| 68 |
+
m = re.search(r'\$\{VIDEOS_PER_JOB:-([0-9]+)\}', line)
|
| 69 |
+
if m:
|
| 70 |
+
return int(m.group(1))
|
| 71 |
+
except Exception:
|
| 72 |
+
pass
|
| 73 |
+
return DEFAULT_VIDEOS_PER_DWPOSE_JOB
|
| 74 |
|
| 75 |
|
| 76 |
def sum_file_sizes(paths: list[Path]) -> int:
|
|
|
|
| 99 |
m = ARRAY_RANGE_RE.match(jobid_token)
|
| 100 |
if not m:
|
| 101 |
return 1
|
| 102 |
+
body = m.group(2)
|
| 103 |
if '%' in body:
|
| 104 |
body = body.split('%', 1)[0]
|
| 105 |
total = 0
|
|
|
|
| 122 |
output = run_command(['squeue', '-u', username, '-h', '-o', '%i|%j|%T|%P'])
|
| 123 |
job_counts: Counter[str] = Counter()
|
| 124 |
partition_counts: Counter[str] = Counter()
|
| 125 |
+
active_tasks_by_partition: Counter[str] = Counter()
|
| 126 |
running_dwpose = 0
|
| 127 |
running_download = 0
|
| 128 |
pending_download = 0
|
| 129 |
+
pending_dwpose = 0
|
| 130 |
if output:
|
| 131 |
for line in output.splitlines():
|
| 132 |
parts = line.split('|')
|
|
|
|
| 136 |
count = expand_task_count(jobid_token)
|
| 137 |
job_counts[f'{job}|{state}'] += count
|
| 138 |
partition_counts[f'{job}|{partition}|{state}'] += count
|
| 139 |
+
if state in {'RUNNING', 'PENDING', 'CONFIGURING'}:
|
| 140 |
+
active_tasks_by_partition[partition] += count
|
| 141 |
if job == 'dwpose' and state == 'RUNNING':
|
| 142 |
running_dwpose += count
|
| 143 |
if job == 'download' and state == 'RUNNING':
|
| 144 |
running_download += count
|
| 145 |
if job == 'download' and state in {'PENDING', 'CONFIGURING'}:
|
| 146 |
pending_download += count
|
| 147 |
+
if job == 'dwpose' and state in {'PENDING', 'CONFIGURING'}:
|
| 148 |
+
pending_dwpose += count
|
| 149 |
total_download = running_download + pending_download
|
| 150 |
return {
|
| 151 |
'running_dwpose': running_dwpose,
|
| 152 |
'running_download': running_download,
|
| 153 |
+
'pending_dwpose_jobs': pending_dwpose,
|
| 154 |
'pending_download_jobs': pending_download,
|
| 155 |
'total_download_jobs': total_download,
|
| 156 |
'job_state_counts': dict(job_counts),
|
| 157 |
'job_partition_state_counts': dict(partition_counts),
|
| 158 |
+
'active_tasks_by_partition': dict(active_tasks_by_partition),
|
| 159 |
}
|
| 160 |
|
| 161 |
|
| 162 |
+
def gpu_partition_capacity(partitions: list[str], active_tasks_by_partition: dict[str, int]) -> list[dict[str, object]]:
|
| 163 |
+
qos_limit_by_part: dict[str, int] = {}
|
| 164 |
+
qos_output = run_command(['sacctmgr', 'show', 'qos', 'format=Name,MaxSubmitPU', '-P'])
|
| 165 |
+
if qos_output:
|
| 166 |
+
for line in qos_output.splitlines():
|
| 167 |
+
if not line.strip() or '|' not in line:
|
| 168 |
+
continue
|
| 169 |
+
name, max_submit = line.split('|', 1)
|
| 170 |
+
name = name.strip()
|
| 171 |
+
max_submit = max_submit.strip()
|
| 172 |
+
if name in partitions and max_submit:
|
| 173 |
+
try:
|
| 174 |
+
qos_limit_by_part[name] = int(max_submit)
|
| 175 |
+
except ValueError:
|
| 176 |
+
pass
|
| 177 |
+
|
| 178 |
+
rows: list[dict[str, object]] = []
|
| 179 |
+
for partition in partitions:
|
| 180 |
+
free_gpus = 0
|
| 181 |
+
nodes_output = run_command(['sinfo', '-h', '-N', '-p', partition, '-o', '%N'])
|
| 182 |
+
nodes = [line.strip() for line in nodes_output.splitlines() if line.strip()]
|
| 183 |
+
for node in nodes:
|
| 184 |
+
node_line = run_command(['scontrol', 'show', 'node', node, '-o'])
|
| 185 |
+
if not node_line:
|
| 186 |
+
continue
|
| 187 |
+
state_m = re.search(r'\bState=([^ ]+)', node_line)
|
| 188 |
+
state = state_m.group(1).lower() if state_m else ''
|
| 189 |
+
if any(flag in state for flag in ('drain', 'drained', 'down', 'fail', 'inval')):
|
| 190 |
+
continue
|
| 191 |
+
cfg_m = re.search(r'\bCfgTRES=.*?(?:,|^)gres/gpu=(\d+)', node_line)
|
| 192 |
+
alloc_m = re.search(r'\bAllocTRES=.*?(?:,|^)gres/gpu=(\d+)', node_line)
|
| 193 |
+
total = int(cfg_m.group(1)) if cfg_m else 0
|
| 194 |
+
used = int(alloc_m.group(1)) if alloc_m else 0
|
| 195 |
+
free = total - used
|
| 196 |
+
if free > 0:
|
| 197 |
+
free_gpus += free
|
| 198 |
+
active_tasks = int(active_tasks_by_partition.get(partition, 0))
|
| 199 |
+
qos_limit = qos_limit_by_part.get(partition)
|
| 200 |
+
submit_slots = free_gpus
|
| 201 |
+
if qos_limit is not None:
|
| 202 |
+
submit_slots = min(submit_slots, max(0, qos_limit - active_tasks))
|
| 203 |
+
rows.append({
|
| 204 |
+
'partition': partition,
|
| 205 |
+
'free_gpus': free_gpus,
|
| 206 |
+
'active_tasks': active_tasks,
|
| 207 |
+
'qos_limit': qos_limit,
|
| 208 |
+
'submit_slots': submit_slots,
|
| 209 |
+
})
|
| 210 |
+
return rows
|
| 211 |
+
|
| 212 |
+
|
| 213 |
def filesystem_avail_bytes(path: Path) -> int:
|
| 214 |
try:
|
| 215 |
proc = subprocess.run(['df', '-B1', str(path)], check=False, capture_output=True, text=True)
|
|
|
|
| 309 |
'--source-metadata-csv', str(runtime_root / 'SignVerse-2M-metadata_ori.csv'),
|
| 310 |
'--output-metadata-csv', str(runtime_root / 'SignVerse-2M-metadata_processed.csv'),
|
| 311 |
'--raw-video-dir', str(runtime_root / 'raw_video'),
|
| 312 |
+
'--scratch-raw-video-dir', str(Path(f'/scratch/{os.environ.get("USER", "sf895")}/SignVerse-2M-runtime/raw_video')),
|
| 313 |
'--raw-caption-dir', str(runtime_root / 'raw_caption'),
|
| 314 |
'--raw-metadata-dir', str(runtime_root / 'raw_metadata'),
|
| 315 |
'--dataset-dir', str(runtime_root / 'dataset'),
|
| 316 |
+
'--scratch-dataset-dir', str(Path(f'/scratch/{os.environ.get("USER", "sf895")}/SignVerse-2M-runtime/dataset')),
|
| 317 |
'--progress-path', str(runtime_root / 'archive_upload_progress.json'),
|
| 318 |
'--status-journal-path', str(runtime_root / 'upload_status_journal.jsonl'),
|
| 319 |
]
|
|
|
|
| 328 |
|
| 329 |
|
| 330 |
def main() -> None:
|
| 331 |
+
parser = argparse.ArgumentParser(description='Report SignVerse runtime status.')
|
| 332 |
parser.add_argument('--runtime-root', default='/home/sf895/SignVerse-2M-runtime')
|
| 333 |
parser.add_argument('--username', default='sf895')
|
| 334 |
parser.add_argument('--no-sync', action='store_true')
|
| 335 |
parser.add_argument('--json', action='store_true')
|
| 336 |
+
parser.add_argument('--include-partitions', action='store_true')
|
| 337 |
+
parser.add_argument('--scan-complete', action='store_true')
|
| 338 |
+
parser.add_argument('--scan-runtime-size', action='store_true')
|
| 339 |
args = parser.parse_args()
|
| 340 |
|
| 341 |
runtime_root = Path(args.runtime_root)
|
| 342 |
+
root_dir = Path('/cache/home/sf895/SignVerse-2M')
|
| 343 |
raw_dir = runtime_root / 'raw_video'
|
| 344 |
+
scratch_raw_dir = Path(f'/scratch/{os.environ.get("USER", "sf895")}/SignVerse-2M-runtime/raw_video')
|
| 345 |
dataset_dir = runtime_root / 'dataset'
|
| 346 |
+
scratch_dataset_dir = Path(f'/scratch/{os.environ.get("USER", "sf895")}/SignVerse-2M-runtime/dataset')
|
| 347 |
claims_dir = runtime_root / 'slurm' / 'state' / 'claims'
|
| 348 |
download_claims_dir = runtime_root / 'slurm' / 'state' / 'download_claims'
|
| 349 |
progress_path = runtime_root / 'archive_upload_progress.json'
|
|
|
|
| 354 |
if not args.no_sync:
|
| 355 |
sync_result = run_sync(runtime_root)
|
| 356 |
|
| 357 |
+
raw_complete: dict[str, Path] = {}
|
| 358 |
raw_temp: list[Path] = []
|
| 359 |
+
for current_raw_dir in [raw_dir, scratch_raw_dir]:
|
| 360 |
+
if not current_raw_dir.exists():
|
| 361 |
+
continue
|
| 362 |
+
for path in current_raw_dir.iterdir():
|
| 363 |
if not path.is_file():
|
| 364 |
continue
|
| 365 |
if path.suffix.lower() in VIDEO_EXTS:
|
| 366 |
+
raw_complete.setdefault(path.stem, path)
|
| 367 |
else:
|
| 368 |
raw_temp.append(path)
|
| 369 |
|
| 370 |
+
raw_size = sum_file_sizes(list(raw_complete.values()))
|
| 371 |
runtime_size = 0
|
| 372 |
if runtime_root.exists():
|
| 373 |
+
proc = subprocess.run(['du', '-sb', str(runtime_root)], check=False, capture_output=True, text=True)
|
| 374 |
+
if proc.returncode == 0 and proc.stdout.strip():
|
| 375 |
try:
|
| 376 |
+
runtime_size = int(proc.stdout.split()[0])
|
| 377 |
+
except Exception:
|
| 378 |
+
runtime_size = 0
|
|
|
|
| 379 |
|
| 380 |
source_rows = read_source_manifest_count(source_csv)
|
| 381 |
progress = read_processed_progress(processed_csv)
|
| 382 |
+
videos_per_dwpose_job = read_videos_per_dwpose_job(root_dir)
|
| 383 |
|
| 384 |
payload = {
|
| 385 |
'sync_result': sync_result,
|
|
|
|
| 387 |
'raw_videos': len(raw_complete),
|
| 388 |
'raw_temp_files': len(raw_temp),
|
| 389 |
'sent_to_gpu': count_claims(claims_dir),
|
| 390 |
+
'processed_complete': count_complete(dataset_dir, scratch_dataset_dir),
|
| 391 |
'active_downloads': count_claims(download_claims_dir),
|
| 392 |
'uploaded_archives': 0,
|
| 393 |
'uploaded_folders': 0,
|
|
|
|
| 411 |
payload['uploaded_archives'] = uploaded_archives
|
| 412 |
payload['uploaded_folders'] = uploaded_folders
|
| 413 |
payload.update(queue_status(args.username))
|
| 414 |
+
process_claims_by_job = aggregate_claims_by_job_key(claims_dir)
|
| 415 |
+
payload['videos_per_dwpose_job'] = videos_per_dwpose_job
|
| 416 |
+
payload['process_claim_job_keys'] = len(process_claims_by_job)
|
| 417 |
+
payload['process_claim_videos_actual'] = sum(process_claims_by_job.values())
|
| 418 |
+
payload['process_claim_videos_max_per_job'] = max(process_claims_by_job.values(), default=0)
|
| 419 |
+
payload['running_dwpose_jobs'] = payload['running_dwpose']
|
| 420 |
+
payload['running_dwpose_videos_estimated'] = payload['running_dwpose'] * videos_per_dwpose_job
|
| 421 |
+
payload['pending_dwpose_videos_estimated'] = payload['pending_dwpose_jobs'] * videos_per_dwpose_job
|
| 422 |
+
payload['total_dwpose_jobs'] = payload['running_dwpose'] + payload['pending_dwpose_jobs']
|
| 423 |
+
payload['total_dwpose_videos_estimated'] = payload['running_dwpose_videos_estimated'] + payload['pending_dwpose_videos_estimated']
|
| 424 |
+
if args.include_partitions:
|
| 425 |
+
payload['gpu_partition_capacity'] = gpu_partition_capacity(GPU_PARTITIONS, payload.get('active_tasks_by_partition', {}))
|
| 426 |
+
else:
|
| 427 |
+
payload['gpu_partition_capacity'] = []
|
| 428 |
+
payload['job_partition_state_counts'] = {}
|
| 429 |
+
payload['active_tasks_by_partition'] = {}
|
| 430 |
payload['csv_row_match'] = (payload['processed_rows'] == payload['source_rows']) if payload['csv_ok'] else False
|
| 431 |
|
| 432 |
if args.json:
|
|
|
|
| 439 |
print(f"raw_temp_files={payload['raw_temp_files']}")
|
| 440 |
print(f"sent_to_gpu={payload['sent_to_gpu']}")
|
| 441 |
print(f"running_dwpose={payload['running_dwpose']}")
|
| 442 |
+
print(f"running_dwpose_jobs={payload['running_dwpose_jobs']}")
|
| 443 |
+
print(f"pending_dwpose_jobs={payload['pending_dwpose_jobs']}")
|
| 444 |
+
print(f"total_dwpose_jobs={payload['total_dwpose_jobs']}")
|
| 445 |
+
print(f"videos_per_dwpose_job={payload['videos_per_dwpose_job']}")
|
| 446 |
+
print(f"process_claim_job_keys={payload['process_claim_job_keys']}")
|
| 447 |
+
print(f"process_claim_videos_actual={payload['process_claim_videos_actual']}")
|
| 448 |
+
print(f"process_claim_videos_max_per_job={payload['process_claim_videos_max_per_job']}")
|
| 449 |
+
print(f"running_dwpose_videos_estimated={payload['running_dwpose_videos_estimated']}")
|
| 450 |
+
print(f"pending_dwpose_videos_estimated={payload['pending_dwpose_videos_estimated']}")
|
| 451 |
+
print(f"total_dwpose_videos_estimated={payload['total_dwpose_videos_estimated']}")
|
| 452 |
print(f"processed_complete={payload['processed_complete']}")
|
| 453 |
print(f"active_downloads={payload['active_downloads']}")
|
| 454 |
print(f"running_download_jobs={payload['running_download']}")
|
|
|
|
| 468 |
print(f"process_ok_rows={payload['process_ok_rows']}")
|
| 469 |
print(f"process_running_rows={payload['process_running_rows']}")
|
| 470 |
print(f"upload_uploaded_rows={payload['upload_uploaded_rows']}")
|
| 471 |
+
for key in sorted(payload.get('job_partition_state_counts', {})):
|
| 472 |
+
print(f"job_partition_state[{key}]={payload['job_partition_state_counts'][key]}")
|
| 473 |
+
for row in payload.get('gpu_partition_capacity', []):
|
| 474 |
+
qos_limit = row['qos_limit'] if row['qos_limit'] is not None else 'na'
|
| 475 |
+
print(
|
| 476 |
+
f"gpu_partition[{row['partition']}]=free_gpus={row['free_gpus']},"
|
| 477 |
+
f"active_tasks={row['active_tasks']},qos_limit={qos_limit},submit_slots={row['submit_slots']}"
|
| 478 |
+
)
|
| 479 |
print(f"raw_size={human_bytes(payload['raw_size_bytes'])}")
|
| 480 |
print(f"runtime_size={human_bytes(payload['runtime_size_bytes'])}")
|
| 481 |
print(f"filesystem_avail={human_bytes(payload['filesystem_avail_bytes'])}")
|
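Note on the pending-task accounting above: squeue reports a pending array job as a single row whose id token looks like 12345_[0-17%8], so expand_task_count sums the bracketed ranges (after stripping the % throttle) to count tasks rather than jobs. A minimal standalone sketch of that expansion, with the range-summing loop filled in as an assumption consistent with the fragments shown in this hunk:

import re

ARRAY_RANGE_RE = re.compile(r'^(\d+)_\[(.+)\]$')

def expand_task_count(jobid_token: str) -> int:
    # Plain tokens such as "12345" or "12345_7" count as one task.
    m = ARRAY_RANGE_RE.match(jobid_token)
    if not m:
        return 1
    body = m.group(2)
    if '%' in body:           # drop the throttle suffix, e.g. "0-17%8" -> "0-17"
        body = body.split('%', 1)[0]
    total = 0
    for part in body.split(','):
        if '-' in part:
            lo, hi = part.split('-', 1)
            total += int(hi) - int(lo) + 1
        else:
            total += 1
    return total

# expand_task_count("12345_[0-17%8]") == 18; expand_task_count("12345_3") == 1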
scripts/sync_processed_csv_from_runtime.py
CHANGED
|
@@ -35,6 +35,9 @@ DEFAULT_COLUMNS = [
|
|
| 35 |
VIDEO_EXTS = {'.mp4', '.mkv', '.webm', '.mov'}
|
| 36 |
|
| 37 |
|
|
|
| 38 |
def read_csv_rows(path: Path) -> Tuple[List[Dict[str, str]], List[str]]:
|
| 39 |
text = path.read_text(encoding='utf-8-sig')
|
| 40 |
lines = [line for line in text.splitlines() if line.strip()]
|
|
@@ -104,9 +107,11 @@ def main():
|
|
| 104 |
ap.add_argument('--source-metadata-csv', type=Path, required=True)
|
| 105 |
ap.add_argument('--output-metadata-csv', type=Path, required=True)
|
| 106 |
ap.add_argument('--raw-video-dir', type=Path, required=True)
|
|
|
|
| 107 |
ap.add_argument('--raw-caption-dir', type=Path, required=True)
|
| 108 |
ap.add_argument('--raw-metadata-dir', type=Path, required=True)
|
| 109 |
ap.add_argument('--dataset-dir', type=Path, required=True)
|
|
|
|
| 110 |
ap.add_argument('--progress-path', type=Path, required=True)
|
| 111 |
ap.add_argument('--status-journal-path', type=Path, required=True)
|
| 112 |
args = ap.parse_args()
|
|
@@ -131,9 +136,15 @@ def main():
|
|
| 131 |
merged[k] = out_by_id[vid].get(k, '')
|
| 132 |
rows.append(merged)
|
| 133 |
|
| 134 |
-
raw_videos = {
|
|
|
|
| 135 |
raw_metadata = {p.stem: p for p in args.raw_metadata_dir.glob('*.json')} if args.raw_metadata_dir.exists() else {}
|
| 136 |
-
complete =
|
| 137 |
process_claims_dir = args.dataset_dir.parent / 'slurm' / 'state' / 'claims'
|
| 138 |
download_claims_dir = args.dataset_dir.parent / 'slurm' / 'state' / 'download_claims'
|
| 139 |
process_claims = {p.stem for p in process_claims_dir.glob('*.claim')} if process_claims_dir.exists() else set()
|
|
|
|
| 35 |
VIDEO_EXTS = {'.mp4', '.mkv', '.webm', '.mov'}
|
| 36 |
|
| 37 |
|
| 38 |
+
from utils.dataset_pool import complete_video_ids
|
| 39 |
+
|
| 40 |
+
|
| 41 |
def read_csv_rows(path: Path) -> Tuple[List[Dict[str, str]], List[str]]:
|
| 42 |
text = path.read_text(encoding='utf-8-sig')
|
| 43 |
lines = [line for line in text.splitlines() if line.strip()]
|
|
|
|
| 107 |
ap.add_argument('--source-metadata-csv', type=Path, required=True)
|
| 108 |
ap.add_argument('--output-metadata-csv', type=Path, required=True)
|
| 109 |
ap.add_argument('--raw-video-dir', type=Path, required=True)
|
| 110 |
+
ap.add_argument('--scratch-raw-video-dir', type=Path, default=None)
|
| 111 |
ap.add_argument('--raw-caption-dir', type=Path, required=True)
|
| 112 |
ap.add_argument('--raw-metadata-dir', type=Path, required=True)
|
| 113 |
ap.add_argument('--dataset-dir', type=Path, required=True)
|
| 114 |
+
ap.add_argument('--scratch-dataset-dir', type=Path, default=None)
|
| 115 |
ap.add_argument('--progress-path', type=Path, required=True)
|
| 116 |
ap.add_argument('--status-journal-path', type=Path, required=True)
|
| 117 |
args = ap.parse_args()
|
|
|
|
| 136 |
merged[k] = out_by_id[vid].get(k, '')
|
| 137 |
rows.append(merged)
|
| 138 |
|
| 139 |
+
raw_videos = {}
|
| 140 |
+
if args.raw_video_dir.exists():
|
| 141 |
+
raw_videos.update({p.stem: p for p in args.raw_video_dir.iterdir() if p.is_file() and p.suffix.lower() in VIDEO_EXTS})
|
| 142 |
+
if args.scratch_raw_video_dir is not None and args.scratch_raw_video_dir.exists():
|
| 143 |
+
for p in args.scratch_raw_video_dir.iterdir():
|
| 144 |
+
if p.is_file() and p.suffix.lower() in VIDEO_EXTS and p.stem not in raw_videos:
|
| 145 |
+
raw_videos[p.stem] = p
|
| 146 |
raw_metadata = {p.stem: p for p in args.raw_metadata_dir.glob('*.json')} if args.raw_metadata_dir.exists() else {}
|
| 147 |
+
complete = complete_video_ids(args.dataset_dir, args.scratch_dataset_dir)
|
| 148 |
process_claims_dir = args.dataset_dir.parent / 'slurm' / 'state' / 'claims'
|
| 149 |
download_claims_dir = args.dataset_dir.parent / 'slurm' / 'state' / 'download_claims'
|
| 150 |
process_claims = {p.stem for p in process_claims_dir.glob('*.claim')} if process_claims_dir.exists() else set()
|
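The sync script now defers completeness detection to complete_video_ids from utils/dataset_pool.py, whose body is not shown in this commit. A plausible shape, assuming a folder counts as complete when it carries an npz/ subdirectory in either the home or scratch dataset pool (the same signal process_dwpose_array.slurm checks); treat the helper below as an illustrative sketch, not the repo's implementation:

from pathlib import Path
from typing import Optional, Set

def complete_video_ids(dataset_dir: Path, scratch_dataset_dir: Optional[Path] = None) -> Set[str]:
    complete: Set[str] = set()
    for pool in (dataset_dir, scratch_dataset_dir):
        if pool is None or not pool.exists():
            continue
        for entry in pool.iterdir():
            # Assumed completeness marker: a populated npz/ folder per video id.
            if entry.is_dir() and (entry / 'npz').is_dir():
                complete.add(entry.name)
    return complete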
slurm/orchestrator_autorestart.slurm
CHANGED
|
@@ -6,26 +6,29 @@
|
|
| 6 |
#SBATCH --cpus-per-task=1
|
| 7 |
#SBATCH --mem=512M
|
| 8 |
#SBATCH --time=12:30:00
|
| 9 |
-
#SBATCH --output=/
|
| 10 |
-
#SBATCH --error=/
|
| 11 |
|
| 12 |
set -euo pipefail
|
| 13 |
|
| 14 |
ROOT_DIR="${ROOT_DIR:-/cache/home/sf895/SignVerse-2M}"
|
| 15 |
RUNTIME_ROOT="${RUNTIME_ROOT:-/home/sf895/SignVerse-2M-runtime}"
|
|
|
|
| 16 |
ORCH_SUBMIT_SCRIPT="${ORCH_SUBMIT_SCRIPT:-$ROOT_DIR/reproduce_independently_slurm.sh}"
|
| 17 |
STOP_FILE="${STOP_FILE:-$RUNTIME_ROOT/STOP_AUTORESTART}"
|
| 18 |
ROTATE_SECONDS="${ROTATE_SECONDS:-43200}"
|
| 19 |
CHECK_INTERVAL_SECONDS="${CHECK_INTERVAL_SECONDS:-60}"
|
| 20 |
ORCHESTRATOR_TIME="${ORCHESTRATOR_TIME:-12:00:00}"
|
| 21 |
-
LAUNCHER_SCRIPT="${LAUNCHER_SCRIPT:-$
|
| 22 |
LAUNCHER_PARTITION="${LAUNCHER_PARTITION:-main}"
|
| 23 |
LAUNCHER_ACCOUNT="${LAUNCHER_ACCOUNT:-}"
|
| 24 |
LAUNCHER_TIME="${LAUNCHER_TIME:-12:30:00}"
|
| 25 |
LAUNCHER_CPUS_PER_TASK="${LAUNCHER_CPUS_PER_TASK:-1}"
|
| 26 |
LAUNCHER_MEM="${LAUNCHER_MEM:-512M}"
|
|
|
|
|
|
|
| 27 |
|
| 28 |
-
mkdir -p "$RUNTIME_ROOT/slurm/logs"
|
| 29 |
|
| 30 |
now_ts() {
|
| 31 |
date '+%Y-%m-%d %H:%M:%S'
|
|
@@ -39,9 +42,34 @@ current_orchestrator_ids() {
|
|
| 39 |
squeue -u "$USER" -h -o '%A|%j|%T' | awk -F'|' '$2=="sign-dwpose-orch" && ($3=="RUNNING" || $3=="PENDING" || $3=="CONFIGURING") {print $1}'
|
| 40 |
}
|
| 41 |
|
|
|
|
| 42 |
submit_orchestrator() {
|
| 43 |
log "submitting orchestration with ORCHESTRATOR_TIME=$ORCHESTRATOR_TIME"
|
| 44 |
-
env
|
|
|
|
|
|
| 45 |
}
|
| 46 |
|
| 47 |
cancel_orchestrators() {
|
|
@@ -62,7 +90,7 @@ submit_next_launcher() {
|
|
| 62 |
if [[ -n "$LAUNCHER_ACCOUNT" ]]; then
|
| 63 |
cmd+=("--account=$LAUNCHER_ACCOUNT")
|
| 64 |
fi
|
| 65 |
-
cmd+=("--export=ALL,ROOT_DIR=$ROOT_DIR,RUNTIME_ROOT=$RUNTIME_ROOT,ORCH_SUBMIT_SCRIPT=$ORCH_SUBMIT_SCRIPT,STOP_FILE=$STOP_FILE,ROTATE_SECONDS=$ROTATE_SECONDS,CHECK_INTERVAL_SECONDS=$CHECK_INTERVAL_SECONDS,ORCHESTRATOR_TIME=$ORCHESTRATOR_TIME,LAUNCHER_SCRIPT=$LAUNCHER_SCRIPT,LAUNCHER_PARTITION=$LAUNCHER_PARTITION,LAUNCHER_ACCOUNT=$LAUNCHER_ACCOUNT,LAUNCHER_TIME=$LAUNCHER_TIME,LAUNCHER_CPUS_PER_TASK=$LAUNCHER_CPUS_PER_TASK,LAUNCHER_MEM=$LAUNCHER_MEM")
|
| 66 |
cmd+=("$LAUNCHER_SCRIPT")
|
| 67 |
log "submitting next launcher"
|
| 68 |
"${cmd[@]}"
|
|
@@ -75,6 +103,7 @@ if [[ -f "$STOP_FILE" ]]; then
|
|
| 75 |
fi
|
| 76 |
|
| 77 |
if [[ -z "$(current_orchestrator_ids || true)" ]]; then
|
|
|
|
| 78 |
submit_orchestrator
|
| 79 |
else
|
| 80 |
log "existing orchestration detected; not submitting a duplicate"
|
|
@@ -86,7 +115,8 @@ while (( elapsed < ROTATE_SECONDS )); do
|
|
| 86 |
exit 0
|
| 87 |
fi
|
| 88 |
if [[ -z "$(current_orchestrator_ids || true)" ]]; then
|
| 89 |
-
log "no live orchestration detected;
|
|
|
|
| 90 |
submit_orchestrator
|
| 91 |
fi
|
| 92 |
sleep "$CHECK_INTERVAL_SECONDS"
|
|
@@ -98,7 +128,8 @@ if [[ -f "$STOP_FILE" ]]; then
|
|
| 98 |
exit 0
|
| 99 |
fi
|
| 100 |
|
| 101 |
-
log "rotation boundary reached;
|
|
|
|
| 102 |
cancel_orchestrators
|
| 103 |
sleep 5
|
| 104 |
submit_orchestrator
|
|
|
|
| 6 |
#SBATCH --cpus-per-task=1
|
| 7 |
#SBATCH --mem=512M
|
| 8 |
#SBATCH --time=12:30:00
|
| 9 |
+
#SBATCH --output=/scratch/%u/SignVerse-2M-runtime/slurm/logs/orchestrator_launcher_%j.out
|
| 10 |
+
#SBATCH --error=/scratch/%u/SignVerse-2M-runtime/slurm/logs/orchestrator_launcher_%j.err
|
| 11 |
|
| 12 |
set -euo pipefail
|
| 13 |
|
| 14 |
ROOT_DIR="${ROOT_DIR:-/cache/home/sf895/SignVerse-2M}"
|
| 15 |
RUNTIME_ROOT="${RUNTIME_ROOT:-/home/sf895/SignVerse-2M-runtime}"
|
| 16 |
+
RUNTIME_LOG_ROOT="${RUNTIME_LOG_ROOT:-/scratch/$USER/SignVerse-2M-runtime/slurm/logs}"
|
| 17 |
ORCH_SUBMIT_SCRIPT="${ORCH_SUBMIT_SCRIPT:-$ROOT_DIR/reproduce_independently_slurm.sh}"
|
| 18 |
STOP_FILE="${STOP_FILE:-$RUNTIME_ROOT/STOP_AUTORESTART}"
|
| 19 |
ROTATE_SECONDS="${ROTATE_SECONDS:-43200}"
|
| 20 |
CHECK_INTERVAL_SECONDS="${CHECK_INTERVAL_SECONDS:-60}"
|
| 21 |
ORCHESTRATOR_TIME="${ORCHESTRATOR_TIME:-12:00:00}"
|
| 22 |
+
LAUNCHER_SCRIPT="${LAUNCHER_SCRIPT:-$ROOT_DIR/slurm/orchestrator_autorestart.slurm}"
|
| 23 |
LAUNCHER_PARTITION="${LAUNCHER_PARTITION:-main}"
|
| 24 |
LAUNCHER_ACCOUNT="${LAUNCHER_ACCOUNT:-}"
|
| 25 |
LAUNCHER_TIME="${LAUNCHER_TIME:-12:30:00}"
|
| 26 |
LAUNCHER_CPUS_PER_TASK="${LAUNCHER_CPUS_PER_TASK:-1}"
|
| 27 |
LAUNCHER_MEM="${LAUNCHER_MEM:-512M}"
|
| 28 |
+
REAP_PENDING_ON_ROTATE="${REAP_PENDING_ON_ROTATE:-1}"
|
| 29 |
+
REAP_JOB_NAMES="${REAP_JOB_NAMES:-dwpose download}"
|
| 30 |
|
| 31 |
+
mkdir -p "$RUNTIME_ROOT/slurm/logs" "$RUNTIME_LOG_ROOT"
|
| 32 |
|
| 33 |
now_ts() {
|
| 34 |
date '+%Y-%m-%d %H:%M:%S'
|
|
|
|
| 42 |
squeue -u "$USER" -h -o '%A|%j|%T' | awk -F'|' '$2=="sign-dwpose-orch" && ($3=="RUNNING" || $3=="PENDING" || $3=="CONFIGURING") {print $1}'
|
| 43 |
}
|
| 44 |
|
| 45 |
+
current_reapable_pending_ids() {
|
| 46 |
+
local names_regex
|
| 47 |
+
names_regex="$(printf '%s\n' $REAP_JOB_NAMES | paste -sd'|' -)"
|
| 48 |
+
squeue -u "$USER" -h -o '%A|%j|%T' | awk -F'|' -v names_re="$names_regex" '$2 ~ ("^(" names_re ")$") && $3=="PENDING" {print $1}' | sort -u
|
| 49 |
+
}
|
| 50 |
+
|
| 51 |
+
cancel_reapable_pending_jobs() {
|
| 52 |
+
[[ "$REAP_PENDING_ON_ROTATE" == "1" ]] || return 0
|
| 53 |
+
local ids
|
| 54 |
+
ids="$(current_reapable_pending_ids || true)"
|
| 55 |
+
if [[ -n "$ids" ]]; then
|
| 56 |
+
log "cancelling stale pending worker jobs: $(echo "$ids" | tr '\n' ' ')"
|
| 57 |
+
scancel $ids || true
|
| 58 |
+
else
|
| 59 |
+
log "no stale pending worker jobs to cancel"
|
| 60 |
+
fi
|
| 61 |
+
}
|
| 62 |
+
|
| 63 |
submit_orchestrator() {
|
| 64 |
log "submitting orchestration with ORCHESTRATOR_TIME=$ORCHESTRATOR_TIME"
|
| 65 |
+
env \
|
| 66 |
+
-u SLURM_JOB_ID \
|
| 67 |
+
-u SLURM_JOB_NAME \
|
| 68 |
+
-u SLURM_JOB_NODELIST \
|
| 69 |
+
-u SLURM_ARRAY_JOB_ID \
|
| 70 |
+
-u SLURM_ARRAY_TASK_ID \
|
| 71 |
+
ORCHESTRATOR_TIME="$ORCHESTRATOR_TIME" \
|
| 72 |
+
bash "$ORCH_SUBMIT_SCRIPT"
|
| 73 |
}
|
| 74 |
|
| 75 |
cancel_orchestrators() {
|
|
|
|
| 90 |
if [[ -n "$LAUNCHER_ACCOUNT" ]]; then
|
| 91 |
cmd+=("--account=$LAUNCHER_ACCOUNT")
|
| 92 |
fi
|
| 93 |
+
cmd+=("--export=ALL,ROOT_DIR=$ROOT_DIR,RUNTIME_ROOT=$RUNTIME_ROOT,RUNTIME_LOG_ROOT=$RUNTIME_LOG_ROOT,ORCH_SUBMIT_SCRIPT=$ORCH_SUBMIT_SCRIPT,STOP_FILE=$STOP_FILE,ROTATE_SECONDS=$ROTATE_SECONDS,CHECK_INTERVAL_SECONDS=$CHECK_INTERVAL_SECONDS,ORCHESTRATOR_TIME=$ORCHESTRATOR_TIME,LAUNCHER_SCRIPT=$LAUNCHER_SCRIPT,LAUNCHER_PARTITION=$LAUNCHER_PARTITION,LAUNCHER_ACCOUNT=$LAUNCHER_ACCOUNT,LAUNCHER_TIME=$LAUNCHER_TIME,LAUNCHER_CPUS_PER_TASK=$LAUNCHER_CPUS_PER_TASK,LAUNCHER_MEM=$LAUNCHER_MEM,REAP_PENDING_ON_ROTATE=$REAP_PENDING_ON_ROTATE,REAP_JOB_NAMES=$REAP_JOB_NAMES")
|
| 94 |
cmd+=("$LAUNCHER_SCRIPT")
|
| 95 |
log "submitting next launcher"
|
| 96 |
"${cmd[@]}"
|
|
|
|
| 103 |
fi
|
| 104 |
|
| 105 |
if [[ -z "$(current_orchestrator_ids || true)" ]]; then
|
| 106 |
+
cancel_reapable_pending_jobs
|
| 107 |
submit_orchestrator
|
| 108 |
else
|
| 109 |
log "existing orchestration detected; not submitting a duplicate"
|
|
|
|
| 115 |
exit 0
|
| 116 |
fi
|
| 117 |
if [[ -z "$(current_orchestrator_ids || true)" ]]; then
|
| 118 |
+
log "no live orchestration detected; reaping old pending workers before replacement"
|
| 119 |
+
cancel_reapable_pending_jobs
|
| 120 |
submit_orchestrator
|
| 121 |
fi
|
| 122 |
sleep "$CHECK_INTERVAL_SECONDS"
|
|
|
|
| 128 |
exit 0
|
| 129 |
fi
|
| 130 |
|
| 131 |
+
log "rotation boundary reached; reaping old pending workers and restarting orchestration"
|
| 132 |
+
cancel_reapable_pending_jobs
|
| 133 |
cancel_orchestrators
|
| 134 |
sleep 5
|
| 135 |
submit_orchestrator
|
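The new reaping step selects only PENDING jobs whose names match REAP_JOB_NAMES, so running dwpose and download tasks are never touched at rotation. The same selection, sketched in Python for readers who want to audit it outside the launcher (format string and job names mirror the squeue call above; illustrative only):

import subprocess

def reapable_pending_ids(username: str, names: tuple = ("dwpose", "download")) -> list:
    out = subprocess.run(
        ["squeue", "-u", username, "-h", "-o", "%A|%j|%T"],
        check=False, capture_output=True, text=True,
    ).stdout
    ids = set()
    for line in out.splitlines():
        parts = line.split("|")
        # Keep only pending jobs whose name is in the reapable set.
        if len(parts) == 3 and parts[1] in names and parts[2] == "PENDING":
            ids.add(parts[0])
    return sorted(ids)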
slurm/process_download_array.slurm
CHANGED
|
@@ -18,10 +18,14 @@ CONDA_ENV="${CONDA_ENV:-signx2}"
|
|
| 18 |
SOURCE_METADATA_CSV="${SOURCE_METADATA_CSV:-$RUNTIME_ROOT/SignVerse-2M-metadata_ori.csv}"
|
| 19 |
OUTPUT_METADATA_CSV="${OUTPUT_METADATA_CSV:-$RUNTIME_ROOT/SignVerse-2M-metadata_processed.csv}"
|
| 20 |
RAW_VIDEO_DIR="${RAW_VIDEO_DIR:-$RUNTIME_ROOT/raw_video}"
|
|
|
|
|
|
|
|
|
|
| 21 |
RAW_CAPTION_DIR="${RAW_CAPTION_DIR:-$RUNTIME_ROOT/raw_caption}"
|
| 22 |
RAW_METADATA_DIR="${RAW_METADATA_DIR:-$RUNTIME_ROOT/raw_metadata}"
|
| 23 |
DATASET_DIR="${DATASET_DIR:-$RUNTIME_ROOT/dataset}"
|
| 24 |
STATS_NPZ="${STATS_NPZ:-$RUNTIME_ROOT/stats.npz}"
|
|
|
|
| 25 |
PIPELINE01="${PIPELINE01:-$ROOT_DIR/scripts/pipeline01_download_video_fix_caption.py}"
|
| 26 |
DOWNLOAD_CLAIM_DIR="${DOWNLOAD_CLAIM_DIR:-$STATE_ROOT/slurm/state/download_claims}"
|
| 27 |
DOWNLOAD_CSV_LOCK_PATH="${DOWNLOAD_CSV_LOCK_PATH:-$STATE_ROOT/SignVerse-2M-metadata_processed.csv.lock}"
|
|
@@ -84,10 +88,14 @@ cmd=(python "$PIPELINE01"
|
|
| 84 |
--source-metadata-csv "$SOURCE_METADATA_CSV"
|
| 85 |
--output-metadata-csv "$OUTPUT_METADATA_CSV"
|
| 86 |
--raw-video-dir "$RAW_VIDEO_DIR"
|
|
|
|
|
|
|
|
|
|
| 87 |
--raw-caption-dir "$RAW_CAPTION_DIR"
|
| 88 |
--raw-metadata-dir "$RAW_METADATA_DIR"
|
| 89 |
--dataset-dir "$DATASET_DIR"
|
| 90 |
--stats-npz "$STATS_NPZ"
|
|
|
|
| 91 |
--csv-lock-path "$DOWNLOAD_CSV_LOCK_PATH"
|
| 92 |
--limit 1
|
| 93 |
--video-ids="$VIDEO_ID"
|
|
|
|
| 18 |
SOURCE_METADATA_CSV="${SOURCE_METADATA_CSV:-$RUNTIME_ROOT/SignVerse-2M-metadata_ori.csv}"
|
| 19 |
OUTPUT_METADATA_CSV="${OUTPUT_METADATA_CSV:-$RUNTIME_ROOT/SignVerse-2M-metadata_processed.csv}"
|
| 20 |
RAW_VIDEO_DIR="${RAW_VIDEO_DIR:-$RUNTIME_ROOT/raw_video}"
|
| 21 |
+
SCRATCH_RAW_VIDEO_DIR="${SCRATCH_RAW_VIDEO_DIR:-/scratch/$USER/SignVerse-2M-runtime/raw_video}"
|
| 22 |
+
HOME_RAW_VIDEO_LIMIT="${HOME_RAW_VIDEO_LIMIT:-180}"
|
| 23 |
+
SCRATCH_RAW_VIDEO_LIMIT="${SCRATCH_RAW_VIDEO_LIMIT:-2800}"
|
| 24 |
RAW_CAPTION_DIR="${RAW_CAPTION_DIR:-$RUNTIME_ROOT/raw_caption}"
|
| 25 |
RAW_METADATA_DIR="${RAW_METADATA_DIR:-$RUNTIME_ROOT/raw_metadata}"
|
| 26 |
DATASET_DIR="${DATASET_DIR:-$RUNTIME_ROOT/dataset}"
|
| 27 |
STATS_NPZ="${STATS_NPZ:-$RUNTIME_ROOT/stats.npz}"
|
| 28 |
+
STATUS_JOURNAL_PATH="${STATUS_JOURNAL_PATH:-$RUNTIME_ROOT/upload_status_journal.jsonl}"
|
| 29 |
PIPELINE01="${PIPELINE01:-$ROOT_DIR/scripts/pipeline01_download_video_fix_caption.py}"
|
| 30 |
DOWNLOAD_CLAIM_DIR="${DOWNLOAD_CLAIM_DIR:-$STATE_ROOT/slurm/state/download_claims}"
|
| 31 |
DOWNLOAD_CSV_LOCK_PATH="${DOWNLOAD_CSV_LOCK_PATH:-$STATE_ROOT/SignVerse-2M-metadata_processed.csv.lock}"
|
|
|
|
| 88 |
--source-metadata-csv "$SOURCE_METADATA_CSV"
|
| 89 |
--output-metadata-csv "$OUTPUT_METADATA_CSV"
|
| 90 |
--raw-video-dir "$RAW_VIDEO_DIR"
|
| 91 |
+
--scratch-raw-video-dir "$SCRATCH_RAW_VIDEO_DIR"
|
| 92 |
+
--home-raw-video-limit "$HOME_RAW_VIDEO_LIMIT"
|
| 93 |
+
--scratch-raw-video-limit "$SCRATCH_RAW_VIDEO_LIMIT"
|
| 94 |
--raw-caption-dir "$RAW_CAPTION_DIR"
|
| 95 |
--raw-metadata-dir "$RAW_METADATA_DIR"
|
| 96 |
--dataset-dir "$DATASET_DIR"
|
| 97 |
--stats-npz "$STATS_NPZ"
|
| 98 |
+
--status-journal-path "$STATUS_JOURNAL_PATH"
|
| 99 |
--csv-lock-path "$DOWNLOAD_CSV_LOCK_PATH"
|
| 100 |
--limit 1
|
| 101 |
--video-ids="$VIDEO_ID"
|
slurm/process_dwpose_array.slurm
CHANGED
|
@@ -15,10 +15,13 @@ ROOT_DIR="${ROOT_DIR:-/home/sf895/SignVerse-2M}"
|
|
| 15 |
RUNTIME_ROOT="${RUNTIME_ROOT:-/home/sf895/SignVerse-2M-runtime}"
|
| 16 |
STATE_ROOT="${STATE_ROOT:-/home/sf895/SignVerse-2M-runtime}"
|
| 17 |
CONDA_SH="${CONDA_SH:-/home/sf895/miniconda3/etc/profile.d/conda.sh}"
|
| 18 |
-
CONDA_ENV="${CONDA_ENV:-
|
| 19 |
RAW_VIDEO_DIR="${RAW_VIDEO_DIR:-$RUNTIME_ROOT/raw_video}"
|
|
|
|
| 20 |
DATASET_DIR="${DATASET_DIR:-$RUNTIME_ROOT/dataset}"
|
|
|
|
| 21 |
STATS_NPZ="${STATS_NPZ:-$RUNTIME_ROOT/stats.npz}"
|
|
|
|
| 22 |
PIPELINE02="${PIPELINE02:-$ROOT_DIR/scripts/pipeline02_extract_dwpose_from_video.py}"
|
| 23 |
FPS="${FPS:-24}"
|
| 24 |
TMP_ROOT="${TMP_ROOT:-${SLURM_TMPDIR:-/tmp}}"
|
|
@@ -27,6 +30,15 @@ DELETE_SOURCE_ON_SUCCESS="${DELETE_SOURCE_ON_SUCCESS:-0}"
|
|
| 27 |
CLAIM_DIR="${CLAIM_DIR:-$STATE_ROOT/slurm/state/claims}"
|
| 28 |
RETRY_DIR="${RETRY_DIR:-$STATE_ROOT/slurm/state/gpu_init_retries}"
|
| 29 |
MAX_GPU_INIT_RETRIES="${MAX_GPU_INIT_RETRIES:-3}"
|
|
|
|
|
|
|
|
|
| 30 |
|
| 31 |
MANIFEST="${MANIFEST:-${1:-}}"
|
| 32 |
if [[ -z "$MANIFEST" ]]; then
|
|
@@ -46,24 +58,25 @@ if [[ ! -f "$CONDA_SH" ]]; then
|
|
| 46 |
exit 1
|
| 47 |
fi
|
| 48 |
|
| 49 |
-
|
| 50 |
-
if [[
|
| 51 |
-
echo "No video
|
| 52 |
exit 1
|
| 53 |
fi
|
| 54 |
|
| 55 |
mkdir -p "$CLAIM_DIR" "$RETRY_DIR"
|
| 56 |
-
|
| 57 |
-
|
| 58 |
-
|
| 59 |
-
|
|
|
|
| 60 |
}
|
| 61 |
-
trap
|
| 62 |
|
| 63 |
export OMP_NUM_THREADS="${SLURM_CPUS_PER_TASK:-1}"
|
| 64 |
export MKL_NUM_THREADS="${SLURM_CPUS_PER_TASK:-1}"
|
| 65 |
|
| 66 |
-
echo "[$(date '+%F %T')] job=${SLURM_JOB_ID:-na} task=${SLURM_ARRAY_TASK_ID} host=$(hostname) gpu=${CUDA_VISIBLE_DEVICES:-unset}
|
| 67 |
|
| 68 |
# shellcheck disable=SC1090
|
| 69 |
source "$CONDA_SH"
|
|
@@ -88,11 +101,13 @@ echo "Using conda env prefix=$CONDA_ENV_PREFIX"
|
|
| 88 |
echo "Using LD_PRELOAD=$LD_PRELOAD"
|
| 89 |
|
| 90 |
gpu_init_retry_state() {
|
| 91 |
-
local
|
| 92 |
-
local
|
| 93 |
-
local
|
| 94 |
-
local
|
| 95 |
-
|
|
|
|
|
|
|
| 96 |
attempts=$attempts
|
| 97 |
host=$host_name
|
| 98 |
gpu=$gpu_id
|
|
@@ -102,35 +117,41 @@ STATE
|
|
| 102 |
}
|
| 103 |
|
| 104 |
get_retry_attempts() {
|
| 105 |
-
|
| 106 |
-
|
|
|
|
|
|
|
| 107 |
else
|
| 108 |
echo 0
|
| 109 |
fi
|
| 110 |
}
|
| 111 |
|
| 112 |
mark_retry_exhausted() {
|
| 113 |
-
local
|
| 114 |
-
local
|
|
|
|
| 115 |
python - <<PY
|
| 116 |
from pathlib import Path
|
| 117 |
import sys, time
|
| 118 |
root = Path(r"$ROOT_DIR")
|
| 119 |
stats_path = Path(r"$STATS_NPZ")
|
| 120 |
-
video_id = r"$
|
| 121 |
attempts = int(r"$attempts")
|
| 122 |
reason = r'''$reason'''
|
| 123 |
sys.path.insert(0, str(root))
|
| 124 |
-
from utils.stats_npz import
|
| 125 |
-
|
|
|
|
| 126 |
stats_path,
|
|
|
|
| 127 |
video_id,
|
| 128 |
process_status="skipped",
|
| 129 |
last_error=f"gpu_init_retry_exhausted after {attempts} attempts: {reason}",
|
| 130 |
updated_at=time.strftime("%Y-%m-%d %H:%M:%S"),
|
| 131 |
)
|
| 132 |
PY
|
| 133 |
-
rm -f "$RAW_VIDEO_DIR/$
|
|
|
|
| 134 |
}
|
| 135 |
|
| 136 |
should_retry_gpu_init_failure() {
|
|
@@ -153,44 +174,81 @@ env_cmd=(env
|
|
| 153 |
"CONDA_NO_PLUGINS=true"
|
| 154 |
)
|
| 155 |
|
| 156 |
-
|
| 157 |
--raw-video-dir "$RAW_VIDEO_DIR"
|
|
|
|
| 158 |
--dataset-dir "$DATASET_DIR"
|
|
|
|
| 159 |
--stats-npz "$STATS_NPZ"
|
|
|
|
| 160 |
--fps "$FPS"
|
| 161 |
--workers 1
|
| 162 |
--tmp-root "$TMP_ROOT"
|
| 163 |
-
--video-ids="$VIDEO_ID"
|
| 164 |
)
|
|
|
|
|
|
|
| 165 |
if [[ "$FORCE_PROCESS" == "1" ]]; then
|
| 166 |
-
|
| 167 |
fi
|
| 168 |
if [[ "$DELETE_SOURCE_ON_SUCCESS" == "1" ]]; then
|
| 169 |
-
|
| 170 |
fi
|
| 171 |
|
| 172 |
-
|
| 173 |
-
|
| 174 |
-
|
| 175 |
-
|
| 176 |
-
|
| 177 |
-
|
| 178 |
-
|
| 179 |
-
|
| 180 |
-
|
| 181 |
-
|
| 182 |
-
|
| 183 |
-
|
| 184 |
-
|
| 185 |
-
|
| 186 |
-
|
| 187 |
-
|
| 188 |
-
|
|
|
|
| 189 |
fi
|
| 190 |
-
|
| 191 |
-
|
|
|
|
|
|
|
| 192 |
fi
|
| 193 |
-
exit "$cmd_status"
|
| 194 |
-
fi
|
| 195 |
|
| 196 |
-
rm -f "$RETRY_STATE_PATH"
|
|
|
|
|
|
|
|
|
|
|
|
| 15 |
RUNTIME_ROOT="${RUNTIME_ROOT:-/home/sf895/SignVerse-2M-runtime}"
|
| 16 |
STATE_ROOT="${STATE_ROOT:-/home/sf895/SignVerse-2M-runtime}"
|
| 17 |
CONDA_SH="${CONDA_SH:-/home/sf895/miniconda3/etc/profile.d/conda.sh}"
|
| 18 |
+
CONDA_ENV="${CONDA_ENV:-signx2}"
|
| 19 |
RAW_VIDEO_DIR="${RAW_VIDEO_DIR:-$RUNTIME_ROOT/raw_video}"
|
| 20 |
+
SCRATCH_RAW_VIDEO_DIR="${SCRATCH_RAW_VIDEO_DIR:-/scratch/$USER/SignVerse-2M-runtime/raw_video}"
|
| 21 |
DATASET_DIR="${DATASET_DIR:-$RUNTIME_ROOT/dataset}"
|
| 22 |
+
SCRATCH_DATASET_DIR="${SCRATCH_DATASET_DIR:-/scratch/$USER/SignVerse-2M-runtime/dataset}"
|
| 23 |
STATS_NPZ="${STATS_NPZ:-$RUNTIME_ROOT/stats.npz}"
|
| 24 |
+
STATUS_JOURNAL_PATH="${STATUS_JOURNAL_PATH:-$RUNTIME_ROOT/upload_status_journal.jsonl}"
|
| 25 |
PIPELINE02="${PIPELINE02:-$ROOT_DIR/scripts/pipeline02_extract_dwpose_from_video.py}"
|
| 26 |
FPS="${FPS:-24}"
|
| 27 |
TMP_ROOT="${TMP_ROOT:-${SLURM_TMPDIR:-/tmp}}"
|
|
|
|
| 30 |
CLAIM_DIR="${CLAIM_DIR:-$STATE_ROOT/slurm/state/claims}"
|
| 31 |
RETRY_DIR="${RETRY_DIR:-$STATE_ROOT/slurm/state/gpu_init_retries}"
|
| 32 |
MAX_GPU_INIT_RETRIES="${MAX_GPU_INIT_RETRIES:-3}"
|
| 33 |
+
OPTIMIZED_MODE="${OPTIMIZED_MODE:-1}"
|
| 34 |
+
OPTIMIZED_PROVIDER="${OPTIMIZED_PROVIDER:-cuda}"
|
| 35 |
+
OPTIMIZED_FRAME_BATCH_SIZE="${OPTIMIZED_FRAME_BATCH_SIZE:-8}"
|
| 36 |
+
OPTIMIZED_DETECT_RESOLUTION="${OPTIMIZED_DETECT_RESOLUTION:-512}"
|
| 37 |
+
OPTIMIZED_FRAME_STRIDE="${OPTIMIZED_FRAME_STRIDE:-1}"
|
| 38 |
+
OPTIMIZED_IO_BINDING="${OPTIMIZED_IO_BINDING:-1}"
|
| 39 |
+
OPTIMIZED_GPU_DETECTOR_POSTPROCESS="${OPTIMIZED_GPU_DETECTOR_POSTPROCESS:-1}"
|
| 40 |
+
OPTIMIZED_GPU_POSE_PREPROCESS="${OPTIMIZED_GPU_POSE_PREPROCESS:-0}"
|
| 41 |
+
VIDEOS_PER_JOB="${VIDEOS_PER_JOB:-5}"
|
| 42 |
|
| 43 |
MANIFEST="${MANIFEST:-${1:-}}"
|
| 44 |
if [[ -z "$MANIFEST" ]]; then
|
|
|
|
| 58 |
exit 1
|
| 59 |
fi
|
| 60 |
|
| 61 |
+
mapfile -t ALL_VIDEO_IDS < <(sed -n "$((SLURM_ARRAY_TASK_ID * VIDEOS_PER_JOB + 1)),$(((SLURM_ARRAY_TASK_ID + 1) * VIDEOS_PER_JOB))p" "$MANIFEST")
|
| 62 |
+
if [[ "${#ALL_VIDEO_IDS[@]}" -eq 0 ]]; then
|
| 63 |
+
echo "No video ids found for task index ${SLURM_ARRAY_TASK_ID} in manifest $MANIFEST" >&2
|
| 64 |
exit 1
|
| 65 |
fi
|
| 66 |
|
| 67 |
mkdir -p "$CLAIM_DIR" "$RETRY_DIR"
|
| 68 |
+
cleanup_claims() {
|
| 69 |
+
local video_id
|
| 70 |
+
for video_id in "${ALL_VIDEO_IDS[@]}"; do
|
| 71 |
+
rm -f "$CLAIM_DIR/${video_id}.claim"
|
| 72 |
+
done
|
| 73 |
}
|
| 74 |
+
trap cleanup_claims EXIT
|
| 75 |
|
| 76 |
export OMP_NUM_THREADS="${SLURM_CPUS_PER_TASK:-1}"
|
| 77 |
export MKL_NUM_THREADS="${SLURM_CPUS_PER_TASK:-1}"
|
| 78 |
|
| 79 |
+
echo "[$(date '+%F %T')] job=${SLURM_JOB_ID:-na} task=${SLURM_ARRAY_TASK_ID} host=$(hostname) gpu=${CUDA_VISIBLE_DEVICES:-unset} videos=${#ALL_VIDEO_IDS[@]} first_video=${ALL_VIDEO_IDS[0]}"
|
| 80 |
|
| 81 |
# shellcheck disable=SC1090
|
| 82 |
source "$CONDA_SH"
|
|
|
|
| 101 |
echo "Using LD_PRELOAD=$LD_PRELOAD"
|
| 102 |
|
| 103 |
gpu_init_retry_state() {
|
| 104 |
+
local video_id="$1"
|
| 105 |
+
local attempts="$2"
|
| 106 |
+
local host_name="$3"
|
| 107 |
+
local gpu_id="$4"
|
| 108 |
+
local reason="$5"
|
| 109 |
+
local retry_state_path="$RETRY_DIR/${video_id}.state"
|
| 110 |
+
cat > "$retry_state_path" <<STATE
|
| 111 |
attempts=$attempts
|
| 112 |
host=$host_name
|
| 113 |
gpu=$gpu_id
|
|
|
|
| 117 |
}
|
| 118 |
|
| 119 |
get_retry_attempts() {
|
| 120 |
+
local video_id="$1"
|
| 121 |
+
local retry_state_path="$RETRY_DIR/${video_id}.state"
|
| 122 |
+
if [[ -f "$retry_state_path" ]]; then
|
| 123 |
+
awk -F'=' '/^attempts=/{print $2}' "$retry_state_path" | tail -n 1
|
| 124 |
else
|
| 125 |
echo 0
|
| 126 |
fi
|
| 127 |
}
|
| 128 |
|
| 129 |
mark_retry_exhausted() {
|
| 130 |
+
local video_id="$1"
|
| 131 |
+
local attempts="$2"
|
| 132 |
+
local reason="$3"
|
| 133 |
python - <<PY
|
| 134 |
from pathlib import Path
|
| 135 |
import sys, time
|
| 136 |
root = Path(r"$ROOT_DIR")
|
| 137 |
stats_path = Path(r"$STATS_NPZ")
|
| 138 |
+
video_id = r"$video_id"
|
| 139 |
attempts = int(r"$attempts")
|
| 140 |
reason = r'''$reason'''
|
| 141 |
sys.path.insert(0, str(root))
|
| 142 |
+
from utils.stats_npz import update_video_stats_best_effort
|
| 143 |
+
journal_path = Path(r"$STATUS_JOURNAL_PATH")
|
| 144 |
+
update_video_stats_best_effort(
|
| 145 |
stats_path,
|
| 146 |
+
journal_path,
|
| 147 |
video_id,
|
| 148 |
process_status="skipped",
|
| 149 |
last_error=f"gpu_init_retry_exhausted after {attempts} attempts: {reason}",
|
| 150 |
updated_at=time.strftime("%Y-%m-%d %H:%M:%S"),
|
| 151 |
)
|
| 152 |
PY
|
| 153 |
+
rm -f "$RAW_VIDEO_DIR/$video_id.mp4" "$RAW_VIDEO_DIR/$video_id.mkv" "$RAW_VIDEO_DIR/$video_id.webm" "$RAW_VIDEO_DIR/$video_id.mov"
|
| 154 |
+
rm -f "$SCRATCH_RAW_VIDEO_DIR/$video_id.mp4" "$SCRATCH_RAW_VIDEO_DIR/$video_id.mkv" "$SCRATCH_RAW_VIDEO_DIR/$video_id.webm" "$SCRATCH_RAW_VIDEO_DIR/$video_id.mov"
|
| 155 |
}
|
| 156 |
|
| 157 |
should_retry_gpu_init_failure() {
|
|
|
|
| 174 |
"CONDA_NO_PLUGINS=true"
|
| 175 |
)
|
| 176 |
|
| 177 |
+
cmd_base=(python -u "$PIPELINE02"
|
| 178 |
--raw-video-dir "$RAW_VIDEO_DIR"
|
| 179 |
+
--scratch-raw-video-dir "$SCRATCH_RAW_VIDEO_DIR"
|
| 180 |
--dataset-dir "$DATASET_DIR"
|
| 181 |
+
--scratch-dataset-dir "$SCRATCH_DATASET_DIR"
|
| 182 |
--stats-npz "$STATS_NPZ"
|
| 183 |
+
--status-journal-path "$STATUS_JOURNAL_PATH"
|
| 184 |
--fps "$FPS"
|
| 185 |
--workers 1
|
| 186 |
--tmp-root "$TMP_ROOT"
|
|
|
|
| 187 |
)
|
| 188 |
+
if [[ "$OPTIMIZED_MODE" == "1" ]]; then
|
| 189 |
+
cmd_base+=(--optimized-mode)
|
| 190 |
+
cmd_base+=(--optimized-provider "$OPTIMIZED_PROVIDER")
|
| 191 |
+
cmd_base+=(--optimized-frame-batch-size "$OPTIMIZED_FRAME_BATCH_SIZE")
|
| 192 |
+
cmd_base+=(--optimized-detect-resolution "$OPTIMIZED_DETECT_RESOLUTION")
|
| 193 |
+
cmd_base+=(--optimized-frame-stride "$OPTIMIZED_FRAME_STRIDE")
|
| 194 |
+
if [[ "$OPTIMIZED_IO_BINDING" == "1" ]]; then
|
| 195 |
+
cmd_base+=(--optimized-io-binding)
|
| 196 |
+
fi
|
| 197 |
+
if [[ "$OPTIMIZED_GPU_DETECTOR_POSTPROCESS" == "1" ]]; then
|
| 198 |
+
cmd_base+=(--optimized-gpu-detector-postprocess)
|
| 199 |
+
fi
|
| 200 |
+
if [[ "$OPTIMIZED_GPU_POSE_PREPROCESS" == "1" ]]; then
|
| 201 |
+
cmd_base+=(--optimized-gpu-pose-preprocess)
|
| 202 |
+
fi
|
| 203 |
+
else
|
| 204 |
+
cmd_base+=(--legacy-mode)
|
| 205 |
+
fi
|
| 206 |
if [[ "$FORCE_PROCESS" == "1" ]]; then
|
| 207 |
+
cmd_base+=(--force)
|
| 208 |
fi
|
| 209 |
if [[ "$DELETE_SOURCE_ON_SUCCESS" == "1" ]]; then
|
| 210 |
+
cmd_base+=(--delete-source-on-success)
|
| 211 |
fi
|
| 212 |
|
| 213 |
+
overall_status=0
|
| 214 |
+
for VIDEO_ID in "${ALL_VIDEO_IDS[@]}"; do
|
| 215 |
+
[[ -z "$VIDEO_ID" ]] && continue
|
| 216 |
+
CLAIM_PATH="$CLAIM_DIR/${VIDEO_ID}.claim"
|
| 217 |
+
RETRY_STATE_PATH="$RETRY_DIR/${VIDEO_ID}.state"
|
| 218 |
+
echo "[$(date '+%F %T')] job=${SLURM_JOB_ID:-na} task=${SLURM_ARRAY_TASK_ID} host=$(hostname) gpu=${CUDA_VISIBLE_DEVICES:-unset} video_id=$VIDEO_ID"
|
| 219 |
+
|
| 220 |
+
TMP_LOG="$(mktemp "${TMP_ROOT%/}/dwpose_${VIDEO_ID}_XXXX.log")"
|
| 221 |
+
cmd=("${cmd_base[@]}" --video-ids="$VIDEO_ID")
|
| 222 |
+
set +e
|
| 223 |
+
"${env_cmd[@]}" conda run -n "$CONDA_ENV" "${cmd[@]}" 2>&1 | tee "$TMP_LOG"
|
| 224 |
+
cmd_status=${PIPESTATUS[0]}
|
| 225 |
+
set -e
|
| 226 |
+
|
| 227 |
+
if [[ "$cmd_status" -ne 0 ]]; then
|
| 228 |
+
npz_dir="$DATASET_DIR/$VIDEO_ID/npz"
|
| 229 |
+
if [[ ! -d "$npz_dir" && -d "$SCRATCH_DATASET_DIR/$VIDEO_ID/npz" ]]; then
|
| 230 |
+
npz_dir="$SCRATCH_DATASET_DIR/$VIDEO_ID/npz"
|
| 231 |
fi
|
| 232 |
+
if should_retry_gpu_init_failure "$TMP_LOG" "$npz_dir"; then
|
| 233 |
+
attempts="$(get_retry_attempts "$VIDEO_ID")"
|
| 234 |
+
attempts="$((attempts + 1))"
|
| 235 |
+
last_reason="$(tail -n 80 "$TMP_LOG" | tr '\n' ' ' | sed 's/[[:space:]]\+/ /g' | cut -c1-1200)"
|
| 236 |
+
gpu_init_retry_state "$VIDEO_ID" "$attempts" "$(hostname)" "${CUDA_VISIBLE_DEVICES:-unset}" "$last_reason"
|
| 237 |
+
if [[ "$attempts" -ge "$MAX_GPU_INIT_RETRIES" ]]; then
|
| 238 |
+
echo "GPU init failed on multiple GPUs; marking $VIDEO_ID as skipped after $attempts attempts." >&2
|
| 239 |
+
mark_retry_exhausted "$VIDEO_ID" "$attempts" "$last_reason"
|
| 240 |
+
rm -f "$CLAIM_PATH"
|
| 241 |
+
continue
|
| 242 |
+
fi
|
| 243 |
+
echo "GPU init failure for $VIDEO_ID on host=$(hostname) gpu=${CUDA_VISIBLE_DEVICES:-unset}; retry attempt $attempts/$MAX_GPU_INIT_RETRIES will be resubmitted later." >&2
|
| 244 |
+
overall_status="$cmd_status"
|
| 245 |
+
break
|
| 246 |
+
fi
|
| 247 |
+
overall_status="$cmd_status"
|
| 248 |
+
break
|
| 249 |
fi
|
|
|
|
|
|
|
| 250 |
|
| 251 |
+
rm -f "$RETRY_STATE_PATH" "$CLAIM_PATH"
|
| 252 |
+
done
|
| 253 |
+
|
| 254 |
+
exit "$overall_status"
|
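The manifest slicing in the worker above assigns each array task a contiguous block of VIDEOS_PER_JOB manifest lines: task 0 reads lines 1..N, task 1 reads lines N+1..2N, and so on. A tiny Python illustration of the same arithmetic (sketch only; the worker does this with sed):

def manifest_slice(lines, task_id: int, videos_per_job: int):
    # Mirrors the 1-based sed range [task_id*N + 1, (task_id + 1)*N].
    start = task_id * videos_per_job
    return [v for v in lines[start:start + videos_per_job] if v.strip()]

# With videos_per_job=5: task 0 -> manifest lines 1-5, task 3 -> lines 16-20.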
slurm/process_upload_parallel_array.slurm
ADDED
|
@@ -0,0 +1,99 @@
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env bash
|
| 2 |
+
#SBATCH --job-name=upload
|
| 3 |
+
#SBATCH --nodes=1
|
| 4 |
+
#SBATCH --ntasks=1
|
| 5 |
+
#SBATCH --cpus-per-task=2
|
| 6 |
+
#SBATCH --mem=8G
|
| 7 |
+
#SBATCH --time=24:00:00
|
| 8 |
+
#SBATCH --output=%x_%A_%a.out
|
| 9 |
+
#SBATCH --error=%x_%A_%a.err
|
| 10 |
+
|
| 11 |
+
set -euo pipefail
|
| 12 |
+
|
| 13 |
+
ROOT_DIR="${ROOT_DIR:-/home/sf895/SignVerse-2M}"
|
| 14 |
+
RUNTIME_ROOT="${RUNTIME_ROOT:-/home/sf895/SignVerse-2M-runtime}"
|
| 15 |
+
CONDA_SH="${CONDA_SH:-/home/sf895/miniconda3/etc/profile.d/conda.sh}"
|
| 16 |
+
CONDA_ENV="${CONDA_ENV:-signx2}"
|
| 17 |
+
PIPELINE03="${PIPELINE03:-$ROOT_DIR/scripts/pipeline03_upload_to_huggingface.py}"
|
| 18 |
+
DATASET_DIR="${DATASET_DIR:-$RUNTIME_ROOT/dataset}"
|
| 19 |
+
SCRATCH_DATASET_DIR="${SCRATCH_DATASET_DIR:-/scratch/$USER/SignVerse-2M-runtime/dataset}"
|
| 20 |
+
RAW_VIDEO_DIR="${RAW_VIDEO_DIR:-$RUNTIME_ROOT/raw_video}"
|
| 21 |
+
SCRATCH_RAW_VIDEO_DIR="${SCRATCH_RAW_VIDEO_DIR:-/scratch/$USER/SignVerse-2M-runtime/raw_video}"
|
| 22 |
+
RAW_CAPTION_DIR="${RAW_CAPTION_DIR:-$RUNTIME_ROOT/raw_caption}"
|
| 23 |
+
RAW_METADATA_DIR="${RAW_METADATA_DIR:-$RUNTIME_ROOT/raw_metadata}"
|
| 24 |
+
ARCHIVE_DIR="${ARCHIVE_DIR:-$ROOT_DIR/archives}"
|
| 25 |
+
PROGRESS_JSON="${PROGRESS_JSON:-$RUNTIME_ROOT/archive_upload_progress.json}"
|
| 26 |
+
STATUS_JOURNAL_PATH="${STATUS_JOURNAL_PATH:-$RUNTIME_ROOT/upload_status_journal.jsonl}"
|
| 27 |
+
STATS_NPZ="${STATS_NPZ:-$RUNTIME_ROOT/stats.npz}"
|
| 28 |
+
REPO_ID="${REPO_ID:-SignerX/SignVerse-2M}"
|
| 29 |
+
REPO_REVISION="${REPO_REVISION:-dev}"
|
| 30 |
+
TARGET_BYTES="${TARGET_BYTES:-10737418240}"
|
| 31 |
+
TARGET_FOLDERS="${TARGET_FOLDERS:-40}"
|
| 32 |
+
PARALLEL_SHARDS="${PARALLEL_SHARDS:-1}"
|
| 33 |
+
START_STAGGER_MIN="${START_STAGGER_MIN:-1}"
|
| 34 |
+
START_STAGGER_MAX="${START_STAGGER_MAX:-3}"
|
| 35 |
+
ALLOW_SMALL_FINAL_BATCH="${ALLOW_SMALL_FINAL_BATCH:-0}"
|
| 36 |
+
REQUIRE_TARGET_BYTES="${REQUIRE_TARGET_BYTES:-1}"
|
| 37 |
+
DRY_RUN_UPLOAD="${DRY_RUN_UPLOAD:-0}"
|
| 38 |
+
UPLOAD_MODE="${UPLOAD_MODE:-api}"
|
| 39 |
+
|
| 40 |
+
if [[ ! -f "$CONDA_SH" ]]; then
|
| 41 |
+
echo "Missing conda init script: $CONDA_SH" >&2
|
| 42 |
+
exit 1
|
| 43 |
+
fi
|
| 44 |
+
|
| 45 |
+
if [[ -z "${SLURM_ARRAY_TASK_ID:-}" ]]; then
|
| 46 |
+
echo "SLURM_ARRAY_TASK_ID is required." >&2
|
| 47 |
+
exit 1
|
| 48 |
+
fi
|
| 49 |
+
|
| 50 |
+
if (( PARALLEL_SHARDS < 1 )); then
|
| 51 |
+
echo "PARALLEL_SHARDS must be >= 1" >&2
|
| 52 |
+
exit 1
|
| 53 |
+
fi
|
| 54 |
+
|
| 55 |
+
if (( SLURM_ARRAY_TASK_ID < 0 || SLURM_ARRAY_TASK_ID >= PARALLEL_SHARDS )); then
|
| 56 |
+
echo "SLURM_ARRAY_TASK_ID=$SLURM_ARRAY_TASK_ID is out of range for PARALLEL_SHARDS=$PARALLEL_SHARDS" >&2
|
| 57 |
+
exit 1
|
| 58 |
+
fi
|
| 59 |
+
|
| 60 |
+
echo "[$(date '+%F %T')] upload shard=${SLURM_ARRAY_TASK_ID}/${PARALLEL_SHARDS} host=$(hostname)"
|
| 61 |
+
|
| 62 |
+
# shellcheck disable=SC1090
|
| 63 |
+
source "$CONDA_SH"
|
| 64 |
+
|
| 65 |
+
cmd=(python -u "$PIPELINE03"
|
| 66 |
+
--dataset-dir "$DATASET_DIR"
|
| 67 |
+
--scratch-dataset-dir "$SCRATCH_DATASET_DIR"
|
| 68 |
+
--raw-video-dir "$RAW_VIDEO_DIR"
|
| 69 |
+
--scratch-raw-video-dir "$SCRATCH_RAW_VIDEO_DIR"
|
| 70 |
+
--raw-caption-dir "$RAW_CAPTION_DIR"
|
| 71 |
+
--raw-metadata-dir "$RAW_METADATA_DIR"
|
| 72 |
+
--archive-dir "$ARCHIVE_DIR"
|
| 73 |
+
--progress-path "$PROGRESS_JSON"
|
| 74 |
+
--stats-npz "$STATS_NPZ"
|
| 75 |
+
--status-journal-path "$STATUS_JOURNAL_PATH"
|
| 76 |
+
--repo-id "$REPO_ID"
|
| 77 |
+
--repo-revision "$REPO_REVISION"
|
| 78 |
+
--target-bytes "$TARGET_BYTES"
|
| 79 |
+
--target-folders "$TARGET_FOLDERS"
|
| 80 |
+
--parallel-shards "$PARALLEL_SHARDS"
|
| 81 |
+
--shard-index "$SLURM_ARRAY_TASK_ID"
|
| 82 |
+
--start-stagger-min "$START_STAGGER_MIN"
|
| 83 |
+
--start-stagger-max "$START_STAGGER_MAX"
|
| 84 |
+
--upload-mode "$UPLOAD_MODE"
|
| 85 |
+
)
|
| 86 |
+
|
| 87 |
+
if [[ "$ALLOW_SMALL_FINAL_BATCH" == "1" ]]; then
|
| 88 |
+
cmd+=(--allow-small-final-batch)
|
| 89 |
+
fi
|
| 90 |
+
if [[ "$REQUIRE_TARGET_BYTES" != "1" ]]; then
|
| 91 |
+
cmd+=(--allow-small-final-batch)
|
| 92 |
+
fi
|
| 93 |
+
if [[ "$DRY_RUN_UPLOAD" == "1" ]]; then
|
| 94 |
+
cmd+=(--dry-run)
|
| 95 |
+
fi
|
| 96 |
+
|
| 97 |
+
cmd+=(--skip-stats-write)
|
| 98 |
+
|
| 99 |
+
CONDA_NO_PLUGINS=true conda run -n "$CONDA_ENV" "${cmd[@]}"
|
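Each shard passes --parallel-shards and --shard-index to pipeline03, which is expected to claim a disjoint subset of completed folders. The commit does not show how pipeline03 splits the work; one common scheme is a stable hash of the folder name modulo the shard count, sketched below purely as a hypothetical example:

import hashlib

def owns_folder(video_id: str, shard_index: int, parallel_shards: int) -> bool:
    # Hypothetical shard test; the real partitioning lives in pipeline03 and is not shown here.
    digest = hashlib.sha1(video_id.encode("utf-8")).hexdigest()
    return int(digest, 16) % parallel_shards == shard_index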
slurm/run_reproduce_independently_slurm.slurm
CHANGED
|
@@ -5,8 +5,8 @@
|
|
| 5 |
#SBATCH --cpus-per-task=2
|
| 6 |
#SBATCH --mem=8G
|
| 7 |
#SBATCH --time=24:00:00
|
| 8 |
-
#SBATCH --output=%x_%A.out
|
| 9 |
-
#SBATCH --error=%x_%A.err
|
| 10 |
|
| 11 |
set -euo pipefail
|
| 12 |
|
|
|
|
| 5 |
#SBATCH --cpus-per-task=2
|
| 6 |
#SBATCH --mem=8G
|
| 7 |
#SBATCH --time=24:00:00
|
| 8 |
+
#SBATCH --output=/scratch/%u/SignVerse-2M-runtime/slurm/logs/%x_%A.out
|
| 9 |
+
#SBATCH --error=/scratch/%u/SignVerse-2M-runtime/slurm/logs/%x_%A.err
|
| 10 |
|
| 11 |
set -euo pipefail
|
| 12 |
|
slurm/submit_download_slurm.sh
CHANGED
|
@@ -7,10 +7,14 @@ STATE_ROOT="${STATE_ROOT:-/home/sf895/SignVerse-2M-runtime}"
|
|
| 7 |
SOURCE_METADATA_CSV="${SOURCE_METADATA_CSV:-$RUNTIME_ROOT/SignVerse-2M-metadata_ori.csv}"
|
| 8 |
OUTPUT_METADATA_CSV="${OUTPUT_METADATA_CSV:-$RUNTIME_ROOT/SignVerse-2M-metadata_processed.csv}"
|
| 9 |
RAW_VIDEO_DIR="${RAW_VIDEO_DIR:-$RUNTIME_ROOT/raw_video}"
|
|
|
|
|
|
|
|
|
|
| 10 |
RAW_CAPTION_DIR="${RAW_CAPTION_DIR:-$RUNTIME_ROOT/raw_caption}"
|
| 11 |
RAW_METADATA_DIR="${RAW_METADATA_DIR:-$RUNTIME_ROOT/raw_metadata}"
|
| 12 |
DATASET_DIR="${DATASET_DIR:-$RUNTIME_ROOT/dataset}"
|
| 13 |
STATS_NPZ="${STATS_NPZ:-$RUNTIME_ROOT/stats.npz}"
|
|
|
|
| 14 |
SLURM_SCRIPT="$ROOT_DIR/slurm/process_download_array.slurm"
|
| 15 |
MANIFEST_DIR="${MANIFEST_DIR:-$STATE_ROOT/slurm/manifests}"
|
| 16 |
LOG_DIR="${LOG_DIR:-$STATE_ROOT/slurm/logs}"
|
|
@@ -25,7 +29,7 @@ CPUS_PER_TASK="1"
|
|
| 25 |
MEMORY="4G"
|
| 26 |
LIMIT=""
|
| 27 |
ARRAY_PARALLEL=""
|
| 28 |
-
MAX_BACKLOG_VIDEOS="
|
| 29 |
MAX_ACTIVE_DOWNLOADS="60"
|
| 30 |
DOWNLOAD_CLAIM_GRACE_SECONDS="${DOWNLOAD_CLAIM_GRACE_SECONDS:-600}"
|
| 31 |
WORKERS="60"
|
|
@@ -57,6 +61,9 @@ Options:
|
|
| 57 |
--max-backlog-videos N Max raw backlog + active download claims allowed. Default: 180
|
| 58 |
--max-active-downloads N Max active download claims allowed at once. Default: 30
|
| 59 |
--workers N Max download tasks to submit in one cycle. Default: 60
|
|
|
|
|
|
|
|
|
|
| 60 |
--claim-dir DIR Download claim directory
|
| 61 |
--csv-lock-path PATH CSV lock path
|
| 62 |
--video-ids ID [ID ...] Restrict this cycle to specific videos
|
|
@@ -85,6 +92,9 @@ while [[ $# -gt 0 ]]; do
|
|
| 85 |
--array-parallel) ARRAY_PARALLEL="$2"; shift 2 ;;
|
| 86 |
--max-backlog-videos) MAX_BACKLOG_VIDEOS="$2"; shift 2 ;;
|
| 87 |
--workers) WORKERS="$2"; shift 2 ;;
|
|
|
|
|
|
|
|
|
|
| 88 |
--max-active-downloads) MAX_ACTIVE_DOWNLOADS="$2"; shift 2 ;;
|
| 89 |
--claim-dir) DOWNLOAD_CLAIM_DIR="$2"; shift 2 ;;
|
| 90 |
--csv-lock-path) DOWNLOAD_CSV_LOCK_PATH="$2"; shift 2 ;;
|
|
@@ -123,7 +133,7 @@ ACTIVE_DOWNLOAD_TASKS_FILE="$STATE_DIR/active_download_tasks_${TIMESTAMP}.txt"
|
|
| 123 |
squeue -h -u "$USER" -o "%A" | sed 's/_.*//' | sort -u > "$ACTIVE_JOBS_FILE"
|
| 124 |
squeue -h -u "$USER" -n download -t RUNNING,PENDING,CONFIGURING -o "%i" > "$ACTIVE_DOWNLOAD_TASKS_FILE"
|
| 125 |
|
| 126 |
-
SELECTED_COUNT="$(python - "$SOURCE_METADATA_CSV" "$OUTPUT_METADATA_CSV" "$RAW_VIDEO_DIR" "$DOWNLOAD_CLAIM_DIR" "$ACTIVE_JOBS_FILE" "$ACTIVE_DOWNLOAD_TASKS_FILE" "$MAX_BACKLOG_VIDEOS" "$MAX_ACTIVE_DOWNLOADS" "$LIMIT" "$WORKERS" "$BASE_MANIFEST" "${VIDEO_IDS[*]:-}" <<'PY'
|
| 127 |
import csv
|
| 128 |
import sys
|
| 129 |
from pathlib import Path
|
|
@@ -131,16 +141,29 @@ from pathlib import Path
|
|
| 131 |
source_csv = Path(sys.argv[1])
|
| 132 |
processed_csv = Path(sys.argv[2])
|
| 133 |
raw_video_dir = Path(sys.argv[3])
|
| 134 |
-
|
| 135 |
-
|
| 136 |
-
|
| 137 |
-
|
| 138 |
-
|
| 139 |
-
|
| 140 |
-
|
| 141 |
-
|
| 142 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 143 |
limit = int(limit_arg) if limit_arg else None
|
|
|
|
|
|
| 144 |
video_filter = set(video_ids_joined.split()) if video_ids_joined else None
|
| 145 |
|
| 146 |
video_extensions = {'.mp4', '.mkv', '.webm', '.mov'}
|
|
@@ -194,13 +217,24 @@ for claim_path in claim_dir.glob('*.claim'):
|
|
| 194 |
else:
|
| 195 |
claim_path.unlink(missing_ok=True)
|
| 196 |
|
| 197 |
-
|
|
|
|
| 198 |
existing_raw = set()
|
| 199 |
if raw_video_dir.exists():
|
| 200 |
for path in raw_video_dir.iterdir():
|
| 201 |
if path.is_file() and path.suffix.lower() in video_extensions:
|
| 202 |
-
|
|
|
| 203 |
existing_raw.add(path.stem)
|
|
|
|
|
| 204 |
|
| 205 |
effective_active_downloads = max(active_download_tasks, len(active_claims))
|
| 206 |
remaining_slots = max(0, max_backlog - raw_backlog - len(active_claims))
|
|
@@ -231,6 +265,8 @@ for row in rows:
|
|
| 231 |
continue
|
| 232 |
if video_id in active_claims or video_id in existing_raw:
|
| 233 |
continue
|
|
|
|
|
|
|
| 234 |
download_status = (row.get('download_status') or '').strip()
|
| 235 |
if download_status in {'ok', 'skipped'}:
|
| 236 |
continue
|
|
@@ -267,7 +303,7 @@ cmd=(sbatch
|
|
| 267 |
--array "$ARRAY_SPEC"
|
| 268 |
--output "$LOG_DIR/download_%A_%a.out"
|
| 269 |
--error "$LOG_DIR/download_%A_%a.err"
|
| 270 |
-
--export "ALL,ROOT_DIR=$ROOT_DIR,RUNTIME_ROOT=$RUNTIME_ROOT,STATE_ROOT=$STATE_ROOT,CONDA_SH=/home/sf895/miniconda3/etc/profile.d/conda.sh,CONDA_ENV=signx2,SOURCE_METADATA_CSV=$SOURCE_METADATA_CSV,OUTPUT_METADATA_CSV=$OUTPUT_METADATA_CSV,RAW_VIDEO_DIR=$RAW_VIDEO_DIR,RAW_CAPTION_DIR=$RAW_CAPTION_DIR,RAW_METADATA_DIR=$RAW_METADATA_DIR,DATASET_DIR=$DATASET_DIR,STATS_NPZ=$STATS_NPZ,PIPELINE01=$ROOT_DIR/scripts/pipeline01_download_video_fix_caption.py,DOWNLOAD_CLAIM_DIR=$DOWNLOAD_CLAIM_DIR,DOWNLOAD_CSV_LOCK_PATH=$DOWNLOAD_CSV_LOCK_PATH,MANIFEST=$BASE_MANIFEST,FORCE_METADATA=$FORCE_METADATA,FORCE_SUBTITLES=$FORCE_SUBTITLES,FORCE_DOWNLOAD=$FORCE_DOWNLOAD,SKIP_VIDEO_DOWNLOAD=$SKIP_VIDEO_DOWNLOAD,SKIP_SUBTITLES=$SKIP_SUBTITLES,COOKIES_FILE=$COOKIES_FILE,COOKIES_FROM_BROWSER=$COOKIES_FROM_BROWSER,EXTRACTOR_ARGS=$EXTRACTOR_ARGS,DOWNLOAD_START_STAGGER_MIN=${DOWNLOAD_START_STAGGER_MIN:-1},DOWNLOAD_START_STAGGER_MAX=${DOWNLOAD_START_STAGGER_MAX:-3}"
|
| 271 |
)
|
| 272 |
if [[ -n "$ACCOUNT" ]]; then
|
| 273 |
cmd+=(--account "$ACCOUNT")
|
|
|
|
| 7 |
SOURCE_METADATA_CSV="${SOURCE_METADATA_CSV:-$RUNTIME_ROOT/SignVerse-2M-metadata_ori.csv}"
|
| 8 |
OUTPUT_METADATA_CSV="${OUTPUT_METADATA_CSV:-$RUNTIME_ROOT/SignVerse-2M-metadata_processed.csv}"
|
| 9 |
RAW_VIDEO_DIR="${RAW_VIDEO_DIR:-$RUNTIME_ROOT/raw_video}"
|
| 10 |
+
SCRATCH_RAW_VIDEO_DIR="${SCRATCH_RAW_VIDEO_DIR:-/scratch/$USER/SignVerse-2M-runtime/raw_video}"
|
| 11 |
+
HOME_RAW_VIDEO_LIMIT="${HOME_RAW_VIDEO_LIMIT:-180}"
|
| 12 |
+
SCRATCH_RAW_VIDEO_LIMIT="${SCRATCH_RAW_VIDEO_LIMIT:-2800}"
|
| 13 |
RAW_CAPTION_DIR="${RAW_CAPTION_DIR:-$RUNTIME_ROOT/raw_caption}"
|
| 14 |
RAW_METADATA_DIR="${RAW_METADATA_DIR:-$RUNTIME_ROOT/raw_metadata}"
|
| 15 |
DATASET_DIR="${DATASET_DIR:-$RUNTIME_ROOT/dataset}"
|
| 16 |
STATS_NPZ="${STATS_NPZ:-$RUNTIME_ROOT/stats.npz}"
|
| 17 |
+
PROGRESS_JSON="${PROGRESS_JSON:-$RUNTIME_ROOT/archive_upload_progress.json}"
|
| 18 |
SLURM_SCRIPT="$ROOT_DIR/slurm/process_download_array.slurm"
|
| 19 |
MANIFEST_DIR="${MANIFEST_DIR:-$STATE_ROOT/slurm/manifests}"
|
| 20 |
LOG_DIR="${LOG_DIR:-$STATE_ROOT/slurm/logs}"
|
|
|
|
| 29 |
MEMORY="4G"
|
| 30 |
LIMIT=""
|
| 31 |
ARRAY_PARALLEL=""
|
| 32 |
+
MAX_BACKLOG_VIDEOS="2980"
|
| 33 |
MAX_ACTIVE_DOWNLOADS="60"
|
| 34 |
DOWNLOAD_CLAIM_GRACE_SECONDS="${DOWNLOAD_CLAIM_GRACE_SECONDS:-600}"
|
| 35 |
WORKERS="60"
|
|
|
|
| 61 |
--max-backlog-videos N Max raw backlog + active download claims allowed. Default: 180
|
| 62 |
--max-active-downloads N Max active download claims allowed at once. Default: 30
|
| 63 |
--workers N Max download tasks to submit in one cycle. Default: 60
|
| 64 |
+
--scratch-raw-video-dir DIR Scratch raw video pool directory
|
| 65 |
+
--home-raw-video-limit N Home raw pool cap. Default: 180
|
| 66 |
+
--scratch-raw-video-limit N Scratch raw pool cap. Default: 2800
|
| 67 |
--claim-dir DIR Download claim directory
|
| 68 |
--csv-lock-path PATH CSV lock path
|
| 69 |
--video-ids ID [ID ...] Restrict this cycle to specific videos
|
|
|
|
| 92 |
--array-parallel) ARRAY_PARALLEL="$2"; shift 2 ;;
|
| 93 |
--max-backlog-videos) MAX_BACKLOG_VIDEOS="$2"; shift 2 ;;
|
| 94 |
--workers) WORKERS="$2"; shift 2 ;;
|
| 95 |
+
--scratch-raw-video-dir) SCRATCH_RAW_VIDEO_DIR="$2"; shift 2 ;;
|
| 96 |
+
--home-raw-video-limit) HOME_RAW_VIDEO_LIMIT="$2"; shift 2 ;;
|
| 97 |
+
--scratch-raw-video-limit) SCRATCH_RAW_VIDEO_LIMIT="$2"; shift 2 ;;
|
| 98 |
--max-active-downloads) MAX_ACTIVE_DOWNLOADS="$2"; shift 2 ;;
|
| 99 |
--claim-dir) DOWNLOAD_CLAIM_DIR="$2"; shift 2 ;;
|
| 100 |
--csv-lock-path) DOWNLOAD_CSV_LOCK_PATH="$2"; shift 2 ;;
|
|
|
|
| 133 |
squeue -h -u "$USER" -o "%A" | sed 's/_.*//' | sort -u > "$ACTIVE_JOBS_FILE"
|
| 134 |
squeue -h -u "$USER" -n download -t RUNNING,PENDING,CONFIGURING -o "%i" > "$ACTIVE_DOWNLOAD_TASKS_FILE"
|
| 135 |
|
| 136 |
+
SELECTED_COUNT="$(python - "$SOURCE_METADATA_CSV" "$OUTPUT_METADATA_CSV" "$RAW_VIDEO_DIR" "$SCRATCH_RAW_VIDEO_DIR" "$DOWNLOAD_CLAIM_DIR" "$ACTIVE_JOBS_FILE" "$ACTIVE_DOWNLOAD_TASKS_FILE" "$MAX_BACKLOG_VIDEOS" "$MAX_ACTIVE_DOWNLOADS" "$LIMIT" "$WORKERS" "$BASE_MANIFEST" "$HOME_RAW_VIDEO_LIMIT" "$SCRATCH_RAW_VIDEO_LIMIT" "$PROGRESS_JSON" "${VIDEO_IDS[*]:-}" <<'PY'
|
| 137 |
import csv
|
| 138 |
import sys
|
| 139 |
from pathlib import Path
|
|
|
|
| 141 |
source_csv = Path(sys.argv[1])
|
| 142 |
processed_csv = Path(sys.argv[2])
|
| 143 |
raw_video_dir = Path(sys.argv[3])
|
| 144 |
+
scratch_raw_video_dir = Path(sys.argv[4])
|
| 145 |
+
claim_dir = Path(sys.argv[5])
|
| 146 |
+
active_jobs_path = Path(sys.argv[6])
|
| 147 |
+
active_download_tasks_path = Path(sys.argv[7])
|
| 148 |
+
max_backlog = int(sys.argv[8])
|
| 149 |
+
max_active_downloads = int(sys.argv[9])
|
| 150 |
+
limit_arg = sys.argv[10]
|
| 151 |
+
workers = int(sys.argv[11])
|
| 152 |
+
manifest_path = Path(sys.argv[12])
|
| 153 |
+
home_raw_video_limit = int(sys.argv[13])
|
| 154 |
+
scratch_raw_video_limit = int(sys.argv[14])
|
| 155 |
+
progress_path = Path(sys.argv[15])
|
| 156 |
+
video_ids_joined = sys.argv[16].strip()
|
| 157 |
limit = int(limit_arg) if limit_arg else None
|
| 158 |
+
|
| 159 |
+
uploaded_ids = set()
|
| 160 |
+
if progress_path.exists():
|
| 161 |
+
try:
|
| 162 |
+
import json
|
| 163 |
+
progress = json.loads(progress_path.read_text(encoding="utf-8"))
|
| 164 |
+
uploaded_ids = set((progress.get("uploaded_folders") or {}).keys())
|
| 165 |
+
except Exception:
|
| 166 |
+
uploaded_ids = set()
|
| 167 |
video_filter = set(video_ids_joined.split()) if video_ids_joined else None
|
| 168 |
|
| 169 |
video_extensions = {'.mp4', '.mkv', '.webm', '.mov'}
|
|
|
|
| 217 |
else:
|
| 218 |
claim_path.unlink(missing_ok=True)
|
| 219 |
|
| 220 |
+
home_raw_count = 0
|
| 221 |
+
scratch_raw_count = 0
|
| 222 |
existing_raw = set()
|
| 223 |
if raw_video_dir.exists():
|
| 224 |
for path in raw_video_dir.iterdir():
|
| 225 |
if path.is_file() and path.suffix.lower() in video_extensions:
|
| 226 |
+
home_raw_count += 1
|
| 227 |
+
existing_raw.add(path.stem)
|
| 228 |
+
if scratch_raw_video_dir.exists():
|
| 229 |
+
for path in scratch_raw_video_dir.iterdir():
|
| 230 |
+
if path.is_file() and path.suffix.lower() in video_extensions:
|
| 231 |
+
scratch_raw_count += 1
|
| 232 |
existing_raw.add(path.stem)
|
| 233 |
+
raw_backlog = home_raw_count + scratch_raw_count
|
| 234 |
+
if home_raw_count >= home_raw_video_limit and scratch_raw_count >= scratch_raw_video_limit:
|
| 235 |
+
manifest_path.write_text('', encoding='utf-8')
|
| 236 |
+
print(0)
|
| 237 |
+
raise SystemExit
|
| 238 |
|
| 239 |
effective_active_downloads = max(active_download_tasks, len(active_claims))
|
| 240 |
remaining_slots = max(0, max_backlog - raw_backlog - len(active_claims))
|
|
|
|
| 265 |
continue
|
| 266 |
if video_id in active_claims or video_id in existing_raw:
|
| 267 |
continue
|
| 268 |
+
if video_id in uploaded_ids:
|
| 269 |
+
continue
|
| 270 |
download_status = (row.get('download_status') or '').strip()
|
| 271 |
if download_status in {'ok', 'skipped'}:
|
| 272 |
continue
|
|
|
|
| 303 |
--array "$ARRAY_SPEC"
|
| 304 |
--output "$LOG_DIR/download_%A_%a.out"
|
| 305 |
--error "$LOG_DIR/download_%A_%a.err"
|
| 306 |
+
--export "ALL,ROOT_DIR=$ROOT_DIR,RUNTIME_ROOT=$RUNTIME_ROOT,STATE_ROOT=$STATE_ROOT,CONDA_SH=/home/sf895/miniconda3/etc/profile.d/conda.sh,CONDA_ENV=signx2,SOURCE_METADATA_CSV=$SOURCE_METADATA_CSV,OUTPUT_METADATA_CSV=$OUTPUT_METADATA_CSV,RAW_VIDEO_DIR=$RAW_VIDEO_DIR,SCRATCH_RAW_VIDEO_DIR=$SCRATCH_RAW_VIDEO_DIR,HOME_RAW_VIDEO_LIMIT=$HOME_RAW_VIDEO_LIMIT,SCRATCH_RAW_VIDEO_LIMIT=$SCRATCH_RAW_VIDEO_LIMIT,RAW_CAPTION_DIR=$RAW_CAPTION_DIR,RAW_METADATA_DIR=$RAW_METADATA_DIR,DATASET_DIR=$DATASET_DIR,STATS_NPZ=$STATS_NPZ,STATUS_JOURNAL_PATH=$RUNTIME_ROOT/upload_status_journal.jsonl,PIPELINE01=$ROOT_DIR/scripts/pipeline01_download_video_fix_caption.py,DOWNLOAD_CLAIM_DIR=$DOWNLOAD_CLAIM_DIR,DOWNLOAD_CSV_LOCK_PATH=$DOWNLOAD_CSV_LOCK_PATH,MANIFEST=$BASE_MANIFEST,FORCE_METADATA=$FORCE_METADATA,FORCE_SUBTITLES=$FORCE_SUBTITLES,FORCE_DOWNLOAD=$FORCE_DOWNLOAD,SKIP_VIDEO_DOWNLOAD=$SKIP_VIDEO_DOWNLOAD,SKIP_SUBTITLES=$SKIP_SUBTITLES,COOKIES_FILE=$COOKIES_FILE,COOKIES_FROM_BROWSER=$COOKIES_FROM_BROWSER,EXTRACTOR_ARGS=$EXTRACTOR_ARGS,DOWNLOAD_START_STAGGER_MIN=${DOWNLOAD_START_STAGGER_MIN:-1},DOWNLOAD_START_STAGGER_MAX=${DOWNLOAD_START_STAGGER_MAX:-3}"
|
| 307 |
)
|
| 308 |
if [[ -n "$ACCOUNT" ]]; then
|
| 309 |
cmd+=(--account "$ACCOUNT")
|
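The download selector above skips anything already uploaded, so it only needs the keys of "uploaded_folders" from the progress JSON ($PROGRESS_JSON). A minimal sketch of the assumed file shape; the per-entry fields shown are illustrative, only the keys are read:

# Sketch: assumed shape of archive_upload_progress.json (illustrative values).
# The selection heredocs only look at the keys of "uploaded_folders".
example_progress = {
    "uploaded_folders": {
        "video_00001": {"archive": "batch_0001.tar", "uploaded_at": "2024-01-01 00:00:00"},
        "video_00002": {"archive": "batch_0001.tar", "uploaded_at": "2024-01-01 00:05:00"},
    },
}
uploaded_ids = set(example_progress["uploaded_folders"].keys())
assert "video_00001" in uploaded_ids  # such ids are skipped by both download and dwpose selectors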
slurm/submit_dwpose_slurm.sh
CHANGED
|
@@ -7,8 +7,11 @@ STATE_ROOT="${STATE_ROOT:-/home/sf895/SignVerse-2M-runtime}"
|
|
| 7 |
CONDA_SH="/home/sf895/miniconda3/etc/profile.d/conda.sh"
|
| 8 |
CONDA_ENV="signx2"
|
| 9 |
RAW_VIDEO_DIR="${RAW_VIDEO_DIR:-$RUNTIME_ROOT/raw_video}"
|
|
|
|
| 10 |
DATASET_DIR="${DATASET_DIR:-$RUNTIME_ROOT/dataset}"
|
|
|
|
| 11 |
STATS_NPZ="${STATS_NPZ:-$RUNTIME_ROOT/stats.npz}"
|
|
|
|
| 12 |
SLURM_SCRIPT="$ROOT_DIR/slurm/process_dwpose_array.slurm"
|
| 13 |
MANIFEST_DIR="${MANIFEST_DIR:-$STATE_ROOT/slurm/manifests}"
|
| 14 |
LOG_DIR="${LOG_DIR:-$STATE_ROOT/slurm/logs}"
|
|
@@ -23,10 +26,17 @@ MEMORY="32G"
|
|
| 23 |
FPS="24"
|
| 24 |
LIMIT=""
|
| 25 |
ARRAY_PARALLEL=""
|
| 26 |
-
MAX_BACKLOG_VIDEOS="
|
| 27 |
FORCE_PROCESS=0
|
| 28 |
DELETE_SOURCE_ON_SUCCESS=0
|
|
|
|
| 29 |
MAX_PER_NODE=""
|
| 30 |
|
| 31 |
usage() {
|
| 32 |
cat <<USAGE
|
|
@@ -43,9 +53,10 @@ Options:
|
|
| 43 |
--mem SIZE Default: 32G
|
| 44 |
--fps N Default: 24
|
| 45 |
--limit N Only submit the first N pending, unclaimed videos this cycle
|
| 46 |
-
--max-backlog-videos N Max claimed queued/running videos allowed at once. Default:
|
| 47 |
-
--array-parallel N Add a %N cap to each
|
| 48 |
-
--max-per-node N Cap
|
|
|
|
| 49 |
--force-process Re-run videos even if marked complete
|
| 50 |
--delete-source-on-success Delete raw videos after successful processing
|
| 51 |
--help
|
|
@@ -65,7 +76,9 @@ while [[ $# -gt 0 ]]; do
|
|
| 65 |
--limit) LIMIT="$2"; shift 2 ;;
|
| 66 |
--max-backlog-videos) MAX_BACKLOG_VIDEOS="$2"; shift 2 ;;
|
| 67 |
--array-parallel) ARRAY_PARALLEL="$2"; shift 2 ;;
|
| 68 |
-
--max-per-node) MAX_PER_NODE="$2"; shift 2 ;;
|
|
|
|
|
|
|
| 69 |
--force-process) FORCE_PROCESS=1; shift ;;
|
| 70 |
--delete-source-on-success) DELETE_SOURCE_ON_SUCCESS=1; shift ;;
|
| 71 |
-h|--help) usage; exit 0 ;;
|
|
@@ -83,24 +96,39 @@ fi
|
|
| 83 |
TIMESTAMP="$(date '+%Y%m%d_%H%M%S')"
|
| 84 |
BASE_MANIFEST="$MANIFEST_DIR/pending_videos_${TIMESTAMP}.txt"
|
| 85 |
ACTIVE_JOBS_FILE="$STATE_DIR/active_jobs_${TIMESTAMP}.txt"
|
| 86 |
-
|
|
|
| 87 |
|
| 88 |
squeue -h -u "$USER" -o "%A" | sed 's/_.*//' | sort -u > "$ACTIVE_JOBS_FILE"
|
| 89 |
|
| 90 |
-
PENDING_COUNT="$(python - "$RAW_VIDEO_DIR" "$DATASET_DIR" "$LIMIT" "$FORCE_PROCESS" "$BASE_MANIFEST" "$CLAIM_DIR" "$ACTIVE_JOBS_FILE" "$MAX_BACKLOG_VIDEOS" <<'PY'
|
| 91 |
import sys
|
| 92 |
from pathlib import Path
|
| 93 |
|
| 94 |
raw_video_dir = Path(sys.argv[1])
|
| 95 |
-
|
| 96 |
-
|
| 97 |
-
|
| 98 |
-
|
| 99 |
-
|
| 100 |
-
|
| 101 |
-
|
|
|
| 102 |
limit = int(limit_arg) if limit_arg else None
|
| 103 |
|
|
|
|
| 104 |
video_extensions = {'.mp4', '.mkv', '.webm', '.mov'}
|
| 105 |
claim_dir.mkdir(parents=True, exist_ok=True)
|
| 106 |
active_jobs = set()
|
|
@@ -126,15 +154,27 @@ for claim_path in claim_dir.glob('*.claim'):
|
|
| 126 |
|
| 127 |
remaining_slots = max(0, max_backlog - len(active_claims))
|
| 128 |
selected = []
|
| 129 |
-
if remaining_slots > 0
|
| 130 |
-
|
|
|
|
|
| 131 |
if not path.is_file() or path.suffix.lower() not in video_extensions:
|
| 132 |
continue
|
| 133 |
video_id = path.stem
|
|
|
|
|
|
|
|
|
|
| 134 |
if video_id in active_claims:
|
| 135 |
continue
|
|
|
|
|
|
|
| 136 |
complete_marker = dataset_dir / video_id / 'npz' / '.complete'
|
| 137 |
-
|
|
|
|
| 138 |
continue
|
| 139 |
selected.append(video_id)
|
| 140 |
if len(selected) >= remaining_slots:
|
|
@@ -153,17 +193,86 @@ if [[ "$PENDING_COUNT" == "0" ]]; then
|
|
| 153 |
exit 0
|
| 154 |
fi
|
| 155 |
|
| 156 |
-
AVAILABLE_SLOTS="$(python - "$PARTITIONS" "$
|
| 157 |
import re
|
| 158 |
import subprocess
|
| 159 |
import sys
|
|
|
|
| 160 |
from pathlib import Path
|
| 161 |
|
| 162 |
parts = [p for p in sys.argv[1].split(',') if p]
|
| 163 |
out_path = Path(sys.argv[2])
|
| 164 |
max_per_node_arg = sys.argv[3]
|
|
|
|
| 165 |
max_per_node = int(max_per_node_arg) if max_per_node_arg else None
|
| 166 |
-
|
|
|
|
| 167 |
for part in parts:
|
| 168 |
proc = subprocess.run(['sinfo', '-h', '-N', '-p', part, '-o', '%N'], capture_output=True, text=True, check=False)
|
| 169 |
if proc.returncode != 0:
|
|
@@ -188,37 +297,76 @@ for part in parts:
|
|
| 188 |
if max_per_node is not None:
|
| 189 |
free = min(free, max_per_node)
|
| 190 |
if free > 0:
|
| 191 |
-
|
| 192 |
-
|
| 193 |
-
|
| 194 |
-
|
|
|
|
| 195 |
PY
|
| 196 |
)"
|
| 197 |
|
| 198 |
if [[ -z "$AVAILABLE_SLOTS" || "$AVAILABLE_SLOTS" == "0" ]]; then
|
| 199 |
-
echo "No free GPU slots detected across requested partitions."
|
| 200 |
-
rm -f "$BASE_MANIFEST" "$
|
| 201 |
exit 0
|
| 202 |
fi
|
| 203 |
|
| 204 |
-
|
| 205 |
-
|
| 206 |
-
|
| 207 |
-
|
|
|
|
|
|
|
| 208 |
mv "$BASE_MANIFEST.tmp" "$BASE_MANIFEST"
|
| 209 |
fi
|
|
|
|
| 210 |
|
| 211 |
echo "Created manifest: $BASE_MANIFEST"
|
| 212 |
echo "Pending videos selected this cycle: $PENDING_COUNT"
|
| 213 |
-
echo "Available GPU slots right now: $
|
| 214 |
-
echo "Submitting now: $
|
|
|
|
| 215 |
|
| 216 |
write_claims() {
|
| 217 |
local manifest="$1"
|
| 218 |
local job_id="$2"
|
|
|
|
| 219 |
local task_id=0
|
| 220 |
while IFS= read -r video_id; do
|
| 221 |
[[ -z "$video_id" ]] && continue
|
|
|
|
| 222 |
cat > "$CLAIM_DIR/${video_id}.claim" <<CLAIM
|
| 223 |
job_id=$job_id
|
| 224 |
task_id=$task_id
|
|
@@ -226,37 +374,42 @@ job_key=${job_id}_${task_id}
|
|
| 226 |
video_id=$video_id
|
| 227 |
submitted_at=$(date '+%F %T')
|
| 228 |
CLAIM
|
| 229 |
-
|
| 230 |
done < "$manifest"
|
| 231 |
}
|
| 232 |
|
| 233 |
-
|
| 234 |
local partition="$1"
|
| 235 |
-
|
| 236 |
-
|
| 237 |
-
|
| 238 |
-
|
|
|
|
| 239 |
local job_output job_id
|
| 240 |
if [[ -n "$ARRAY_PARALLEL" ]]; then
|
| 241 |
array_spec+="%${ARRAY_PARALLEL}"
|
| 242 |
fi
|
| 243 |
local -a cmd=(sbatch
|
| 244 |
--partition "$partition"
|
| 245 |
-
--nodelist "$node"
|
| 246 |
--array "$array_spec"
|
| 247 |
--cpus-per-task "$CPUS_PER_TASK"
|
| 248 |
--mem "$MEMORY"
|
| 249 |
--time "$TIME_LIMIT"
|
| 250 |
-
--output "$LOG_DIR/dwpose_${partition}_
|
| 251 |
-
--error "$LOG_DIR/dwpose_${partition}_
|
| 252 |
-
--export "ALL,ROOT_DIR=$ROOT_DIR,RUNTIME_ROOT=$RUNTIME_ROOT,STATE_ROOT=$STATE_ROOT,CONDA_SH=$CONDA_SH,CONDA_ENV=$CONDA_ENV,RAW_VIDEO_DIR=$RAW_VIDEO_DIR,DATASET_DIR=$DATASET_DIR,STATS_NPZ=$STATS_NPZ,FPS=$FPS,FORCE_PROCESS=$FORCE_PROCESS,DELETE_SOURCE_ON_SUCCESS=$DELETE_SOURCE_ON_SUCCESS,MANIFEST=$manifest,CLAIM_DIR=$CLAIM_DIR"
|
| 253 |
)
|
| 254 |
if [[ -n "$ACCOUNT" ]]; then
|
| 255 |
cmd+=(--account "$ACCOUNT")
|
| 256 |
fi
|
| 257 |
cmd+=("$SLURM_SCRIPT")
|
| 258 |
-
echo "Submitting partition=$partition
|
| 259 |
-
job_output="$(
|
| 260 |
echo "$job_output"
|
| 261 |
job_id="$(awk '/Submitted batch job/ {print $4}' <<< "$job_output" | tail -n 1)"
|
| 262 |
if [[ -z "$job_id" ]]; then
|
|
@@ -267,36 +420,48 @@ submit_node() {
|
|
| 267 |
}
|
| 268 |
|
| 269 |
submitted_total=0
|
| 270 |
-
|
|
|
|
| 271 |
line_no=1
|
| 272 |
-
while IFS=$'
|
| 273 |
[[ -z "$partition" ]] && continue
|
| 274 |
-
if [[ "$submitted_total" -ge "$
|
| 275 |
break
|
| 276 |
fi
|
| 277 |
-
|
| 278 |
-
|
| 279 |
-
if [[ "$
|
| 280 |
-
|
|
|
|
| 281 |
fi
|
| 282 |
-
shard_manifest="$MANIFEST_DIR/pending_videos_${TIMESTAMP}.${partition}.
|
| 283 |
sed -n "${line_no},$((line_no + assign_count - 1))p" "$BASE_MANIFEST" > "$shard_manifest"
|
| 284 |
-
shard_count="$(wc -l < "$shard_manifest" | tr -d [:space:])"
|
| 285 |
if [[ "$shard_count" == "0" ]]; then
|
| 286 |
rm -f "$shard_manifest"
|
| 287 |
continue
|
| 288 |
fi
|
| 289 |
-
if
|
| 290 |
submitted_total=$((submitted_total + shard_count))
|
|
|
|
| 291 |
line_no=$((line_no + shard_count))
|
| 292 |
else
|
| 293 |
-
|
|
|
|
| 294 |
rm -f "$shard_manifest"
|
| 295 |
-
|
| 296 |
continue
|
| 297 |
fi
|
| 298 |
-
done < "$
|
| 299 |
|
| 300 |
echo "SUBMITTED_VIDEO_COUNT=$submitted_total"
|
| 301 |
-
echo "
|
| 302 |
-
|
|
|
|
|
|
| 7 |
CONDA_SH="/home/sf895/miniconda3/etc/profile.d/conda.sh"
|
| 8 |
CONDA_ENV="signx2"
|
| 9 |
RAW_VIDEO_DIR="${RAW_VIDEO_DIR:-$RUNTIME_ROOT/raw_video}"
|
| 10 |
+
SCRATCH_RAW_VIDEO_DIR="${SCRATCH_RAW_VIDEO_DIR:-/scratch/$USER/SignVerse-2M-runtime/raw_video}"
|
| 11 |
DATASET_DIR="${DATASET_DIR:-$RUNTIME_ROOT/dataset}"
|
| 12 |
+
SCRATCH_DATASET_DIR="${SCRATCH_DATASET_DIR:-/scratch/$USER/SignVerse-2M-runtime/dataset}"
|
| 13 |
STATS_NPZ="${STATS_NPZ:-$RUNTIME_ROOT/stats.npz}"
|
| 14 |
+
PROGRESS_JSON="${PROGRESS_JSON:-$RUNTIME_ROOT/archive_upload_progress.json}"
|
| 15 |
SLURM_SCRIPT="$ROOT_DIR/slurm/process_dwpose_array.slurm"
|
| 16 |
MANIFEST_DIR="${MANIFEST_DIR:-$STATE_ROOT/slurm/manifests}"
|
| 17 |
LOG_DIR="${LOG_DIR:-$STATE_ROOT/slurm/logs}"
|
|
|
|
| 26 |
FPS="24"
|
| 27 |
LIMIT=""
|
| 28 |
ARRAY_PARALLEL=""
|
| 29 |
+
MAX_BACKLOG_VIDEOS="2980"
|
| 30 |
FORCE_PROCESS=0
|
| 31 |
DELETE_SOURCE_ON_SUCCESS=0
|
| 32 |
+
VIDEOS_PER_JOB="${VIDEOS_PER_JOB:-5}"
|
| 33 |
MAX_PER_NODE=""
|
| 34 |
+
PARTITION_QOS_COOLDOWN_SECONDS="${PARTITION_QOS_COOLDOWN_SECONDS:-300}"
|
| 35 |
+
PARTITION_QOS_HEADROOM="${PARTITION_QOS_HEADROOM:-2}"
|
| 36 |
+
PARTITION_ACTIVE_WINDOW_DEFAULT="${PARTITION_ACTIVE_WINDOW_DEFAULT:-16}"
|
| 37 |
+
PARTITION_ACTIVE_WINDOW_GPU="${PARTITION_ACTIVE_WINDOW_GPU:-16}"
|
| 38 |
+
PARTITION_ACTIVE_WINDOW_GPU_REDHAT="${PARTITION_ACTIVE_WINDOW_GPU_REDHAT:-16}"
|
| 39 |
+
PARTITION_ACTIVE_WINDOW_CGPU="${PARTITION_ACTIVE_WINDOW_CGPU:-8}"
|
| 40 |
|
| 41 |
usage() {
|
| 42 |
cat <<USAGE
|
|
|
|
| 53 |
--mem SIZE Default: 32G
|
| 54 |
--fps N Default: 24
|
| 55 |
--limit N Only submit the first N pending, unclaimed videos this cycle
|
| 56 |
+
--max-backlog-videos N Max claimed queued/running videos allowed at once. Default: 2980
|
| 57 |
+
--array-parallel N Add a %N cap to each partition array
|
| 58 |
+
--max-per-node N Cap per-node free-slot contribution before partition aggregation
|
| 59 |
+
--partition-qos-cooldown-seconds N Skip a partition for N seconds after a submit failure. Default: 300
|
| 60 |
--force-process Re-run videos even if marked complete
|
| 61 |
--delete-source-on-success Delete raw videos after successful processing
|
| 62 |
--help
|
|
|
|
| 76 |
--limit) LIMIT="$2"; shift 2 ;;
|
| 77 |
--max-backlog-videos) MAX_BACKLOG_VIDEOS="$2"; shift 2 ;;
|
| 78 |
--array-parallel) ARRAY_PARALLEL="$2"; shift 2 ;;
|
| 79 |
+
--max-per-node) MAX_PER_NODE="$2"; shift 2 ;;
|
| 80 |
+
--videos-per-job) VIDEOS_PER_JOB="$2"; shift 2 ;;
|
| 81 |
+
--partition-qos-cooldown-seconds) PARTITION_QOS_COOLDOWN_SECONDS="$2"; shift 2 ;;
|
| 82 |
--force-process) FORCE_PROCESS=1; shift ;;
|
| 83 |
--delete-source-on-success) DELETE_SOURCE_ON_SUCCESS=1; shift ;;
|
| 84 |
-h|--help) usage; exit 0 ;;
|
|
|
|
| 96 |
TIMESTAMP="$(date '+%Y%m%d_%H%M%S')"
|
| 97 |
BASE_MANIFEST="$MANIFEST_DIR/pending_videos_${TIMESTAMP}.txt"
|
| 98 |
ACTIVE_JOBS_FILE="$STATE_DIR/active_jobs_${TIMESTAMP}.txt"
|
| 99 |
+
PARTITION_SLOTS_FILE="$STATE_DIR/partition_slots_${TIMESTAMP}.txt"
|
| 100 |
+
PARTITION_COOLDOWN_DIR="$STATE_DIR/partition_submit_cooldowns"
|
| 101 |
+
|
| 102 |
+
mkdir -p "$PARTITION_COOLDOWN_DIR"
|
| 103 |
|
| 104 |
squeue -h -u "$USER" -o "%A" | sed 's/_.*//' | sort -u > "$ACTIVE_JOBS_FILE"
|
| 105 |
|
| 106 |
+
PENDING_COUNT="$(python - "$RAW_VIDEO_DIR" "$SCRATCH_RAW_VIDEO_DIR" "$DATASET_DIR" "$SCRATCH_DATASET_DIR" "$LIMIT" "$FORCE_PROCESS" "$BASE_MANIFEST" "$CLAIM_DIR" "$ACTIVE_JOBS_FILE" "$MAX_BACKLOG_VIDEOS" "$PROGRESS_JSON" <<'PY'
|
| 107 |
import sys
|
| 108 |
from pathlib import Path
|
| 109 |
|
| 110 |
raw_video_dir = Path(sys.argv[1])
|
| 111 |
+
scratch_raw_video_dir = Path(sys.argv[2])
|
| 112 |
+
dataset_dir = Path(sys.argv[3])
|
| 113 |
+
scratch_dataset_dir = Path(sys.argv[4])
|
| 114 |
+
limit_arg = sys.argv[5]
|
| 115 |
+
force = sys.argv[6] == '1'
|
| 116 |
+
manifest_path = Path(sys.argv[7])
|
| 117 |
+
claim_dir = Path(sys.argv[8])
|
| 118 |
+
active_jobs_path = Path(sys.argv[9])
|
| 119 |
+
max_backlog = int(sys.argv[10])
|
| 120 |
+
progress_path = Path(sys.argv[11])
|
| 121 |
limit = int(limit_arg) if limit_arg else None
|
| 122 |
|
| 123 |
+
uploaded_ids = set()
|
| 124 |
+
if progress_path.exists():
|
| 125 |
+
try:
|
| 126 |
+
import json
|
| 127 |
+
progress = json.loads(progress_path.read_text(encoding="utf-8"))
|
| 128 |
+
uploaded_ids = set((progress.get("uploaded_folders") or {}).keys())
|
| 129 |
+
except Exception:
|
| 130 |
+
uploaded_ids = set()
|
| 131 |
+
|
| 132 |
video_extensions = {'.mp4', '.mkv', '.webm', '.mov'}
|
| 133 |
claim_dir.mkdir(parents=True, exist_ok=True)
|
| 134 |
active_jobs = set()
|
|
|
|
| 154 |
|
| 155 |
remaining_slots = max(0, max_backlog - len(active_claims))
|
| 156 |
selected = []
|
| 157 |
+
if remaining_slots > 0:
|
| 158 |
+
raw_paths = []
|
| 159 |
+
if raw_video_dir.exists():
|
| 160 |
+
raw_paths.extend(sorted(raw_video_dir.iterdir()))
|
| 161 |
+
if scratch_raw_video_dir.exists():
|
| 162 |
+
raw_paths.extend(sorted(scratch_raw_video_dir.iterdir()))
|
| 163 |
+
seen = set()
|
| 164 |
+
for path in raw_paths:
|
| 165 |
if not path.is_file() or path.suffix.lower() not in video_extensions:
|
| 166 |
continue
|
| 167 |
video_id = path.stem
|
| 168 |
+
if video_id in seen:
|
| 169 |
+
continue
|
| 170 |
+
seen.add(video_id)
|
| 171 |
if video_id in active_claims:
|
| 172 |
continue
|
| 173 |
+
if video_id in uploaded_ids:
|
| 174 |
+
continue
|
| 175 |
complete_marker = dataset_dir / video_id / 'npz' / '.complete'
|
| 176 |
+
scratch_complete_marker = scratch_dataset_dir / video_id / 'npz' / '.complete'
|
| 177 |
+
if not force and (complete_marker.exists() or scratch_complete_marker.exists()):
|
| 178 |
continue
|
| 179 |
selected.append(video_id)
|
| 180 |
if len(selected) >= remaining_slots:
|
|
|
|
| 193 |
exit 0
|
| 194 |
fi
|
| 195 |
|
| 196 |
+
AVAILABLE_SLOTS="$(python - "$PARTITIONS" "$PARTITION_SLOTS_FILE" "$MAX_PER_NODE" "$USER" "$PARTITION_COOLDOWN_DIR" "$PARTITION_QOS_COOLDOWN_SECONDS" "$PARTITION_QOS_HEADROOM" "$PARTITION_ACTIVE_WINDOW_DEFAULT" "$PARTITION_ACTIVE_WINDOW_GPU" "$PARTITION_ACTIVE_WINDOW_GPU_REDHAT" "$PARTITION_ACTIVE_WINDOW_CGPU" <<'PY'
|
| 197 |
import re
|
| 198 |
import subprocess
|
| 199 |
import sys
|
| 200 |
+
from collections import defaultdict
|
| 201 |
from pathlib import Path
|
| 202 |
|
| 203 |
parts = [p for p in sys.argv[1].split(',') if p]
|
| 204 |
out_path = Path(sys.argv[2])
|
| 205 |
max_per_node_arg = sys.argv[3]
|
| 206 |
+
user = sys.argv[4]
|
| 207 |
+
cooldown_dir = Path(sys.argv[5])
|
| 208 |
+
cooldown_seconds = int(sys.argv[6])
|
| 209 |
+
qos_headroom = int(sys.argv[7])
|
| 210 |
+
partition_active_window_default = int(sys.argv[8])
|
| 211 |
+
partition_active_window_gpu = int(sys.argv[9])
|
| 212 |
+
partition_active_window_gpu_redhat = int(sys.argv[10])
|
| 213 |
+
partition_active_window_cgpu = int(sys.argv[11])
|
| 214 |
+
partition_active_window_map = {
|
| 215 |
+
"gpu": partition_active_window_gpu,
|
| 216 |
+
"gpu-redhat": partition_active_window_gpu_redhat,
|
| 217 |
+
"cgpu": partition_active_window_cgpu,
|
| 218 |
+
}
|
| 219 |
max_per_node = int(max_per_node_arg) if max_per_node_arg else None
|
| 220 |
+
|
| 221 |
+
qos_limit_by_part = {}
|
| 222 |
+
try:
|
| 223 |
+
proc = subprocess.run(['sacctmgr', 'show', 'qos', 'format=Name,MaxSubmitPU', '-P'], capture_output=True, text=True, check=False)
|
| 224 |
+
for line in (proc.stdout or '').splitlines():
|
| 225 |
+
if not line.strip() or '|' not in line:
|
| 226 |
+
continue
|
| 227 |
+
name, max_submit = line.split('|', 1)
|
| 228 |
+
name = name.strip()
|
| 229 |
+
max_submit = max_submit.strip()
|
| 230 |
+
if name in parts and max_submit:
|
| 231 |
+
try:
|
| 232 |
+
qos_limit_by_part[name] = int(max_submit)
|
| 233 |
+
except ValueError:
|
| 234 |
+
pass
|
| 235 |
+
except Exception:
|
| 236 |
+
qos_limit_by_part = {}
|
| 237 |
+
|
| 238 |
+
def expand_count(jobid_token: str) -> int:
|
| 239 |
+
m = re.match(r'^(\d+)_\[(.+)\]$', jobid_token)
|
| 240 |
+
if not m:
|
| 241 |
+
return 1
|
| 242 |
+
body = m.group(2)
|
| 243 |
+
if '%' in body:
|
| 244 |
+
body = body.split('%', 1)[0]
|
| 245 |
+
total = 0
|
| 246 |
+
for part in body.split(','):
|
| 247 |
+
part = part.strip()
|
| 248 |
+
if not part:
|
| 249 |
+
continue
|
| 250 |
+
if '-' in part:
|
| 251 |
+
a, b = part.split('-', 1)
|
| 252 |
+
try:
|
| 253 |
+
total += int(b) - int(a) + 1
|
| 254 |
+
except ValueError:
|
| 255 |
+
total += 1
|
| 256 |
+
else:
|
| 257 |
+
total += 1
|
| 258 |
+
return max(total, 1)
|
| 259 |
+
|
| 260 |
+
active_tasks_by_part = defaultdict(int)
|
| 261 |
+
try:
|
| 262 |
+
proc = subprocess.run(['squeue', '-u', user, '-h', '-o', '%i|%P|%T'], capture_output=True, text=True, check=False)
|
| 263 |
+
for line in (proc.stdout or '').splitlines():
|
| 264 |
+
if not line.strip() or '|' not in line:
|
| 265 |
+
continue
|
| 266 |
+
jobid, partition, state = [x.strip() for x in line.split('|', 2)]
|
| 267 |
+
if partition not in parts:
|
| 268 |
+
continue
|
| 269 |
+
if state not in {'RUNNING', 'PENDING', 'CONFIGURING'}:
|
| 270 |
+
continue
|
| 271 |
+
active_tasks_by_part[partition] += expand_count(jobid)
|
| 272 |
+
except Exception:
|
| 273 |
+
active_tasks_by_part = defaultdict(int)
|
| 274 |
+
|
| 275 |
+
free_gpus_by_part = defaultdict(int)
|
| 276 |
for part in parts:
|
| 277 |
proc = subprocess.run(['sinfo', '-h', '-N', '-p', part, '-o', '%N'], capture_output=True, text=True, check=False)
|
| 278 |
if proc.returncode != 0:
|
|
|
|
| 297 |
if max_per_node is not None:
|
| 298 |
free = min(free, max_per_node)
|
| 299 |
if free > 0:
|
| 300 |
+
free_gpus_by_part[part] += free
|
| 301 |
+
|
| 302 |
+
ordered = []
|
| 303 |
+
now = __import__('time').time()
|
| 304 |
+
for part in parts:
|
| 305 |
+
cooldown_path = cooldown_dir / f'{part}.until'
|
| 306 |
+
if cooldown_path.exists():
|
| 307 |
+
try:
|
| 308 |
+
until_ts = float(cooldown_path.read_text(encoding='utf-8').strip() or '0')
|
| 309 |
+
except Exception:
|
| 310 |
+
until_ts = 0.0
|
| 311 |
+
if until_ts > now:
|
| 312 |
+
continue
|
| 313 |
+
cooldown_path.unlink(missing_ok=True)
|
| 314 |
+
free_slots = free_gpus_by_part.get(part, 0)
|
| 315 |
+
if free_slots <= 0:
|
| 316 |
+
continue
|
| 317 |
+
qos_limit = qos_limit_by_part.get(part)
|
| 318 |
+
active_tasks = active_tasks_by_part.get(part, 0)
|
| 319 |
+
submit_slots = free_slots
|
| 320 |
+
if qos_limit is not None:
|
| 321 |
+
submit_slots = min(submit_slots, max(0, qos_limit - active_tasks - qos_headroom))
|
| 322 |
+
part_window = partition_active_window_map.get(part, partition_active_window_default)
|
| 323 |
+
submit_slots = min(submit_slots, max(0, part_window - active_tasks))
|
| 324 |
+
if submit_slots > 0:
|
| 325 |
+
ordered.append((part, free_slots, active_tasks, qos_limit if qos_limit is not None else 'na', submit_slots))
|
| 326 |
+
|
| 327 |
+
ordered.sort(key=lambda row: (-int(row[4]), -int(row[1]), row[0]))
|
| 328 |
+
|
| 329 |
+
out_path.write_text(''.join(f'{part}\t{free}\t{active}\t{qos}\t{submit}\n' for part, free, active, qos, submit in ordered), encoding='utf-8')
|
| 330 |
+
print(sum(submit for _, _, _, _, submit in ordered))
|
| 331 |
PY
|
| 332 |
)"
|
| 333 |
|
| 334 |
if [[ -z "$AVAILABLE_SLOTS" || "$AVAILABLE_SLOTS" == "0" ]]; then
|
| 335 |
+
echo "No free GPU/QoS submission slots detected across requested partitions."
|
| 336 |
+
rm -f "$BASE_MANIFEST" "$PARTITION_SLOTS_FILE"
|
| 337 |
exit 0
|
| 338 |
fi
|
| 339 |
|
| 340 |
+
AVAILABLE_JOB_SLOTS="$AVAILABLE_SLOTS"
|
| 341 |
+
TARGET_VIDEO_COUNT="$PENDING_COUNT"
|
| 342 |
+
MAX_VIDEO_COUNT="$((AVAILABLE_JOB_SLOTS * VIDEOS_PER_JOB))"
|
| 343 |
+
if [[ "$TARGET_VIDEO_COUNT" -gt "$MAX_VIDEO_COUNT" ]]; then
|
| 344 |
+
TARGET_VIDEO_COUNT="$MAX_VIDEO_COUNT"
|
| 345 |
+
head -n "$TARGET_VIDEO_COUNT" "$BASE_MANIFEST" > "$BASE_MANIFEST.tmp"
|
| 346 |
mv "$BASE_MANIFEST.tmp" "$BASE_MANIFEST"
|
| 347 |
fi
|
| 348 |
+
TARGET_JOB_COUNT="$(((TARGET_VIDEO_COUNT + VIDEOS_PER_JOB - 1) / VIDEOS_PER_JOB))"
|
| 349 |
|
| 350 |
echo "Created manifest: $BASE_MANIFEST"
|
| 351 |
echo "Pending videos selected this cycle: $PENDING_COUNT"
|
| 352 |
+
echo "Available GPU/QoS job slots right now: $AVAILABLE_JOB_SLOTS"
|
| 353 |
+
echo "Submitting jobs now: $TARGET_JOB_COUNT"
|
| 354 |
+
echo "Submitting videos now: $TARGET_VIDEO_COUNT"
|
| 355 |
+
|
| 356 |
+
echo "Per-partition capacity snapshot:"
|
| 357 |
+
while IFS=$'\t' read -r partition free_slots active_tasks qos_limit submit_slots; do
|
| 358 |
+
[[ -z "$partition" ]] && continue
|
| 359 |
+
echo " partition=$partition free_gpus=$free_slots active_tasks=$active_tasks qos_limit=${qos_limit:-na} submit_slots=$submit_slots"
|
| 360 |
+
done < "$PARTITION_SLOTS_FILE"
|
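Each row of the partition slots file printed above is tab-separated: partition, free GPUs, active tasks, QoS limit (or na), and the derived submit slots. A minimal sketch of the same capacity clamp, with made-up numbers (not taken from a real cluster):

# Sketch of the submit-slot clamp used above (illustrative inputs only).
free_slots = 12            # free GPUs found via sinfo for this partition
active_tasks = 5           # my queued/running array tasks in this partition
qos_limit = 30             # MaxSubmitPU reported by sacctmgr, if any
qos_headroom = 2           # PARTITION_QOS_HEADROOM
part_window = 16           # per-partition active-task window

submit_slots = free_slots
if qos_limit is not None:
    submit_slots = min(submit_slots, max(0, qos_limit - active_tasks - qos_headroom))
submit_slots = min(submit_slots, max(0, part_window - active_tasks))

line = f"gpu\t{free_slots}\t{active_tasks}\t{qos_limit}\t{submit_slots}\n"  # one slots-file row
print(line, end="")  # gpu	12	5	30	11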
| 361 |
|
| 362 |
write_claims() {
|
| 363 |
local manifest="$1"
|
| 364 |
local job_id="$2"
|
| 365 |
+
local index=0
|
| 366 |
local task_id=0
|
| 367 |
while IFS= read -r video_id; do
|
| 368 |
[[ -z "$video_id" ]] && continue
|
| 369 |
+
task_id=$((index / VIDEOS_PER_JOB))
|
| 370 |
cat > "$CLAIM_DIR/${video_id}.claim" <<CLAIM
|
| 371 |
job_id=$job_id
|
| 372 |
task_id=$task_id
|
|
|
|
| 374 |
video_id=$video_id
|
| 375 |
submitted_at=$(date '+%F %T')
|
| 376 |
CLAIM
|
| 377 |
+
index=$((index + 1))
|
| 378 |
done < "$manifest"
|
| 379 |
}
|
| 380 |
|
| 381 |
+
mark_partition_submit_cooldown() {
|
| 382 |
local partition="$1"
|
| 383 |
+
mkdir -p "$PARTITION_COOLDOWN_DIR"
|
| 384 |
+
printf '%s\n' "$(( $(date +%s) + PARTITION_QOS_COOLDOWN_SECONDS ))" > "$PARTITION_COOLDOWN_DIR/${partition}.until"
|
| 385 |
+
}
|
| 386 |
+
|
| 387 |
+
submit_partition() {
|
| 388 |
+
local partition="$1"
|
| 389 |
+
local manifest="$2"
|
| 390 |
+
local count="$3"
|
| 391 |
+
local task_count="$(((count + VIDEOS_PER_JOB - 1) / VIDEOS_PER_JOB))"
|
| 392 |
+
local array_spec="0-$((task_count - 1))"
|
| 393 |
local job_output job_id
|
| 394 |
if [[ -n "$ARRAY_PARALLEL" ]]; then
|
| 395 |
array_spec+="%${ARRAY_PARALLEL}"
|
| 396 |
fi
|
| 397 |
local -a cmd=(sbatch
|
| 398 |
--partition "$partition"
|
|
|
|
| 399 |
--array "$array_spec"
|
| 400 |
--cpus-per-task "$CPUS_PER_TASK"
|
| 401 |
--mem "$MEMORY"
|
| 402 |
--time "$TIME_LIMIT"
|
| 403 |
+
--output "$LOG_DIR/dwpose_${partition}_%A_%a.out"
|
| 404 |
+
--error "$LOG_DIR/dwpose_${partition}_%A_%a.err"
|
| 405 |
+
--export "ALL,ROOT_DIR=$ROOT_DIR,RUNTIME_ROOT=$RUNTIME_ROOT,STATE_ROOT=$STATE_ROOT,CONDA_SH=$CONDA_SH,CONDA_ENV=$CONDA_ENV,RAW_VIDEO_DIR=$RAW_VIDEO_DIR,SCRATCH_RAW_VIDEO_DIR=$SCRATCH_RAW_VIDEO_DIR,DATASET_DIR=$DATASET_DIR,STATS_NPZ=$STATS_NPZ,FPS=$FPS,FORCE_PROCESS=$FORCE_PROCESS,DELETE_SOURCE_ON_SUCCESS=$DELETE_SOURCE_ON_SUCCESS,MANIFEST=$manifest,CLAIM_DIR=$CLAIM_DIR,VIDEOS_PER_JOB=$VIDEOS_PER_JOB"
|
| 406 |
)
|
| 407 |
if [[ -n "$ACCOUNT" ]]; then
|
| 408 |
cmd+=(--account "$ACCOUNT")
|
| 409 |
fi
|
| 410 |
cmd+=("$SLURM_SCRIPT")
|
| 411 |
+
echo "Submitting partition=$partition array=$array_spec manifest=$manifest"
|
| 412 |
+
job_output="$(${cmd[@]} 2>&1)"
|
| 413 |
echo "$job_output"
|
| 414 |
job_id="$(awk '/Submitted batch job/ {print $4}' <<< "$job_output" | tail -n 1)"
|
| 415 |
if [[ -z "$job_id" ]]; then
|
|
|
|
| 420 |
}
|
| 421 |
|
| 422 |
submitted_total=0
|
| 423 |
+
submitted_jobs=0
|
| 424 |
+
failed_partitions=0
|
| 425 |
line_no=1
|
| 426 |
+
while IFS=$'\t' read -r partition free_slots active_tasks qos_limit submit_slots; do
|
| 427 |
[[ -z "$partition" ]] && continue
|
| 428 |
+
if [[ "$submitted_total" -ge "$TARGET_VIDEO_COUNT" ]]; then
|
| 429 |
break
|
| 430 |
fi
|
| 431 |
+
remaining_videos=$((TARGET_VIDEO_COUNT - submitted_total))
|
| 432 |
+
assign_jobs="$submit_slots"
|
| 433 |
+
if [[ "$assign_jobs" -le 0 ]]; then
|
| 434 |
+
continue
|
| 435 |
+
fi
|
| 436 |
+
max_videos_for_partition=$((assign_jobs * VIDEOS_PER_JOB))
|
| 437 |
+
assign_count="$remaining_videos"
|
| 438 |
+
if [[ "$assign_count" -gt "$max_videos_for_partition" ]]; then
|
| 439 |
+
assign_count="$max_videos_for_partition"
|
| 440 |
+
fi
|
| 441 |
+
if [[ "$assign_count" -le 0 ]]; then
|
| 442 |
+
continue
|
| 443 |
fi
|
| 444 |
+
shard_manifest="$MANIFEST_DIR/pending_videos_${TIMESTAMP}.${partition}.txt"
|
| 445 |
sed -n "${line_no},$((line_no + assign_count - 1))p" "$BASE_MANIFEST" > "$shard_manifest"
|
| 446 |
+
shard_count="$(wc -l < "$shard_manifest" | tr -d '[:space:]')"
|
| 447 |
if [[ "$shard_count" == "0" ]]; then
|
| 448 |
rm -f "$shard_manifest"
|
| 449 |
continue
|
| 450 |
fi
|
| 451 |
+
if submit_partition "$partition" "$shard_manifest" "$shard_count"; then
|
| 452 |
submitted_total=$((submitted_total + shard_count))
|
| 453 |
+
submitted_jobs=$((submitted_jobs + ((shard_count + VIDEOS_PER_JOB - 1) / VIDEOS_PER_JOB)))
|
| 454 |
line_no=$((line_no + shard_count))
|
| 455 |
else
|
| 456 |
+
mark_partition_submit_cooldown "$partition"
|
| 457 |
+
echo "Partition-level submit failed for partition=$partition; cooling down and leaving those videos unclaimed for the next cycle." >&2
|
| 458 |
rm -f "$shard_manifest"
|
| 459 |
+
failed_partitions=$((failed_partitions + 1))
|
| 460 |
continue
|
| 461 |
fi
|
| 462 |
+
done < "$PARTITION_SLOTS_FILE"
|
| 463 |
|
| 464 |
echo "SUBMITTED_VIDEO_COUNT=$submitted_total"
|
| 465 |
+
echo "SUBMITTED_JOB_COUNT=$submitted_jobs"
|
| 466 |
+
echo "FAILED_PARTITION_SUBMITS=$failed_partitions"
|
| 467 |
+
rm -f "$PARTITION_SLOTS_FILE"
|
slurm/submit_upload_parallel_drain.sh
ADDED
|
@@ -0,0 +1,87 @@
|
|
| 1 |
+
#!/usr/bin/env bash
|
| 2 |
+
set -euo pipefail
|
| 3 |
+
|
| 4 |
+
ROOT_DIR="${ROOT_DIR:-/home/sf895/SignVerse-2M}"
|
| 5 |
+
SUBMIT_SCRIPT="${SUBMIT_SCRIPT:-$ROOT_DIR/slurm/submit_upload_parallel_slurm.sh}"
|
| 6 |
+
PARALLEL_SHARDS="${PARALLEL_SHARDS:-8}"
|
| 7 |
+
ARRAY_PARALLEL="${ARRAY_PARALLEL:-8}"
|
| 8 |
+
PARTITION="${PARTITION:-main}"
|
| 9 |
+
START_STAGGER_MIN="${START_STAGGER_MIN:-1}"
|
| 10 |
+
START_STAGGER_MAX="${START_STAGGER_MAX:-3}"
|
| 11 |
+
TARGET_BYTES="${TARGET_BYTES:-10737418240}"
|
| 12 |
+
TARGET_FOLDERS="${TARGET_FOLDERS:-40}"
|
| 13 |
+
SLEEP_BETWEEN_WAVES="${SLEEP_BETWEEN_WAVES:-15}"
|
| 14 |
+
PROGRESS_JSON="${PROGRESS_JSON:-/home/sf895/SignVerse-2M-runtime/archive_upload_progress.json}"
|
| 15 |
+
HOME_DATASET_DIR="${HOME_DATASET_DIR:-/home/sf895/SignVerse-2M-runtime/dataset}"
|
| 16 |
+
SCRATCH_DATASET_DIR="${SCRATCH_DATASET_DIR:-/scratch/$USER/SignVerse-2M-runtime/dataset}"
|
| 17 |
+
|
| 18 |
+
remaining_unuploaded() {
|
| 19 |
+
python3 - <<PY
|
| 20 |
+
import json
|
| 21 |
+
from pathlib import Path
|
| 22 |
+
import sys
|
| 23 |
+
sys.path.insert(0, '/cache/home/sf895/SignVerse-2M')
|
| 24 |
+
from utils.dataset_pool import list_unuploaded_folder_paths
|
| 25 |
+
progress = json.loads(Path(r"$PROGRESS_JSON").read_text())
|
| 26 |
+
folders = list_unuploaded_folder_paths(Path(r"$HOME_DATASET_DIR"), Path(r"$SCRATCH_DATASET_DIR"), progress.get('uploaded_folders', {}))
|
| 27 |
+
print(len(folders))
|
| 28 |
+
PY
|
| 29 |
+
}
|
| 30 |
+
|
| 31 |
+
while true; do
|
| 32 |
+
remaining="$(remaining_unuploaded)"
|
| 33 |
+
echo "[drain] remaining_unuploaded=$remaining"
|
| 34 |
+
if [[ "$remaining" == "0" ]]; then
|
| 35 |
+
echo "[drain] backlog cleared"
|
| 36 |
+
break
|
| 37 |
+
fi
|
| 38 |
+
|
| 39 |
+
shards="$PARALLEL_SHARDS"
|
| 40 |
+
array_parallel="$ARRAY_PARALLEL"
|
| 41 |
+
extra_args=()
|
| 42 |
+
needed_shards=$(((remaining + TARGET_FOLDERS - 1) / TARGET_FOLDERS))
|
| 43 |
+
if (( needed_shards < shards )); then
|
| 44 |
+
shards="$needed_shards"
|
| 45 |
+
fi
|
| 46 |
+
if (( shards < 1 )); then
|
| 47 |
+
shards=1
|
| 48 |
+
fi
|
| 49 |
+
if (( shards == 1 )); then
|
| 50 |
+
array_parallel=""
|
| 51 |
+
extra_args+=(--allow-small-final-batch --no-require-target-bytes)
|
| 52 |
+
echo "[drain] switching to single-shard tail mode"
|
| 53 |
+
else
|
| 54 |
+
if (( array_parallel > shards )); then
|
| 55 |
+
array_parallel="$shards"
|
| 56 |
+
fi
|
| 57 |
+
echo "[drain] adaptive shard count=$shards"
|
| 58 |
+
fi
|
| 59 |
+
|
| 60 |
+
submit_cmd=(bash "$SUBMIT_SCRIPT"
|
| 61 |
+
--parallel-shards "$shards"
|
| 62 |
+
--partition "$PARTITION"
|
| 63 |
+
--start-stagger-min "$START_STAGGER_MIN"
|
| 64 |
+
--start-stagger-max "$START_STAGGER_MAX"
|
| 65 |
+
--target-bytes "$TARGET_BYTES"
|
| 66 |
+
--target-folders "$TARGET_FOLDERS"
|
| 67 |
+
)
|
| 68 |
+
if [[ -n "$array_parallel" ]]; then
|
| 69 |
+
submit_cmd+=(--array-parallel "$array_parallel")
|
| 70 |
+
fi
|
| 71 |
+
if (( ${#extra_args[@]} > 0 )); then
|
| 72 |
+
submit_cmd+=("${extra_args[@]}")
|
| 73 |
+
fi
|
| 74 |
+
|
| 75 |
+
submit_out="$("${submit_cmd[@]}")"
|
| 76 |
+
echo "$submit_out"
|
| 77 |
+
job_id="$(awk '/Submitted batch job/{print $4}' <<< "$submit_out" | tail -n1)"
|
| 78 |
+
if [[ -z "$job_id" ]]; then
|
| 79 |
+
echo "[drain] failed to parse submitted job id" >&2
|
| 80 |
+
exit 1
|
| 81 |
+
fi
|
| 82 |
+
|
| 83 |
+
while [[ -n "$(squeue -h -j "$job_id" 2>/dev/null)" ]]; do
|
| 84 |
+
sleep "$SLEEP_BETWEEN_WAVES"
|
| 85 |
+
done
|
| 86 |
+
sleep 2
|
| 87 |
+
done
|
slurm/submit_upload_parallel_slurm.sh
ADDED
|
@@ -0,0 +1,95 @@
|
|
| 1 |
+
#!/usr/bin/env bash
|
| 2 |
+
set -euo pipefail
|
| 3 |
+
|
| 4 |
+
ROOT_DIR="${ROOT_DIR:-/home/sf895/SignVerse-2M}"
|
| 5 |
+
RUNTIME_ROOT="${RUNTIME_ROOT:-/home/sf895/SignVerse-2M-runtime}"
|
| 6 |
+
SLURM_SCRIPT="${SLURM_SCRIPT:-$ROOT_DIR/slurm/process_upload_parallel_array.slurm}"
|
| 7 |
+
PARTITION="${PARTITION:-main}"
|
| 8 |
+
TIME_LIMIT="${TIME_LIMIT:-24:00:00}"
|
| 9 |
+
CPUS_PER_TASK="${CPUS_PER_TASK:-2}"
|
| 10 |
+
MEMORY="${MEMORY:-8G}"
|
| 11 |
+
PARALLEL_SHARDS="${PARALLEL_SHARDS:-4}"
|
| 12 |
+
ARRAY_PARALLEL="${ARRAY_PARALLEL:-}"
|
| 13 |
+
START_STAGGER_MIN="${START_STAGGER_MIN:-1}"
|
| 14 |
+
START_STAGGER_MAX="${START_STAGGER_MAX:-3}"
|
| 15 |
+
TARGET_BYTES="${TARGET_BYTES:-10737418240}"
|
| 16 |
+
TARGET_FOLDERS="${TARGET_FOLDERS:-40}"
|
| 17 |
+
ALLOW_SMALL_FINAL_BATCH="${ALLOW_SMALL_FINAL_BATCH:-0}"
|
| 18 |
+
REQUIRE_TARGET_BYTES="${REQUIRE_TARGET_BYTES:-1}"
|
| 19 |
+
DRY_RUN_UPLOAD="${DRY_RUN_UPLOAD:-0}"
|
| 20 |
+
UPLOAD_MODE="${UPLOAD_MODE:-api}"
|
| 21 |
+
REPO_ID="${REPO_ID:-SignerX/SignVerse-2M}"
|
| 22 |
+
REPO_REVISION="${REPO_REVISION:-dev}"
|
| 23 |
+
|
| 24 |
+
usage() {
|
| 25 |
+
cat <<USAGE
|
| 26 |
+
Usage:
|
| 27 |
+
bash slurm/submit_upload_parallel_slurm.sh [options]
|
| 28 |
+
|
| 29 |
+
Options:
|
| 30 |
+
--parallel-shards N Number of uploader shards. Default: 4
|
| 31 |
+
--array-parallel N Optional Slurm array concurrency cap
|
| 32 |
+
--partition NAME Slurm partition for upload jobs. Default: main
|
| 33 |
+
--time HH:MM:SS Default: 24:00:00
|
| 34 |
+
--cpus-per-task N Default: 2
|
| 35 |
+
--mem SIZE Default: 8G
|
| 36 |
+
--target-bytes N Default: 10737418240
|
| 37 |
+
--target-folders N Default: 40
|
| 38 |
+
--start-stagger-min N Default: 1
|
| 39 |
+
--start-stagger-max N Default: 3
|
| 40 |
+
--allow-small-final-batch Permit smaller final shard upload batch
|
| 41 |
+
--no-require-target-bytes Allow upload even below target-bytes
|
| 42 |
+
--dry-run Dry-run uploader
|
| 43 |
+
--upload-mode MODE api | api-stream | git-ssh. Default: api
|
| 44 |
+
--repo-id ID Default: SignerX/SignVerse-2M
|
| 45 |
+
--repo-revision BRANCH Default: dev
|
| 46 |
+
--help
|
| 47 |
+
USAGE
|
| 48 |
+
}
|
| 49 |
+
|
| 50 |
+
while [[ $# -gt 0 ]]; do
|
| 51 |
+
case "$1" in
|
| 52 |
+
--parallel-shards) PARALLEL_SHARDS="$2"; shift 2 ;;
|
| 53 |
+
--array-parallel) ARRAY_PARALLEL="$2"; shift 2 ;;
|
| 54 |
+
--partition) PARTITION="$2"; shift 2 ;;
|
| 55 |
+
--time) TIME_LIMIT="$2"; shift 2 ;;
|
| 56 |
+
--cpus-per-task) CPUS_PER_TASK="$2"; shift 2 ;;
|
| 57 |
+
--mem) MEMORY="$2"; shift 2 ;;
|
| 58 |
+
--target-bytes) TARGET_BYTES="$2"; shift 2 ;;
|
| 59 |
+
--target-folders) TARGET_FOLDERS="$2"; shift 2 ;;
|
| 60 |
+
--start-stagger-min) START_STAGGER_MIN="$2"; shift 2 ;;
|
| 61 |
+
--start-stagger-max) START_STAGGER_MAX="$2"; shift 2 ;;
|
| 62 |
+
--allow-small-final-batch) ALLOW_SMALL_FINAL_BATCH=1; shift ;;
|
| 63 |
+
--no-require-target-bytes) REQUIRE_TARGET_BYTES=0; shift ;;
|
| 64 |
+
--dry-run) DRY_RUN_UPLOAD=1; shift ;;
|
| 65 |
+
--upload-mode) UPLOAD_MODE="$2"; shift 2 ;;
|
| 66 |
+
--repo-id) REPO_ID="$2"; shift 2 ;;
|
| 67 |
+
--repo-revision) REPO_REVISION="$2"; shift 2 ;;
|
| 68 |
+
-h|--help) usage; exit 0 ;;
|
| 69 |
+
*) echo "Unknown argument: $1" >&2; usage >&2; exit 1 ;;
|
| 70 |
+
esac
|
| 71 |
+
done
|
| 72 |
+
|
| 73 |
+
if (( PARALLEL_SHARDS < 1 )); then
|
| 74 |
+
echo "--parallel-shards must be >= 1" >&2
|
| 75 |
+
exit 1
|
| 76 |
+
fi
|
| 77 |
+
|
| 78 |
+
if (( START_STAGGER_MAX < START_STAGGER_MIN )); then
|
| 79 |
+
echo "--start-stagger-max must be >= --start-stagger-min" >&2
|
| 80 |
+
exit 1
|
| 81 |
+
fi
|
| 82 |
+
|
| 83 |
+
array_spec="0-$((PARALLEL_SHARDS - 1))"
|
| 84 |
+
if [[ -n "$ARRAY_PARALLEL" ]]; then
|
| 85 |
+
array_spec="${array_spec}%${ARRAY_PARALLEL}"
|
| 86 |
+
fi
|
| 87 |
+
|
| 88 |
+
sbatch \
|
| 89 |
+
--partition="$PARTITION" \
|
| 90 |
+
--time="$TIME_LIMIT" \
|
| 91 |
+
--cpus-per-task="$CPUS_PER_TASK" \
|
| 92 |
+
--mem="$MEMORY" \
|
| 93 |
+
--array="$array_spec" \
|
| 94 |
+
--export=ALL,ROOT_DIR="$ROOT_DIR",RUNTIME_ROOT="$RUNTIME_ROOT",PARALLEL_SHARDS="$PARALLEL_SHARDS",START_STAGGER_MIN="$START_STAGGER_MIN",START_STAGGER_MAX="$START_STAGGER_MAX",TARGET_BYTES="$TARGET_BYTES",TARGET_FOLDERS="$TARGET_FOLDERS",ALLOW_SMALL_FINAL_BATCH="$ALLOW_SMALL_FINAL_BATCH",REQUIRE_TARGET_BYTES="$REQUIRE_TARGET_BYTES",DRY_RUN_UPLOAD="$DRY_RUN_UPLOAD",UPLOAD_MODE="$UPLOAD_MODE",REPO_ID="$REPO_ID",REPO_REVISION="$REPO_REVISION" \
|
| 95 |
+
"$SLURM_SCRIPT"
|
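The drain wrapper sizes each wave from the remaining backlog, and this submitter turns the shard count into a Slurm array spec. A minimal sketch of that arithmetic, with example numbers only:

# Sketch of the wave-sizing math in submit_upload_parallel_drain.sh /
# submit_upload_parallel_slurm.sh (illustrative inputs only).
remaining = 130          # unuploaded complete folders reported by dataset_pool
target_folders = 40      # folders per upload batch (TARGET_FOLDERS)
max_shards = 8           # PARALLEL_SHARDS ceiling

needed_shards = (remaining + target_folders - 1) // target_folders  # ceil division -> 4
shards = max(1, min(max_shards, needed_shards))

array_parallel = min(8, shards)  # ARRAY_PARALLEL is capped at the shard count
array_spec = f"0-{shards - 1}%{array_parallel}" if shards > 1 else "0-0"
print(shards, array_spec)  # 4 0-3%4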
slurm/watch_submit_dwpose.slurm
CHANGED
|
@@ -15,9 +15,9 @@ SUBMIT_SCRIPT="${SUBMIT_SCRIPT:-$ROOT_DIR/slurm/submit_dwpose_slurm.sh}"
|
|
| 15 |
GPU_PARTITIONS="${GPU_PARTITIONS:-gpu,gpu-redhat,cgpu}"
|
| 16 |
GPU_ACCOUNT="${GPU_ACCOUNT:-}"
|
| 17 |
SCAN_INTERVAL_SECONDS="${SCAN_INTERVAL_SECONDS:-60}"
|
| 18 |
-
SUBMIT_LIMIT="${SUBMIT_LIMIT:-
|
| 19 |
ARRAY_PARALLEL="${ARRAY_PARALLEL:-}"
|
| 20 |
-
MAX_BACKLOG_VIDEOS="${MAX_BACKLOG_VIDEOS:-
|
| 21 |
FPS="${FPS:-24}"
|
| 22 |
MAX_ITERATIONS="${MAX_ITERATIONS:-0}"
|
| 23 |
FORCE_PROCESS="${FORCE_PROCESS:-0}"
|
|
|
|
| 15 |
GPU_PARTITIONS="${GPU_PARTITIONS:-gpu,gpu-redhat,cgpu}"
|
| 16 |
GPU_ACCOUNT="${GPU_ACCOUNT:-}"
|
| 17 |
SCAN_INTERVAL_SECONDS="${SCAN_INTERVAL_SECONDS:-60}"
|
| 18 |
+
SUBMIT_LIMIT="${SUBMIT_LIMIT:-2980}"
|
| 19 |
ARRAY_PARALLEL="${ARRAY_PARALLEL:-}"
|
| 20 |
+
MAX_BACKLOG_VIDEOS="${MAX_BACKLOG_VIDEOS:-2980}"
|
| 21 |
FPS="${FPS:-24}"
|
| 22 |
MAX_ITERATIONS="${MAX_ITERATIONS:-0}"
|
| 23 |
FORCE_PROCESS="${FORCE_PROCESS:-0}"
|
utils/dataset_pool.py
ADDED
|
@@ -0,0 +1,74 @@
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
from pathlib import Path
|
| 4 |
+
from typing import Dict, Iterator, List, Tuple
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
COMPLETE_MARKER_NAME = ".complete"
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
def dataset_dir_for_video(
|
| 11 |
+
video_path: Path,
|
| 12 |
+
home_raw_dir: Path,
|
| 13 |
+
scratch_raw_dir: Path | None,
|
| 14 |
+
home_dataset_dir: Path,
|
| 15 |
+
scratch_dataset_dir: Path | None,
|
| 16 |
+
) -> Path:
|
| 17 |
+
if scratch_raw_dir is not None and scratch_dataset_dir is not None:
|
| 18 |
+
try:
|
| 19 |
+
if video_path.parent.resolve() == scratch_raw_dir.resolve():
|
| 20 |
+
return scratch_dataset_dir
|
| 21 |
+
except FileNotFoundError:
|
| 22 |
+
pass
|
| 23 |
+
return home_dataset_dir
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
def iter_dataset_video_dirs(home_dataset_dir: Path, scratch_dataset_dir: Path | None = None) -> Iterator[Tuple[str, Path]]:
|
| 27 |
+
seen: Dict[str, Path] = {}
|
| 28 |
+
for dataset_dir in [home_dataset_dir, scratch_dataset_dir]:
|
| 29 |
+
if dataset_dir is None or not dataset_dir.exists():
|
| 30 |
+
continue
|
| 31 |
+
for folder_path in sorted(dataset_dir.iterdir()):
|
| 32 |
+
if not folder_path.is_dir():
|
| 33 |
+
continue
|
| 34 |
+
seen.setdefault(folder_path.name, folder_path)
|
| 35 |
+
for video_id, folder_path in sorted(seen.items()):
|
| 36 |
+
yield video_id, folder_path
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
def complete_video_ids(home_dataset_dir: Path, scratch_dataset_dir: Path | None = None) -> set[str]:
|
| 40 |
+
complete: set[str] = set()
|
| 41 |
+
for video_id, folder_path in iter_dataset_video_dirs(home_dataset_dir, scratch_dataset_dir):
|
| 42 |
+
if (folder_path / "npz" / COMPLETE_MARKER_NAME).exists():
|
| 43 |
+
complete.add(video_id)
|
| 44 |
+
return complete
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
def count_complete(home_dataset_dir: Path, scratch_dataset_dir: Path | None = None) -> int:
|
| 48 |
+
return len(complete_video_ids(home_dataset_dir, scratch_dataset_dir))
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
def find_dataset_video_dir(video_id: str, home_dataset_dir: Path, scratch_dataset_dir: Path | None = None) -> Path:
|
| 52 |
+
home_path = home_dataset_dir / video_id
|
| 53 |
+
if home_path.exists():
|
| 54 |
+
return home_path
|
| 55 |
+
if scratch_dataset_dir is not None:
|
| 56 |
+
scratch_path = scratch_dataset_dir / video_id
|
| 57 |
+
if scratch_path.exists():
|
| 58 |
+
return scratch_path
|
| 59 |
+
return home_path
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
def list_unuploaded_folder_paths(
|
| 63 |
+
home_dataset_dir: Path,
|
| 64 |
+
scratch_dataset_dir: Path | None,
|
| 65 |
+
uploaded_folders: Dict[str, object],
|
| 66 |
+
) -> List[Tuple[str, Path]]:
|
| 67 |
+
folders: List[Tuple[str, Path]] = []
|
| 68 |
+
for video_id, folder_path in iter_dataset_video_dirs(home_dataset_dir, scratch_dataset_dir):
|
| 69 |
+
if video_id in uploaded_folders:
|
| 70 |
+
continue
|
| 71 |
+
if not (folder_path / "npz" / COMPLETE_MARKER_NAME).exists():
|
| 72 |
+
continue
|
| 73 |
+
folders.append((video_id, folder_path))
|
| 74 |
+
return folders
|
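A minimal usage sketch for the helpers above, assuming utils.dataset_pool is importable and that the home/scratch dataset directories and progress JSON follow the defaults used elsewhere in this commit:

# Sketch: enumerate complete-but-unuploaded dataset folders across home and scratch pools.
import json
from pathlib import Path

from utils.dataset_pool import find_dataset_video_dir, list_unuploaded_folder_paths

home_dataset = Path("/home/sf895/SignVerse-2M-runtime/dataset")
scratch_dataset = Path("/scratch/sf895/SignVerse-2M-runtime/dataset")
progress_path = Path("/home/sf895/SignVerse-2M-runtime/archive_upload_progress.json")

progress = json.loads(progress_path.read_text(encoding="utf-8")) if progress_path.exists() else {}
pending = list_unuploaded_folder_paths(home_dataset, scratch_dataset, progress.get("uploaded_folders", {}))
print(f"{len(pending)} folders ready to upload")
for video_id, folder in pending[:3]:
    # find_dataset_video_dir prefers the home copy and falls back to scratch.
    print(video_id, find_dataset_video_dir(video_id, home_dataset, scratch_dataset))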
utils/raw_video_pool.py
ADDED
|
@@ -0,0 +1,164 @@
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
import contextlib
|
| 4 |
+
import fcntl
|
| 5 |
+
from pathlib import Path
|
| 6 |
+
from typing import Iterable, Iterator, Sequence
|
| 7 |
+
|
| 8 |
+
VIDEO_EXTENSIONS = {".mp4", ".mkv", ".webm", ".mov"}
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
def existing_raw_dirs(*dirs: Path | None) -> list[Path]:
|
| 12 |
+
result: list[Path] = []
|
| 13 |
+
seen: set[str] = set()
|
| 14 |
+
for directory in dirs:
|
| 15 |
+
if directory is None:
|
| 16 |
+
continue
|
| 17 |
+
key = str(directory)
|
| 18 |
+
if key in seen:
|
| 19 |
+
continue
|
| 20 |
+
seen.add(key)
|
| 21 |
+
result.append(directory)
|
| 22 |
+
return result
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
def collect_raw_videos(*dirs: Path | None) -> dict[str, Path]:
|
| 26 |
+
videos: dict[str, Path] = {}
|
| 27 |
+
for directory in existing_raw_dirs(*dirs):
|
| 28 |
+
if not directory.exists():
|
| 29 |
+
continue
|
| 30 |
+
for path in sorted(directory.iterdir()):
|
| 31 |
+
if not path.is_file() or path.suffix.lower() not in VIDEO_EXTENSIONS:
|
| 32 |
+
continue
|
| 33 |
+
videos.setdefault(path.stem, path)
|
| 34 |
+
return videos
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
def count_raw_videos(*dirs: Path | None) -> int:
|
| 38 |
+
return len(collect_raw_videos(*dirs))
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
def sum_raw_video_sizes(*dirs: Path | None) -> int:
|
| 42 |
+
return sum(path.stat().st_size for path in collect_raw_videos(*dirs).values() if path.exists())
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
def iter_raw_video_files(*dirs: Path | None) -> Iterator[Path]:
|
| 46 |
+
for path in collect_raw_videos(*dirs).values():
|
| 47 |
+
yield path
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
def find_video_file(video_id: str, *dirs: Path | None) -> Path | None:
|
| 51 |
+
for directory in existing_raw_dirs(*dirs):
|
| 52 |
+
if not directory.exists():
|
| 53 |
+
continue
|
| 54 |
+
candidates = []
|
| 55 |
+
for path in directory.glob(f"{video_id}.*"):
|
| 56 |
+
if path.is_file() and path.suffix.lower() in VIDEO_EXTENSIONS:
|
| 57 |
+
candidates.append(path)
|
| 58 |
+
if candidates:
|
| 59 |
+
return sorted(candidates)[0]
|
| 60 |
+
return None
|
| 61 |
+
|
| 62 |
+
|
| 63 |
+
def iter_partial_download_files(video_id: str, *dirs: Path | None) -> Iterator[Path]:
|
| 64 |
+
seen: set[Path] = set()
|
| 65 |
+
for directory in existing_raw_dirs(*dirs):
|
| 66 |
+
if not directory.exists():
|
| 67 |
+
continue
|
| 68 |
+
for path in directory.glob(f"{video_id}*"):
|
| 69 |
+
if not path.is_file():
|
| 70 |
+
continue
|
| 71 |
+
suffixes = set(path.suffixes)
|
| 72 |
+
if '.part' in suffixes or '.ytdl' in suffixes or path.suffix in {'.part', '.ytdl'}:
|
| 73 |
+
resolved = path.resolve()
|
| 74 |
+
if resolved in seen:
|
| 75 |
+
continue
|
| 76 |
+
seen.add(resolved)
|
| 77 |
+
yield path
|
| 78 |
+
|
| 79 |
+
|
| 80 |
+
def cleanup_partial_downloads(video_id: str, *dirs: Path | None) -> None:
|
| 81 |
+
for partial_path in iter_partial_download_files(video_id, *dirs):
|
| 82 |
+
partial_path.unlink(missing_ok=True)
|
| 83 |
+
|
| 84 |
+
|
| 85 |
+
def _count_reservations(reservation_dir: Path | None, pool_name: str) -> int:
|
| 86 |
+
if reservation_dir is None:
|
| 87 |
+
return 0
|
| 88 |
+
pool_dir = reservation_dir / pool_name
|
| 89 |
+
if not pool_dir.exists():
|
| 90 |
+
return 0
|
| 91 |
+
return sum(1 for path in pool_dir.iterdir() if path.is_file() and path.suffix == ".reserve")
|
| 92 |
+
|
| 93 |
+
|
| 94 |
+
def _create_reservation(reservation_dir: Path | None, pool_name: str, reservation_key: str | None) -> Path | None:
|
| 95 |
+
if reservation_dir is None or not reservation_key:
|
| 96 |
+
return None
|
| 97 |
+
pool_dir = reservation_dir / pool_name
|
| 98 |
+
pool_dir.mkdir(parents=True, exist_ok=True)
|
| 99 |
+
reservation_path = pool_dir / f"{reservation_key}.reserve"
|
| 100 |
+
reservation_path.write_text(f"pool={pool_name}\nkey={reservation_key}\n", encoding="utf-8")
|
| 101 |
+
return reservation_path
|
| 102 |
+
|
| 103 |
+
|
| 104 |
+
def release_download_reservation(reservation_path: Path | None) -> None:
|
| 105 |
+
if reservation_path is not None:
|
| 106 |
+
reservation_path.unlink(missing_ok=True)
|
| 107 |
+
|
| 108 |
+
|
| 109 |
+
def choose_download_target(
|
| 110 |
+
primary_dir: Path,
|
| 111 |
+
scratch_dir: Path | None,
|
| 112 |
+
primary_limit: int,
|
| 113 |
+
scratch_limit: int,
|
| 114 |
+
reservation_dir: Path | None = None,
|
| 115 |
+
reservation_key: str | None = None,
|
| 116 |
+
) -> tuple[Path, Path | None]:
|
| 117 |
+
primary_dir.mkdir(parents=True, exist_ok=True)
|
| 118 |
+
if reservation_dir is None:
|
| 119 |
+
primary_count = count_raw_videos(primary_dir)
|
| 120 |
+
if primary_count < primary_limit:
|
| 121 |
+
return primary_dir, None
|
| 122 |
+
if scratch_dir is None:
|
| 123 |
+
raise RuntimeError(
|
| 124 |
+
f"raw backlog full in primary pool ({primary_count}/{primary_limit}) and no scratch raw pool configured"
|
| 125 |
+
)
|
| 126 |
+
scratch_dir.mkdir(parents=True, exist_ok=True)
|
| 127 |
+
scratch_count = count_raw_videos(scratch_dir)
|
| 128 |
+
if scratch_count < scratch_limit:
|
| 129 |
+
return scratch_dir, None
|
| 130 |
+
raise RuntimeError(
|
| 131 |
+
f"raw backlog full in both pools: primary {primary_count}/{primary_limit}, scratch {scratch_count}/{scratch_limit}"
|
| 132 |
+
)
|
| 133 |
+
|
| 134 |
+
reservation_dir.mkdir(parents=True, exist_ok=True)
|
| 135 |
+
lock_path = reservation_dir / ".target_selection.lock"
|
| 136 |
+
with lock_path.open("a+", encoding="utf-8") as lock_handle:
|
| 137 |
+
fcntl.flock(lock_handle.fileno(), fcntl.LOCK_EX)
|
| 138 |
+
try:
|
| 139 |
+
primary_count = count_raw_videos(primary_dir) + _count_reservations(reservation_dir, "home")
|
| 140 |
+
if primary_count < primary_limit:
|
| 141 |
+
return primary_dir, _create_reservation(reservation_dir, "home", reservation_key)
|
| 142 |
+
if scratch_dir is None:
|
| 143 |
+
raise RuntimeError(
|
| 144 |
+
f"raw backlog full in primary pool ({primary_count}/{primary_limit}) and no scratch raw pool configured"
|
| 145 |
+
)
|
| 146 |
+
scratch_dir.mkdir(parents=True, exist_ok=True)
|
| 147 |
+
scratch_count = count_raw_videos(scratch_dir) + _count_reservations(reservation_dir, "scratch")
|
| 148 |
+
if scratch_count < scratch_limit:
|
| 149 |
+
return scratch_dir, _create_reservation(reservation_dir, "scratch", reservation_key)
|
| 150 |
+
raise RuntimeError(
|
| 151 |
+
f"raw backlog full in both pools: primary {primary_count}/{primary_limit}, scratch {scratch_count}/{scratch_limit}"
|
| 152 |
+
)
|
| 153 |
+
finally:
|
| 154 |
+
with contextlib.suppress(OSError):
|
| 155 |
+
fcntl.flock(lock_handle.fileno(), fcntl.LOCK_UN)
|
| 156 |
+
|
| 157 |
+
|
| 158 |
+
def remove_video_files(video_id: str, *dirs: Path | None) -> None:
|
| 159 |
+
for directory in existing_raw_dirs(*dirs):
|
| 160 |
+
if not directory.exists():
|
| 161 |
+
continue
|
| 162 |
+
for path in directory.glob(f"{video_id}.*"):
|
| 163 |
+
if path.is_file() and path.suffix.lower() in VIDEO_EXTENSIONS:
|
| 164 |
+
path.unlink(missing_ok=True)
|
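A minimal usage sketch of the pool-selection helper above, assuming utils.raw_video_pool is importable; the reservation directory shown is an assumed location, and the limits mirror the defaults in this commit:

# Sketch: pick a download target directory under the home/scratch caps, then release
# the reservation once the download has finished (or failed).
from pathlib import Path

from utils.raw_video_pool import choose_download_target, release_download_reservation

home_raw = Path("/home/sf895/SignVerse-2M-runtime/raw_video")
scratch_raw = Path("/scratch/sf895/SignVerse-2M-runtime/raw_video")
reservations = Path("/home/sf895/SignVerse-2M-runtime/raw_video_reservations")  # assumed location

target_dir, reservation = choose_download_target(
    home_raw, scratch_raw,
    primary_limit=180, scratch_limit=2800,
    reservation_dir=reservations, reservation_key="video_00001",
)
try:
    print("downloading into", target_dir)  # raises RuntimeError instead when both pools are full
finally:
    release_download_reservation(reservation)  # no-op when no reservation was created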
utils/stats_npz.py
CHANGED
|
@@ -1,9 +1,10 @@
|
|
| 1 |
import fcntl
|
|
|
|
| 2 |
import os
|
| 3 |
import time
|
| 4 |
import zipfile
|
| 5 |
from pathlib import Path
|
| 6 |
-
from typing import Dict, Iterable
|
| 7 |
|
| 8 |
import numpy as np
|
| 9 |
|
|
@@ -80,8 +81,12 @@ def save_stats(stats_path: Path, stats: Dict[str, Dict[str, str]]) -> None:
|
|
| 80 |
for field in STATUS_FIELDS:
|
| 81 |
payload[field] = np.asarray([stats[video_id].get(field, "") for video_id in video_ids], dtype=object)
|
| 82 |
tmp_path = stats_path.parent / f".{stats_path.stem}.{os.getpid()}.tmp.npz"
|
| 83 |
-
|
| 84 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 85 |
|
| 86 |
|
| 87 |
def ensure_record(stats: Dict[str, Dict[str, str]], video_id: str) -> Dict[str, str]:
|
|
@@ -132,3 +137,128 @@ def update_many_video_stats_with_retry(stats_path: Path, video_ids: Iterable[str
|
|
| 132 |
if last_error is not None:
|
| 133 |
raise last_error
|
| 134 |
|
|
| 1 |
import fcntl
|
| 2 |
+
import json
|
| 3 |
import os
|
| 4 |
import time
|
| 5 |
import zipfile
|
| 6 |
from pathlib import Path
|
| 7 |
+
from typing import Dict, Iterable, Sequence
|
| 8 |
|
| 9 |
import numpy as np
|
| 10 |
|
|
|
|
| 81 |
for field in STATUS_FIELDS:
|
| 82 |
payload[field] = np.asarray([stats[video_id].get(field, "") for video_id in video_ids], dtype=object)
|
| 83 |
tmp_path = stats_path.parent / f".{stats_path.stem}.{os.getpid()}.tmp.npz"
|
| 84 |
+
try:
|
| 85 |
+
np.savez(tmp_path, **payload)
|
| 86 |
+
os.replace(tmp_path, stats_path)
|
| 87 |
+
finally:
|
| 88 |
+
if tmp_path.exists():
|
| 89 |
+
tmp_path.unlink(missing_ok=True)
|
| 90 |
|
| 91 |
|
| 92 |
def ensure_record(stats: Dict[str, Dict[str, str]], video_id: str) -> Dict[str, str]:
|
|
|
|
| 137 |
if last_error is not None:
|
| 138 |
raise last_error
|
| 139 |
|
| 140 |
+
|
| 141 |
+
|
| 142 |
+
def journal_lock_path(journal_path: Path) -> Path:
|
| 143 |
+
return journal_path.with_suffix(journal_path.suffix + ".lock")
|
| 144 |
+
|
| 145 |
+
|
| 146 |
+
def sidecar_status_dir(stats_path: Path) -> Path:
|
| 147 |
+
return stats_path.parent / "video_status"
|
| 148 |
+
|
| 149 |
+
|
| 150 |
+
def sidecar_status_path(stats_path: Path, video_id: str) -> Path:
|
| 151 |
+
return sidecar_status_dir(stats_path) / f"{video_id}.jsonl"
|
| 152 |
+
|
| 153 |
+
|
| 154 |
+
def append_status_journal(journal_path: Path, video_ids: Sequence[str], **updates: str) -> None:
|
| 155 |
+
journal_path.parent.mkdir(parents=True, exist_ok=True)
|
| 156 |
+
payload = {
|
| 157 |
+
"video_ids": list(video_ids),
|
| 158 |
+
"updates": {key: ("" if value is None else str(value)) for key, value in updates.items()},
|
| 159 |
+
"recorded_at": time.strftime("%Y-%m-%d %H:%M:%S"),
|
| 160 |
+
}
|
| 161 |
+
lock_path = journal_lock_path(journal_path)
|
| 162 |
+
with lock_path.open("a+", encoding="utf-8") as handle:
|
| 163 |
+
fcntl.flock(handle.fileno(), fcntl.LOCK_EX)
|
| 164 |
+
try:
|
| 165 |
+
with journal_path.open("a", encoding="utf-8") as journal_handle:
|
| 166 |
+
journal_handle.write(json.dumps(payload, ensure_ascii=False) + "\n")
|
| 167 |
+
finally:
|
| 168 |
+
fcntl.flock(handle.fileno(), fcntl.LOCK_UN)
|
| 169 |
+
|
| 170 |
+
|
| 171 |
+
def append_status_sidecar(stats_path: Path, video_id: str, **updates: str) -> None:
|
| 172 |
+
payload = {
|
| 173 |
+
"video_ids": [video_id],
|
| 174 |
+
"updates": {key: ("" if value is None else str(value)) for key, value in updates.items()},
|
| 175 |
+
"recorded_at": time.strftime("%Y-%m-%d %H:%M:%S"),
|
| 176 |
+
}
|
| 177 |
+
sidecar_path = sidecar_status_path(stats_path, video_id)
|
| 178 |
+
sidecar_path.parent.mkdir(parents=True, exist_ok=True)
|
| 179 |
+
lock_path = journal_lock_path(sidecar_path)
|
| 180 |
+
with lock_path.open("a+", encoding="utf-8") as handle:
|
| 181 |
+
fcntl.flock(handle.fileno(), fcntl.LOCK_EX)
|
| 182 |
+
try:
|
| 183 |
+
with sidecar_path.open("a", encoding="utf-8") as sidecar_handle:
|
| 184 |
+
sidecar_handle.write(json.dumps(payload, ensure_ascii=False) + "\n")
|
| 185 |
+
finally:
|
| 186 |
+
fcntl.flock(handle.fileno(), fcntl.LOCK_UN)
|
| 187 |
+
|
| 188 |
+
|
| 189 |
+
def update_video_stats_best_effort(stats_path: Path, journal_path: Path, video_id: str, **updates: str) -> Dict[str, str]:
|
| 190 |
+
append_status_sidecar(stats_path, video_id, **updates)
|
| 191 |
+
return {key: ("" if value is None else str(value)) for key, value in updates.items() if key in STATUS_FIELDS}
|
| 192 |
+
|
| 193 |
+
|
| 194 |
+
def update_many_video_stats_best_effort(stats_path: Path, journal_path: Path, video_ids: Sequence[str], **updates: str) -> None:
|
| 195 |
+
try:
|
| 196 |
+
update_many_video_stats_with_retry(stats_path, video_ids, **updates)
|
| 197 |
+
except Exception:
|
| 198 |
+
append_status_journal(journal_path, video_ids, **updates)
|
| 199 |
+
|
| 200 |
+
|
| 201 |
+
def _apply_payloads_to_stats(stats_path: Path, payloads: Sequence[dict]) -> int:
|
| 202 |
+
if not payloads:
|
| 203 |
+
return 0
|
| 204 |
+
update_count = 0
|
| 205 |
+
stats_lock_path = _lock_path(stats_path)
|
| 206 |
+
stats_lock_path.parent.mkdir(parents=True, exist_ok=True)
|
| 207 |
+
with stats_lock_path.open("a+", encoding="utf-8") as stats_handle:
|
| 208 |
+
fcntl.flock(stats_handle.fileno(), fcntl.LOCK_EX)
|
| 209 |
+
try:
|
| 210 |
+
stats = _load_stats_unlocked(stats_path)
|
| 211 |
+
for payload in payloads:
|
| 212 |
+
video_ids = [str(item) for item in payload.get("video_ids", []) if str(item)]
|
| 213 |
+
updates = payload.get("updates", {})
|
| 214 |
+
if not video_ids or not isinstance(updates, dict):
|
| 215 |
+
continue
|
| 216 |
+
for video_id in video_ids:
|
| 217 |
+
record = ensure_record(stats, video_id)
|
| 218 |
+
for key, value in updates.items():
|
| 219 |
+
if key in STATUS_FIELDS:
|
| 220 |
+
record[key] = "" if value is None else str(value)
|
| 221 |
+
update_count += 1
|
| 222 |
+
if update_count:
|
| 223 |
+
save_stats(stats_path, stats)
|
| 224 |
+
finally:
|
| 225 |
+
fcntl.flock(stats_handle.fileno(), fcntl.LOCK_UN)
|
| 226 |
+
return update_count
|
| 227 |
+
|
| 228 |
+
|
| 229 |
+
def apply_status_journal_to_stats(stats_path: Path, journal_path: Path, remove_applied: bool = True) -> int:
|
| 230 |
+
payloads = []
|
| 231 |
+
|
| 232 |
+
if journal_path.exists() and journal_path.stat().st_size > 0:
|
| 233 |
+
lock_path = journal_lock_path(journal_path)
|
| 234 |
+
with lock_path.open("a+", encoding="utf-8") as handle:
|
| 235 |
+
fcntl.flock(handle.fileno(), fcntl.LOCK_EX)
|
| 236 |
+
try:
|
| 237 |
+
if journal_path.exists() and journal_path.stat().st_size > 0:
|
| 238 |
+
for line in journal_path.read_text(encoding="utf-8").splitlines():
|
| 239 |
+
if line.strip():
|
| 240 |
+
payloads.append(json.loads(line))
|
| 241 |
+
if remove_applied:
|
| 242 |
+
journal_path.unlink(missing_ok=True)
|
| 243 |
+
finally:
|
| 244 |
+
fcntl.flock(handle.fileno(), fcntl.LOCK_UN)
|
| 245 |
+
|
| 246 |
+
sidecar_dir = sidecar_status_dir(stats_path)
|
| 247 |
+
if sidecar_dir.exists():
|
| 248 |
+
for sidecar_path in sorted(sidecar_dir.glob("*.jsonl")):
|
| 249 |
+
lock_path = journal_lock_path(sidecar_path)
|
| 250 |
+
with lock_path.open("a+", encoding="utf-8") as handle:
|
| 251 |
+
fcntl.flock(handle.fileno(), fcntl.LOCK_EX)
|
| 252 |
+
try:
|
| 253 |
+
if sidecar_path.exists() and sidecar_path.stat().st_size > 0:
|
| 254 |
+
for line in sidecar_path.read_text(encoding="utf-8").splitlines():
|
| 255 |
+
if line.strip():
|
| 256 |
+
payloads.append(json.loads(line))
|
| 257 |
+
if remove_applied:
|
| 258 |
+
sidecar_path.unlink(missing_ok=True)
|
| 259 |
+
finally:
|
| 260 |
+
fcntl.flock(handle.fileno(), fcntl.LOCK_UN)
|
| 261 |
+
if remove_applied:
|
| 262 |
+
lock_path.unlink(missing_ok=True)
|
| 263 |
+
|
| 264 |
+
return _apply_payloads_to_stats(stats_path, payloads)
|
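A minimal sketch of the intended write/replay flow for the helpers above, assuming utils.stats_npz is importable; the upload_status field name is illustrative and only takes effect if it is one of STATUS_FIELDS:

# Sketch: workers record status best-effort (falling back to the JSONL journal or a
# per-video sidecar when stats.npz is contended), and a later maintenance pass folds
# journal + sidecars back into stats.npz.
from pathlib import Path

from utils.stats_npz import (
    apply_status_journal_to_stats,
    update_many_video_stats_best_effort,
)

stats_npz = Path("/home/sf895/SignVerse-2M-runtime/stats.npz")
journal = Path("/home/sf895/SignVerse-2M-runtime/upload_status_journal.jsonl")

# Uploader side: never blocks the upload on stats.npz lock contention.
update_many_video_stats_best_effort(stats_npz, journal, ["video_00001", "video_00002"],
                                    upload_status="ok")

# Orchestrator side: replay any journaled updates into the npz and clear them.
applied = apply_status_journal_to_stats(stats_npz, journal)
print(f"applied {applied} journaled status updates")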