Commit 5dced4c · Parent: 142e0ee
Sen Fang committed

Update orchestration, CPU download scheduling, and upload/runtime fixes

reproduce_independently.sh CHANGED
@@ -24,7 +24,8 @@ LIMIT=""
 VIDEO_IDS=()
 FPS="24"
 WORKERS=""
-TARGET_BYTES="$((14 * 1024 * 1024 * 1024))"
+TARGET_BYTES="$((10 * 1024 * 1024 * 1024))"
+TARGET_FOLDERS="${TARGET_FOLDERS:-40}"
 DOWNLOAD_BATCH_SIZE="1"
 PROCESS_BATCH_SIZE=""
 RAW_BACKLOG_LIMIT="340"
@@ -57,6 +58,7 @@ Options:
   --fps N
   --workers N
   --target-bytes N
+  --target-folders N
   --download-batch-size N
   --process-batch-size N
   --raw-backlog-limit N
@@ -300,6 +302,7 @@ run_upload_stage() {
     --stats-npz "$STATS_NPZ"
     --repo-id "$REPO_ID"
     --target-bytes "$TARGET_BYTES"
+    --target-folders "$TARGET_FOLDERS"
   )
 
   if [[ "$require_target" == "1" ]]; then
@@ -511,7 +514,7 @@ upload_loop() {
       continue
     fi
 
-    if [[ "$complete_pending_upload_bytes" -lt "$TARGET_BYTES" && ( "$pending_download" -gt 0 || "$pending_process" -gt 0 ) ]]; then
+    if [[ "$complete_pending_upload_bytes" -lt "$TARGET_BYTES" && "$complete_pending_upload" -lt "$TARGET_FOLDERS" && ( "$pending_download" -gt 0 || "$pending_process" -gt 0 ) ]]; then
      sleep "$IDLE_SLEEP_SECONDS"
      continue
     fi
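
Note: the reworked gate in upload_loop now defers uploading only while *both* thresholds are unmet (pending complete folders below TARGET_BYTES and below TARGET_FOLDERS) and upstream stages still have work in flight. A minimal sketch of the same predicate in Python, with illustrative names that are not part of the scripts:

def should_defer_upload(
    pending_bytes: int,
    pending_folders: int,
    pending_download: int,
    pending_process: int,
    target_bytes: int = 10 * 1024**3,
    target_folders: int = 40,
) -> bool:
    """Mirror of the upload_loop gate: keep waiting only while neither
    threshold is reached and downloads/processing are still running."""
    below_both = pending_bytes < target_bytes and pending_folders < target_folders
    upstream_busy = pending_download > 0 or pending_process > 0
    return below_both and upstream_busy

# Meeting either threshold (or the pipeline draining) triggers an upload pass:
assert not should_defer_upload(11 * 1024**3, 5, 3, 0)   # bytes threshold met
assert not should_defer_upload(2 * 1024**3, 40, 3, 0)   # folder threshold met
assert not should_defer_upload(2 * 1024**3, 5, 0, 0)    # nothing left upstream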
reproduce_independently_slurm.sh CHANGED
@@ -29,27 +29,39 @@ LIMIT="${LIMIT:-}"
 VIDEO_IDS=()
 FPS="${FPS:-24}"
 WORKERS="${WORKERS:-}"
-TARGET_BYTES="${TARGET_BYTES:-$((14 * 1024 * 1024 * 1024))}"
+TARGET_BYTES="${TARGET_BYTES:-$((10 * 1024 * 1024 * 1024))}"
+TARGET_FOLDERS="${TARGET_FOLDERS:-40}"
 DOWNLOAD_BATCH_SIZE="${DOWNLOAD_BATCH_SIZE:-1}"
+DOWNLOAD_WORKERS="${DOWNLOAD_WORKERS:-4}"
+USE_SLURM_DOWNLOAD="${USE_SLURM_DOWNLOAD:-1}"
 PROCESS_BATCH_SIZE="${PROCESS_BATCH_SIZE:-}"
 MIN_PROCESS_START_BACKLOG="${MIN_PROCESS_START_BACKLOG:-4}"
-RAW_BACKLOG_LIMIT="${RAW_BACKLOG_LIMIT:-340}"
+RAW_BACKLOG_LIMIT="${RAW_BACKLOG_LIMIT:-180}"
 MAX_RAW_VIDEO_BYTES="${MAX_RAW_VIDEO_BYTES:-0}"
 MAX_ITERATIONS="${MAX_ITERATIONS:-0}"
 IDLE_SLEEP_SECONDS="${IDLE_SLEEP_SECONDS:-5}"
 REPO_ID="${REPO_ID:-SignerX/Sign-DWPose-2M}"
-COOKIES_FILE="${COOKIES_FILE:-}"
+COOKIES_FILE="${COOKIES_FILE:-$ROOT_DIR/www.youtube.com_cookies (2).txt}"
 COOKIES_FROM_BROWSER="${COOKIES_FROM_BROWSER:-}"
 EXTRACTOR_ARGS="${EXTRACTOR_ARGS:-}"
 SLURM_PROCESS_SUBMIT_SCRIPT="${SLURM_PROCESS_SUBMIT_SCRIPT:-$ROOT_DIR/slurm/submit_dwpose_slurm.sh}"
-GPU_PARTITIONS="${GPU_PARTITIONS:-gpu}"
+SLURM_DOWNLOAD_SUBMIT_SCRIPT="${SLURM_DOWNLOAD_SUBMIT_SCRIPT:-$ROOT_DIR/slurm/submit_download_slurm.sh}"
+GPU_PARTITIONS="gpu,gpu-redhat,cgpu"
 GPU_ACCOUNT="${GPU_ACCOUNT:-}"
 ARRAY_PARALLEL="${ARRAY_PARALLEL:-}"
-MAX_BACKLOG_VIDEOS="${MAX_BACKLOG_VIDEOS:-340}"
+MAX_BACKLOG_VIDEOS="${MAX_BACKLOG_VIDEOS:-180}"
+DOWNLOAD_CLAIM_DIR="${DOWNLOAD_CLAIM_DIR:-$STATE_ROOT/slurm/state/download_claims}"
+DOWNLOAD_CSV_LOCK_PATH="${DOWNLOAD_CSV_LOCK_PATH:-$RUNTIME_ROOT/Sign-DWPose-2M-metadata_processed.csv.lock}"
+DOWNLOAD_PARTITIONS="${DOWNLOAD_PARTITIONS:-main}"
+DOWNLOAD_ACCOUNT="${DOWNLOAD_ACCOUNT:-}"
+DOWNLOAD_TIME="${DOWNLOAD_TIME:-04:00:00}"
+DOWNLOAD_CPUS_PER_TASK="${DOWNLOAD_CPUS_PER_TASK:-1}"
+DOWNLOAD_MEM="${DOWNLOAD_MEM:-4G}"
+DOWNLOAD_ARRAY_PARALLEL="${DOWNLOAD_ARRAY_PARALLEL:-32}"
 ORCHESTRATOR_PARTITION="${ORCHESTRATOR_PARTITION:-main}"
 ORCHESTRATOR_ACCOUNT="${ORCHESTRATOR_ACCOUNT:-}"
 ORCHESTRATOR_TIME="${ORCHESTRATOR_TIME:-24:00:00}"
-ORCHESTRATOR_CPUS_PER_TASK="${ORCHESTRATOR_CPUS_PER_TASK:-2}"
+ORCHESTRATOR_CPUS_PER_TASK="${ORCHESTRATOR_CPUS_PER_TASK:-4}"
 ORCHESTRATOR_MEM="${ORCHESTRATOR_MEM:-8G}"
 RUN_LOCAL="${RUN_LOCAL:-0}"
 
@@ -87,8 +99,16 @@ Options:
   --orchestrator-mem SIZE
   --run-local
   --target-bytes N
+  --target-folders N
   --download-batch-size N
   --process-batch-size N
+  --use-slurm-download {0,1}
+  --download-partitions P1[,P2,...]
+  --download-account NAME
+  --download-time HH:MM:SS
+  --download-cpus-per-task N
+  --download-mem SIZE
+  --download-array-parallel N
   --raw-backlog-limit N
   --max-raw-video-bytes N
   --max-iterations N
@@ -110,7 +130,7 @@ Examples:
   bash reproduce_independently_slurm.sh --stage download --limit 10 --skip-video-download
   bash reproduce_independently_slurm.sh --stage process --video-id Bdj5MUf_3Hc
   bash reproduce_independently_slurm.sh --stage upload --target-bytes 500000000
-  bash reproduce_independently_slurm.sh --stage all --gpu-partitions gpu,gpu-redhat,cgpu --array-parallel 128
+  bash reproduce_independently_slurm.sh
   bash reproduce_independently_slurm.sh --stage all --run-local
 EOF
 }
@@ -202,6 +222,38 @@ while [[ $# -gt 0 ]]; do
      DOWNLOAD_BATCH_SIZE="$2"
      shift 2
      ;;
+    --download-workers)
+      DOWNLOAD_WORKERS="$2"
+      shift 2
+      ;;
+    --use-slurm-download)
+      USE_SLURM_DOWNLOAD="$2"
+      shift 2
+      ;;
+    --download-partitions)
+      DOWNLOAD_PARTITIONS="$2"
+      shift 2
+      ;;
+    --download-account)
+      DOWNLOAD_ACCOUNT="$2"
+      shift 2
+      ;;
+    --download-time)
+      DOWNLOAD_TIME="$2"
+      shift 2
+      ;;
+    --download-cpus-per-task)
+      DOWNLOAD_CPUS_PER_TASK="$2"
+      shift 2
+      ;;
+    --download-mem)
+      DOWNLOAD_MEM="$2"
+      shift 2
+      ;;
+    --download-array-parallel)
+      DOWNLOAD_ARRAY_PARALLEL="$2"
+      shift 2
+      ;;
     --process-batch-size)
      PROCESS_BATCH_SIZE="$2"
      shift 2
@@ -294,7 +346,7 @@ if [[ -z "${SLURM_JOB_ID:-}" && "$RUN_LOCAL" != "1" ]]; then
     echo "Missing orchestration wrapper: $wrapper" >&2
     exit 1
   fi
-  export_args="ALL,ROOT_DIR=$ROOT_DIR,RUNTIME_ROOT=$RUNTIME_ROOT,STATE_ROOT=$STATE_ROOT,SOURCE_METADATA_CSV=$SOURCE_METADATA_CSV,OUTPUT_METADATA_CSV=$OUTPUT_METADATA_CSV,RAW_VIDEO_DIR=$RAW_VIDEO_DIR,RAW_CAPTION_DIR=$RAW_CAPTION_DIR,RAW_METADATA_DIR=$RAW_METADATA_DIR,DATASET_DIR=$DATASET_DIR,ARCHIVE_DIR=$ARCHIVE_DIR,STATS_NPZ=$STATS_NPZ,PROGRESS_JSON=$PROGRESS_JSON,STAGE=$STAGE,LIMIT=$LIMIT,FPS=$FPS,WORKERS=$WORKERS,TARGET_BYTES=$TARGET_BYTES,DOWNLOAD_BATCH_SIZE=$DOWNLOAD_BATCH_SIZE,PROCESS_BATCH_SIZE=$PROCESS_BATCH_SIZE,MIN_PROCESS_START_BACKLOG=$MIN_PROCESS_START_BACKLOG,RAW_BACKLOG_LIMIT=$RAW_BACKLOG_LIMIT,MAX_RAW_VIDEO_BYTES=$MAX_RAW_VIDEO_BYTES,MAX_ITERATIONS=$MAX_ITERATIONS,IDLE_SLEEP_SECONDS=$IDLE_SLEEP_SECONDS,REPO_ID=$REPO_ID,COOKIES_FILE=$COOKIES_FILE,COOKIES_FROM_BROWSER=$COOKIES_FROM_BROWSER,EXTRACTOR_ARGS=$EXTRACTOR_ARGS,GPU_PARTITIONS=$GPU_PARTITIONS,GPU_ACCOUNT=$GPU_ACCOUNT,ARRAY_PARALLEL=$ARRAY_PARALLEL,MAX_BACKLOG_VIDEOS=$MAX_BACKLOG_VIDEOS,FORCE_METADATA=$FORCE_METADATA,FORCE_SUBTITLES=$FORCE_SUBTITLES,FORCE_DOWNLOAD=$FORCE_DOWNLOAD,FORCE_PROCESS=$FORCE_PROCESS,SKIP_VIDEO_DOWNLOAD=$SKIP_VIDEO_DOWNLOAD,SKIP_SUBTITLES=$SKIP_SUBTITLES,DRY_RUN_UPLOAD=$DRY_RUN_UPLOAD,RUN_LOCAL=1"
+  export_args="ALL,ROOT_DIR=$ROOT_DIR,RUNTIME_ROOT=$RUNTIME_ROOT,STATE_ROOT=$STATE_ROOT,SOURCE_METADATA_CSV=$SOURCE_METADATA_CSV,OUTPUT_METADATA_CSV=$OUTPUT_METADATA_CSV,RAW_VIDEO_DIR=$RAW_VIDEO_DIR,RAW_CAPTION_DIR=$RAW_CAPTION_DIR,RAW_METADATA_DIR=$RAW_METADATA_DIR,DATASET_DIR=$DATASET_DIR,ARCHIVE_DIR=$ARCHIVE_DIR,STATS_NPZ=$STATS_NPZ,PROGRESS_JSON=$PROGRESS_JSON,STAGE=$STAGE,LIMIT=$LIMIT,FPS=$FPS,WORKERS=$WORKERS,TARGET_BYTES=$TARGET_BYTES,DOWNLOAD_BATCH_SIZE=$DOWNLOAD_BATCH_SIZE,DOWNLOAD_WORKERS=$DOWNLOAD_WORKERS,USE_SLURM_DOWNLOAD=$USE_SLURM_DOWNLOAD,SLURM_DOWNLOAD_SUBMIT_SCRIPT=$SLURM_DOWNLOAD_SUBMIT_SCRIPT,DOWNLOAD_PARTITIONS=$DOWNLOAD_PARTITIONS,DOWNLOAD_ACCOUNT=$DOWNLOAD_ACCOUNT,DOWNLOAD_TIME=$DOWNLOAD_TIME,DOWNLOAD_CPUS_PER_TASK=$DOWNLOAD_CPUS_PER_TASK,DOWNLOAD_MEM=$DOWNLOAD_MEM,DOWNLOAD_ARRAY_PARALLEL=$DOWNLOAD_ARRAY_PARALLEL,PROCESS_BATCH_SIZE=$PROCESS_BATCH_SIZE,DOWNLOAD_CLAIM_DIR=$DOWNLOAD_CLAIM_DIR,DOWNLOAD_CSV_LOCK_PATH=$DOWNLOAD_CSV_LOCK_PATH,MIN_PROCESS_START_BACKLOG=$MIN_PROCESS_START_BACKLOG,RAW_BACKLOG_LIMIT=$RAW_BACKLOG_LIMIT,MAX_RAW_VIDEO_BYTES=$MAX_RAW_VIDEO_BYTES,MAX_ITERATIONS=$MAX_ITERATIONS,IDLE_SLEEP_SECONDS=$IDLE_SLEEP_SECONDS,REPO_ID=$REPO_ID,COOKIES_FILE=$COOKIES_FILE,COOKIES_FROM_BROWSER=$COOKIES_FROM_BROWSER,EXTRACTOR_ARGS=$EXTRACTOR_ARGS,GPU_PARTITIONS=$GPU_PARTITIONS,GPU_ACCOUNT=$GPU_ACCOUNT,ARRAY_PARALLEL=$ARRAY_PARALLEL,MAX_BACKLOG_VIDEOS=$MAX_BACKLOG_VIDEOS,FORCE_METADATA=$FORCE_METADATA,FORCE_SUBTITLES=$FORCE_SUBTITLES,FORCE_DOWNLOAD=$FORCE_DOWNLOAD,FORCE_PROCESS=$FORCE_PROCESS,SKIP_VIDEO_DOWNLOAD=$SKIP_VIDEO_DOWNLOAD,SKIP_SUBTITLES=$SKIP_SUBTITLES,DRY_RUN_UPLOAD=$DRY_RUN_UPLOAD,RUN_LOCAL=1"
   if [[ ${#VIDEO_IDS[@]} -gt 0 ]]; then
     export VIDEO_IDS_JOINED
     VIDEO_IDS_JOINED="${VIDEO_IDS[*]}"
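
Note: sbatch --export takes a single comma-separated list, so a value that itself contains a comma (and, depending on the Slurm version, unquoted spaces such as the new COOKIES_FILE default "www.youtube.com_cookies (2).txt") can corrupt every variable after it. A hedged pre-flight check one could run before submitting, as an illustrative helper that is not part of the script:

def build_export_args(variables: dict[str, str]) -> str:
    """Join KEY=VALUE pairs for sbatch --export, rejecting values that
    Slurm's comma-splitting parser would break apart."""
    parts = ["ALL"]
    for key, value in variables.items():
        if "," in value:
            raise ValueError(f"{key} contains a comma; pass it via the environment instead")
        parts.append(f"{key}={value}")
    return ",".join(parts)

print(build_export_args({"ROOT_DIR": "/scratch/project", "DOWNLOAD_MEM": "4G"}))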
@@ -334,48 +386,119 @@ run_in_dwpose() {
 
 run_download_stage() {
   local stage_limit="${1:-$LIMIT}"
-  local cmd=(python "$PIPELINE01"
-    --source-metadata-csv "$SOURCE_METADATA_CSV"
-    --output-metadata-csv "$OUTPUT_METADATA_CSV"
-    --raw-video-dir "$RAW_VIDEO_DIR"
-    --raw-caption-dir "$RAW_CAPTION_DIR"
-    --raw-metadata-dir "$RAW_METADATA_DIR"
-    --dataset-dir "$DATASET_DIR"
-    --stats-npz "$STATS_NPZ"
-  )
 
-  if [[ -n "$stage_limit" ]]; then
-    cmd+=(--limit "$stage_limit")
-  fi
-  if [[ ${#VIDEO_IDS[@]} -gt 0 ]]; then
-    cmd+=(--video-ids "${VIDEO_IDS[@]}")
-  fi
-  if [[ $FORCE_METADATA -eq 1 ]]; then
-    cmd+=(--force-metadata)
-  fi
-  if [[ $FORCE_SUBTITLES -eq 1 ]]; then
-    cmd+=(--force-subtitles)
-  fi
-  if [[ $FORCE_DOWNLOAD -eq 1 ]]; then
-    cmd+=(--force-download)
-  fi
-  if [[ $SKIP_VIDEO_DOWNLOAD -eq 1 ]]; then
-    cmd+=(--skip-video-download)
-  fi
-  if [[ $SKIP_SUBTITLES -eq 1 ]]; then
-    cmd+=(--skip-subtitles)
-  fi
-  if [[ -n "$COOKIES_FROM_BROWSER" ]]; then
-    cmd+=(--cookies-from-browser "$COOKIES_FROM_BROWSER")
-  fi
-  if [[ -n "$COOKIES_FILE" ]]; then
-    cmd+=(--cookies "$COOKIES_FILE")
-  fi
-  if [[ -n "$EXTRACTOR_ARGS" ]]; then
-    cmd+=(--extractor-args "$EXTRACTOR_ARGS")
+  if [[ "$USE_SLURM_DOWNLOAD" == "1" ]]; then
+    local cmd=(bash "$SLURM_DOWNLOAD_SUBMIT_SCRIPT"
+      --partitions "$DOWNLOAD_PARTITIONS"
+      --runtime-root "$RUNTIME_ROOT"
+      --state-root "$STATE_ROOT"
+      --time "$DOWNLOAD_TIME"
+      --cpus-per-task "$DOWNLOAD_CPUS_PER_TASK"
+      --mem "$DOWNLOAD_MEM"
+      --max-backlog-videos "$RAW_BACKLOG_LIMIT"
+      --workers "$DOWNLOAD_WORKERS"
+      --claim-dir "$DOWNLOAD_CLAIM_DIR"
+      --csv-lock-path "$DOWNLOAD_CSV_LOCK_PATH"
+    )
+
+    if [[ -n "$DOWNLOAD_ARRAY_PARALLEL" ]]; then
+      cmd+=(--array-parallel "$DOWNLOAD_ARRAY_PARALLEL")
+    fi
+    if [[ -n "$DOWNLOAD_ACCOUNT" ]]; then
+      cmd+=(--account "$DOWNLOAD_ACCOUNT")
+    fi
+    if [[ ${#VIDEO_IDS[@]} -gt 0 ]]; then
+      cmd+=(--video-ids "${VIDEO_IDS[@]}")
+    fi
+    if [[ $FORCE_METADATA -eq 1 ]]; then
+      cmd+=(--force-metadata)
+    fi
+    if [[ $FORCE_SUBTITLES -eq 1 ]]; then
+      cmd+=(--force-subtitles)
+    fi
+    if [[ $FORCE_DOWNLOAD -eq 1 ]]; then
+      cmd+=(--force-download)
+    fi
+    if [[ $SKIP_VIDEO_DOWNLOAD -eq 1 ]]; then
+      cmd+=(--skip-video-download)
+    fi
+    if [[ $SKIP_SUBTITLES -eq 1 ]]; then
+      cmd+=(--skip-subtitles)
+    fi
+    if [[ -n "$COOKIES_FROM_BROWSER" ]]; then
+      cmd+=(--cookies-from-browser "$COOKIES_FROM_BROWSER")
+    fi
+    if [[ -n "$COOKIES_FILE" ]]; then
+      cmd+=(--cookies "$COOKIES_FILE")
+    fi
+    if [[ -n "$EXTRACTOR_ARGS" ]]; then
+      cmd+=(--extractor-args "$EXTRACTOR_ARGS")
+    fi
+
+    "${cmd[@]}"
+    return $?
   fi
 
-  run_in_dwpose "${cmd[@]}"
+  local worker_count="${DOWNLOAD_WORKERS:-1}"
+  local pids=()
+  local failed=0
+  local i
+
+  for ((i=1; i<=worker_count; i++)); do
+    local cmd=(python "$PIPELINE01"
+      --source-metadata-csv "$SOURCE_METADATA_CSV"
+      --output-metadata-csv "$OUTPUT_METADATA_CSV"
+      --raw-video-dir "$RAW_VIDEO_DIR"
+      --raw-caption-dir "$RAW_CAPTION_DIR"
+      --raw-metadata-dir "$RAW_METADATA_DIR"
+      --dataset-dir "$DATASET_DIR"
+      --stats-npz "$STATS_NPZ"
+      --claim-dir "$DOWNLOAD_CLAIM_DIR"
+      --csv-lock-path "$DOWNLOAD_CSV_LOCK_PATH"
+    )
+
+    if [[ -n "$stage_limit" ]]; then
+      cmd+=(--limit "$stage_limit")
+    fi
+    if [[ ${#VIDEO_IDS[@]} -gt 0 ]]; then
+      cmd+=(--video-ids "${VIDEO_IDS[@]}")
+    fi
+    if [[ $FORCE_METADATA -eq 1 ]]; then
+      cmd+=(--force-metadata)
+    fi
+    if [[ $FORCE_SUBTITLES -eq 1 ]]; then
+      cmd+=(--force-subtitles)
+    fi
+    if [[ $FORCE_DOWNLOAD -eq 1 ]]; then
+      cmd+=(--force-download)
+    fi
+    if [[ $SKIP_VIDEO_DOWNLOAD -eq 1 ]]; then
+      cmd+=(--skip-video-download)
+    fi
+    if [[ $SKIP_SUBTITLES -eq 1 ]]; then
+      cmd+=(--skip-subtitles)
+    fi
+    if [[ -n "$COOKIES_FROM_BROWSER" ]]; then
+      cmd+=(--cookies-from-browser "$COOKIES_FROM_BROWSER")
+    fi
+    if [[ -n "$COOKIES_FILE" ]]; then
+      cmd+=(--cookies "$COOKIES_FILE")
+    fi
+    if [[ -n "$EXTRACTOR_ARGS" ]]; then
+      cmd+=(--extractor-args "$EXTRACTOR_ARGS")
+    fi
+
+    run_in_dwpose "${cmd[@]}" &
+    pids+=("$!")
+  done
+
+  for pid in "${pids[@]}"; do
+    if ! wait "$pid"; then
+      failed=1
+    fi
+  done
+
+  return "$failed"
 }
 
 RUN_PROCESS_STAGE_SUBMITTED_COUNT=0
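
Note: the local fallback path fans out DOWNLOAD_WORKERS copies of pipeline01, relying on the shared claim dir so each worker picks disjoint videos, then aggregates their exit codes. The same fan-out pattern in Python, as a minimal sketch with a placeholder command list:

import subprocess

def run_workers(cmd: list[str], worker_count: int) -> int:
    """Start worker_count copies of cmd, wait for all of them, and return 1
    if any worker failed (mirrors run_download_stage's wait loop)."""
    procs = [subprocess.Popen(cmd) for _ in range(worker_count)]
    return int(any(proc.wait() != 0 for proc in procs))

# e.g. run_workers(["python", "pipeline01.py", "--claim-dir", "claims"], 4)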
@@ -424,6 +547,7 @@ run_upload_stage() {
     --stats-npz "$STATS_NPZ"
     --repo-id "$REPO_ID"
     --target-bytes "$TARGET_BYTES"
+    --target-folders "$TARGET_FOLDERS"
   )
 
   if [[ "$require_target" == "1" ]]; then
@@ -441,43 +565,33 @@ prune_processed_raw_videos() {
 from pathlib import Path
 raw_dir = Path("$RAW_VIDEO_DIR")
 dataset_dir = Path("$DATASET_DIR")
-deleted = 0
+video_extensions = {".mp4", ".mkv", ".webm", ".mov"}
+removed = 0
 if raw_dir.exists():
     for video_path in raw_dir.iterdir():
-        if not video_path.is_file():
+        if not video_path.is_file() or video_path.suffix.lower() not in video_extensions:
            continue
-        marker = dataset_dir / video_path.stem / "npz" / ".complete"
-        if marker.exists():
+        video_id = video_path.stem
+        if (dataset_dir / video_id / "npz" / ".complete").exists():
            video_path.unlink(missing_ok=True)
-            deleted += 1
-print(deleted)
+            removed += 1
+print(removed)
 PY
 }
 
-dir_size_bytes() {
-  local dir_path="$1"
-  if [[ ! -d "$dir_path" ]]; then
-    echo 0
-    return
-  fi
-  find "$dir_path" -type f -printf '%s\n' | awk '{sum+=$1} END {print sum+0}'
-}
-
 count_pending_downloads() {
   python - <<PY
-import csv, sys
+import csv
 from pathlib import Path
-csv.field_size_limit(min(sys.maxsize, 10 * 1024 * 1024))
-path = Path("$OUTPUT_METADATA_CSV")
-if not path.exists():
-    path = Path("$SOURCE_METADATA_CSV")
+csv_path = Path("$OUTPUT_METADATA_CSV")
 pending = 0
-with path.open("r", encoding="utf-8-sig", newline="") as handle:
-    reader = csv.DictReader(handle)
-    for row in reader:
-        if (row.get("download_status") or "").strip() == "ok":
-            continue
-        pending += 1
+if csv_path.exists():
+    with csv_path.open("r", encoding="utf-8", newline="") as handle:
+        reader = csv.DictReader(handle)
+        for row in reader:
+            if (row.get("download_status") or "").strip() in {"ok", "skipped"}:
+                continue
+            pending += 1
 print(pending)
 PY
 }
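
Note: these helpers embed shell variables directly into `python - <<PY` heredocs, so a path containing a double quote or backslash would break the generated Python source. An alternative sketch (not what the script does) passes values through the environment instead, by quoting the heredoc delimiter so the Python text is never interpolated:

# Invoked as:  RAW_VIDEO_DIR="$RAW_VIDEO_DIR" python - <<'PY' ... PY
import os
from pathlib import Path

# Quoting 'PY' disables shell interpolation; values arrive via os.environ.
raw_dir = Path(os.environ.get("RAW_VIDEO_DIR", ""))
print(sum(1 for p in raw_dir.iterdir() if p.is_file()) if raw_dir.is_dir() else 0)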
@@ -486,20 +600,81 @@ count_pending_process() {
   python - <<PY
 from pathlib import Path
 raw_dir = Path("$RAW_VIDEO_DIR")
+video_extensions = {".mp4", ".mkv", ".webm", ".mov"}
 pending = 0
 if raw_dir.exists():
     for video_path in raw_dir.iterdir():
-        if video_path.is_file():
+        if video_path.is_file() and video_path.suffix.lower() in video_extensions:
            pending += 1
 print(pending)
 PY
 }
 
+cleanup_stale_download_claims() {
+  python - <<PY
+import os
+import subprocess
+from pathlib import Path
+claim_dir = Path("$DOWNLOAD_CLAIM_DIR")
+claim_dir.mkdir(parents=True, exist_ok=True)
+removed = 0
+try:
+    result = subprocess.run(["squeue", "-h", "-u", "$USER", "-o", "%A"], check=True, capture_output=True, text=True)
+    active_jobs = {line.split("_", 1)[0].strip() for line in result.stdout.splitlines() if line.strip()}
+except Exception:
+    active_jobs = set()
+for claim_path in claim_dir.glob("*.claim"):
+    try:
+        lines = claim_path.read_text(encoding="utf-8").splitlines()
+    except OSError:
+        continue
+    pid = None
+    job_id = ""
+    for line in lines:
+        if line.startswith("pid="):
+            try:
+                pid = int(line.split("=", 1)[1].strip())
+            except ValueError:
+                pid = None
+        elif line.startswith("job_id="):
+            job_id = line.split("=", 1)[1].strip()
+    alive = False
+    if job_id:
+        alive = job_id in active_jobs
+    elif pid is not None:
+        try:
+            os.kill(pid, 0)
+            alive = True
+        except OSError:
+            alive = False
+    if not alive:
+        claim_path.unlink(missing_ok=True)
+        removed += 1
+print(removed)
+PY
+}
+
+dir_size_bytes() {
+  python - <<PY
+from pathlib import Path
+root = Path("$1")
+total = 0
+if root.exists():
+    for path in root.rglob("*"):
+        if path.is_file():
+            try:
+                total += path.stat().st_size
+            except OSError:
+                pass
+print(total)
+PY
+}
+
 count_active_process_claims() {
   python - <<PY
 import subprocess
 from pathlib import Path
-claim_dir = Path("$ROOT_DIR/slurm/state/claims")
+claim_dir = Path("$STATE_ROOT/slurm/state/claims")
 claim_dir.mkdir(parents=True, exist_ok=True)
 try:
     result = subprocess.run(["squeue", "-h", "-u", "$USER", "-o", "%A"], check=True, capture_output=True, text=True)
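
Note: the `os.kill(pid, 0)` probe only proves liveness for processes the caller may signal. For another user's live process it raises PermissionError, which is an OSError subclass and would be misread as "dead" by the code above. Claims here are written by the same user, so that is usually fine, but a stricter probe would distinguish the two cases; a hedged sketch:

import os

def pid_alive(pid: int) -> bool:
    """Signal-0 liveness probe that treats EPERM as 'alive': the process
    exists even though the caller may not signal it."""
    try:
        os.kill(pid, 0)
    except ProcessLookupError:
        return False
    except PermissionError:
        return True
    return True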
@@ -535,13 +710,14 @@ uploaded = set()
 if progress_path.exists():
     uploaded = set(json.loads(progress_path.read_text()).get("uploaded_folders", {}).keys())
 count = 0
-for folder_path in dataset_dir.iterdir():
-    if not folder_path.is_dir():
-        continue
-    if folder_path.name in uploaded:
-        continue
-    if (folder_path / "npz" / ".complete").exists():
-        count += 1
+if dataset_dir.exists():
+    for folder_path in dataset_dir.iterdir():
+        if not folder_path.is_dir():
+            continue
+        if folder_path.name in uploaded:
+            continue
+        if (folder_path / "npz" / ".complete").exists():
+            count += 1
 print(count)
 PY
 }
@@ -556,16 +732,20 @@ uploaded = set()
 if progress_path.exists():
     uploaded = set(json.loads(progress_path.read_text()).get("uploaded_folders", {}).keys())
 total = 0
-for folder_path in dataset_dir.iterdir():
-    if not folder_path.is_dir():
-        continue
-    if folder_path.name in uploaded:
-        continue
-    if not (folder_path / "npz" / ".complete").exists():
-        continue
-    for path in folder_path.rglob("*"):
-        if path.is_file():
-            total += path.stat().st_size
+if dataset_dir.exists():
+    for folder_path in dataset_dir.iterdir():
+        if not folder_path.is_dir():
+            continue
+        if folder_path.name in uploaded:
+            continue
+        if not (folder_path / "npz" / ".complete").exists():
+            continue
+        for path in folder_path.rglob("*"):
+            if path.is_file():
+                try:
+                    total += path.stat().st_size
+                except OSError:
+                    pass
 print(total)
 PY
 }
@@ -574,13 +754,14 @@ download_loop() {
   local iteration=0
   while true; do
     iteration=$((iteration + 1))
-    local pruned
+    local pruned stale_download_claims
     pruned="$(prune_processed_raw_videos)"
+    stale_download_claims="$(cleanup_stale_download_claims)"
     local pending_download pending_process raw_video_bytes
     pending_download="$(count_pending_downloads)"
     pending_process="$(count_pending_process)"
     raw_video_bytes="$(dir_size_bytes "$RAW_VIDEO_DIR")"
-    echo "[download] iteration=$iteration pending_download=$pending_download raw_backlog=$pending_process raw_video_bytes=$raw_video_bytes pruned_raw_videos=$pruned"
+    echo "[download] iteration=$iteration pending_download=$pending_download raw_backlog=$pending_process raw_video_bytes=$raw_video_bytes pruned_raw_videos=$pruned stale_download_claims=$stale_download_claims"
 
     if [[ "$MAX_ITERATIONS" -gt 0 && "$iteration" -gt "$MAX_ITERATIONS" ]]; then
      echo "[download] reached max iterations: $MAX_ITERATIONS"
@@ -699,7 +880,7 @@ upload_loop() {
      continue
     fi
 
-    if [[ "$complete_pending_upload_bytes" -lt "$TARGET_BYTES" && ( "$pending_download" -gt 0 || "$pending_process" -gt 0 ) ]]; then
+    if [[ "$complete_pending_upload_bytes" -lt "$TARGET_BYTES" && "$complete_pending_upload" -lt "$TARGET_FOLDERS" && ( "$pending_download" -gt 0 || "$pending_process" -gt 0 ) ]]; then
      sleep "$IDLE_SLEEP_SECONDS"
      continue
     fi
scripts/pipeline01_download_video_fix_caption.py CHANGED
@@ -2,8 +2,10 @@
 
 import argparse
 import csv
+import fcntl
 import html
 import json
+import os
 import re
 import shutil
 import subprocess
@@ -19,7 +21,7 @@ REPO_ROOT = Path(__file__).resolve().parents[1]
 if str(REPO_ROOT) not in sys.path:
     sys.path.insert(0, str(REPO_ROOT))
 
-from utils.stats_npz import ensure_record, load_stats, save_stats
+from utils.stats_npz import update_video_stats
 
 
 DEFAULT_SOURCE_METADATA_CSV = REPO_ROOT / "Sign-DWPose-2M-metadata_ori.csv"
@@ -81,6 +83,8 @@ def parse_args() -> argparse.Namespace:
     parser.add_argument("--cookies-from-browser", default=None)
     parser.add_argument("--extractor-args", default=DEFAULT_YT_DLP_EXTRACTOR_ARGS)
     parser.add_argument("--max-failures-before-skip", type=int, default=2)
+    parser.add_argument("--claim-dir", type=Path, default=None)
+    parser.add_argument("--csv-lock-path", type=Path, default=None)
     return parser.parse_args()
 
 
@@ -127,6 +131,39 @@ def read_manifest(csv_path: Path) -> Tuple[List[Dict[str, str]], List[str]]:
     return manifest_rows, ordered_fieldnames
 
 
+def read_state_manifest(source_csv: Path, output_csv: Path) -> Tuple[List[Dict[str, str]], List[str]]:
+    source_rows, source_fieldnames = read_manifest(source_csv)
+    if not output_csv.exists():
+        return source_rows, source_fieldnames
+
+    output_rows, output_fieldnames = read_manifest(output_csv)
+    ordered_fieldnames: List[str] = []
+    for column in list(source_fieldnames) + list(output_fieldnames):
+        if column and column not in ordered_fieldnames:
+            ordered_fieldnames.append(column)
+
+    merged_rows: List[Dict[str, str]] = []
+    output_by_id = {row.get("video_id", "").strip(): row for row in output_rows if row.get("video_id", "").strip()}
+    seen = set()
+    for source_row in source_rows:
+        video_id = source_row.get("video_id", "").strip()
+        if not video_id:
+            continue
+        merged = {column: source_row.get(column, "") for column in ordered_fieldnames}
+        if video_id in output_by_id:
+            merge_row_values(merged, output_by_id[video_id], ordered_fieldnames)
+        merged_rows.append(merged)
+        seen.add(video_id)
+
+    for video_id, row in output_by_id.items():
+        if video_id in seen:
+            continue
+        merged = {column: row.get(column, "") for column in ordered_fieldnames}
+        merged_rows.append(merged)
+
+    return merged_rows, ordered_fieldnames
+
+
 def write_manifest(csv_path: Path, rows: Sequence[Dict[str, str]], fieldnames: Sequence[str]) -> None:
     tmp_path = csv_path.with_suffix(csv_path.suffix + ".tmp")
     with tmp_path.open("w", encoding="utf-8", newline="") as handle:
@@ -137,6 +174,103 @@ def write_manifest(csv_path: Path, rows: Sequence[Dict[str, str]], fieldnames: Sequence[str]) -> None:
     tmp_path.replace(csv_path)
 
 
+def lock_path_for_manifest(output_csv: Path, explicit_lock_path: Path | None) -> Path:
+    return explicit_lock_path or output_csv.with_suffix(output_csv.suffix + ".lock")
+
+
+def claim_path_for_video(claim_dir: Path, video_id: str) -> Path:
+    return claim_dir / f"{video_id}.claim"
+
+
+def with_manifest_lock(lock_path: Path):
+    lock_path.parent.mkdir(parents=True, exist_ok=True)
+    handle = lock_path.open("a+", encoding="utf-8")
+    fcntl.flock(handle.fileno(), fcntl.LOCK_EX)
+    return handle
+
+
+def merge_row_values(target_row: Dict[str, str], updates: Dict[str, str], fieldnames: Sequence[str]) -> None:
+    for field in fieldnames:
+        if field in updates:
+            target_row[field] = updates.get(field, "")
+
+
+def claim_target_rows(args: argparse.Namespace) -> Tuple[List[Dict[str, str]], List[Dict[str, str]], List[str], Path]:
+    manifest_input_path = args.output_metadata_csv if args.output_metadata_csv.exists() else args.source_metadata_csv
+    claim_dir = args.claim_dir
+    if claim_dir is None:
+        rows, fieldnames = read_state_manifest(args.source_metadata_csv, args.output_metadata_csv)
+        local_video_ids = collect_local_video_ids(args) if args.local_only else None
+        selected_rows = iter_target_rows(rows, args.video_ids, args.limit, local_video_ids, args)
+        return rows, selected_rows, fieldnames, manifest_input_path
+
+    claim_dir.mkdir(parents=True, exist_ok=True)
+    lock_path = lock_path_for_manifest(args.output_metadata_csv, args.csv_lock_path)
+    handle = with_manifest_lock(lock_path)
+    try:
+        rows, fieldnames = read_state_manifest(args.source_metadata_csv, args.output_metadata_csv)
+        local_video_ids = collect_local_video_ids(args) if args.local_only else None
+        selected_rows: List[Dict[str, str]] = []
+        video_id_filter = set(args.video_ids or [])
+        limit = args.limit
+        for row in rows:
+            video_id = row["video_id"].strip()
+            if not video_id:
+                continue
+            if video_id_filter and video_id not in video_id_filter:
+                continue
+            if local_video_ids is not None and video_id not in local_video_ids:
+                continue
+            if not row_needs_processing(row, args):
+                continue
+            claim_path = claim_path_for_video(claim_dir, video_id)
+            try:
+                fd = os.open(claim_path, os.O_CREAT | os.O_EXCL | os.O_WRONLY)
+            except FileExistsError:
+                continue
+            with os.fdopen(fd, "w", encoding="utf-8") as claim_handle:
+                claim_handle.write(f"pid={os.getpid()}\n")
+                claim_handle.write(f"video_id={video_id}\n")
+                claim_handle.write(f"claimed_at={time.strftime('%Y-%m-%d %H:%M:%S')}\n")
+            selected_rows.append(dict(row))
+            if limit is not None and len(selected_rows) >= limit:
+                break
+        return rows, selected_rows, fieldnames, manifest_input_path
+    finally:
+        fcntl.flock(handle.fileno(), fcntl.LOCK_UN)
+        handle.close()
+
+
+def release_claim(args: argparse.Namespace, video_id: str) -> None:
+    if args.claim_dir is None:
+        return
+    claim_path_for_video(args.claim_dir, video_id).unlink(missing_ok=True)
+
+
+def persist_row_update(args: argparse.Namespace, video_id: str, updated_row: Dict[str, str], fieldnames: Sequence[str]) -> None:
+    lock_path = lock_path_for_manifest(args.output_metadata_csv, args.csv_lock_path)
+    handle = with_manifest_lock(lock_path)
+    try:
+        rows, current_fieldnames = read_state_manifest(args.source_metadata_csv, args.output_metadata_csv)
+        ordered_fieldnames = []
+        for column in list(current_fieldnames) + list(fieldnames):
+            if column and column not in ordered_fieldnames:
+                ordered_fieldnames.append(column)
+        found = False
+        for row in rows:
+            if row.get("video_id", "").strip() == video_id:
+                merge_row_values(row, updated_row, ordered_fieldnames)
+                found = True
+                break
+        if not found:
+            new_row = {column: updated_row.get(column, "") for column in ordered_fieldnames}
+            rows.append(new_row)
+        write_manifest(args.output_metadata_csv, rows, ordered_fieldnames)
+    finally:
+        fcntl.flock(handle.fileno(), fcntl.LOCK_UN)
+        handle.close()
+
+
 def repo_relative_or_absolute(path: Path) -> str:
     resolved_path = path.resolve()
     try:
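
Note: the claiming scheme layers two primitives. An exclusive flock on the manifest lock file serializes manifest reads/writes, while per-video .claim files created with O_CREAT | O_EXCL make each video an atomic, crash-detectable unit of work, so concurrent workers (local processes or Slurm array tasks) never download the same video twice. A stripped-down sketch of the same idea, with hypothetical names:

import fcntl
import os
from pathlib import Path

def try_claim(claim_dir: Path, item_id: str) -> bool:
    """Atomically claim one work item; False means another worker owns it."""
    claim_dir.mkdir(parents=True, exist_ok=True)
    try:
        fd = os.open(claim_dir / f"{item_id}.claim", os.O_CREAT | os.O_EXCL | os.O_WRONLY)
    except FileExistsError:
        return False
    with os.fdopen(fd, "w") as handle:
        handle.write(f"pid={os.getpid()}\n")  # recorded so stale claims can be reaped
    return True

def with_lock(lock_path: Path):
    """Advisory exclusive lock guarding the shared manifest."""
    handle = lock_path.open("a+")
    fcntl.flock(handle.fileno(), fcntl.LOCK_EX)
    return handle  # caller releases via fcntl.flock(..., fcntl.LOCK_UN) and close()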
@@ -543,6 +677,51 @@ def select_english_subtitle(subtitle_payloads: Dict[str, Dict[str, object]]) ->
     return "", ""
 
 
+def subtitle_json_path_for_video(subtitle_dir: Path, video_id: str) -> Path:
+    return subtitle_dir / f"{video_id}.captions.json"
+
+
+def build_subtitle_json_payload(
+    video_id: str,
+    subtitle_payloads: Dict[str, Dict[str, object]],
+    subtitle_en: str,
+    subtitle_en_source: str,
+) -> Dict[str, object]:
+    languages: Dict[str, Dict[str, object]] = {}
+    compact_texts: Dict[str, str] = {}
+    for lang, payload in sorted(subtitle_payloads.items()):
+        compact_texts[lang] = str(payload.get("text") or "")
+        languages[lang] = {
+            "text": compact_texts[lang],
+            "segments": payload.get("segments") or [],
+            "vtt_path": payload.get("vtt_path") or "",
+        }
+    return {
+        "video_id": video_id,
+        "subtitle_languages": sorted(subtitle_payloads),
+        "subtitle_en": subtitle_en,
+        "subtitle_en_source": subtitle_en_source,
+        "subtitle_texts": compact_texts,
+        "languages": languages,
+    }
+
+
+def write_subtitle_json(
+    subtitle_dir: Path,
+    video_id: str,
+    subtitle_payloads: Dict[str, Dict[str, object]],
+    subtitle_en: str,
+    subtitle_en_source: str,
+) -> Path | None:
+    json_path = subtitle_json_path_for_video(subtitle_dir, video_id)
+    if not subtitle_payloads:
+        json_path.unlink(missing_ok=True)
+        return None
+    payload = build_subtitle_json_payload(video_id, subtitle_payloads, subtitle_en, subtitle_en_source)
+    json_path.write_text(json.dumps(payload, ensure_ascii=False, indent=2), encoding="utf-8")
+    return json_path
+
+
 def persist_raw_metadata(raw_metadata_dir: Path, video_id: str, metadata: Dict[str, object]) -> str:
     raw_metadata_dir.mkdir(parents=True, exist_ok=True)
     metadata_path = raw_metadata_dir / f"{video_id}.json"
@@ -636,11 +815,7 @@ def main() -> None:
         args._effective_cookies = temp_cookie_path
     else:
         args._effective_cookies = None
-    manifest_input_path = args.output_metadata_csv if args.output_metadata_csv.exists() else args.source_metadata_csv
-    rows, fieldnames = read_manifest(manifest_input_path)
-    local_video_ids = collect_local_video_ids(args) if args.local_only else None
-    selected_rows = iter_target_rows(rows, args.video_ids, args.limit, local_video_ids, args)
-    stats = load_stats(args.stats_npz)
+    rows, selected_rows, fieldnames, _manifest_input_path = claim_target_rows(args)
 
     try:
         args.raw_video_dir.mkdir(parents=True, exist_ok=True)
@@ -651,7 +826,7 @@ def main() -> None:
         video_id = row["video_id"].strip()
         if not video_id:
             continue
-        stats_record = ensure_record(stats, video_id)
+        stats_record = {}
 
         print(f"[{index}/{len(selected_rows)}] Processing {video_id}")
         metadata_error = ""
@@ -677,8 +852,8 @@ def main() -> None:
             stats_record["updated_at"] = row["processed_at"]
             failure_count, should_skip = record_row_failure(row, stats_record, metadata_error, args.max_failures_before_skip)
             stats_record["last_error"] = row["error"]
-            write_manifest(args.output_metadata_csv, rows, fieldnames)
-            save_stats(args.stats_npz, stats)
+            persist_row_update(args, video_id, row, fieldnames)
+            update_video_stats(args.stats_npz, video_id, **stats_record)
             print(f"  metadata failed: {metadata_error}")
             if should_skip:
                 print(f"  skipping after {failure_count} failures")
@@ -771,8 +946,8 @@ def main() -> None:
         stats_record["download_status"] = row["download_status"]
         stats_record["last_error"] = row["error"]
         stats_record["updated_at"] = row["processed_at"]
-        write_manifest(args.output_metadata_csv, rows, fieldnames)
-        save_stats(args.stats_npz, stats)
+        persist_row_update(args, video_id, row, fieldnames)
+        update_video_stats(args.stats_npz, video_id, **stats_record)
 
         if row["download_status"] == "failed":
             print(f"  video download failed: {download_error}")
scripts/pipeline02_extract_dwpose_from_video.py CHANGED
@@ -44,6 +44,19 @@ def parse_args() -> argparse.Namespace:
     parser.add_argument("--delete-source-on-success", action="store_true")
     parser.add_argument("--tmp-root", type=Path, default=Path("/tmp"))
     parser.add_argument("--stats-npz", type=Path, default=DEFAULT_STATS_NPZ)
+    parser.add_argument(
+        "--single-poses-npz",
+        dest="single_poses_npz",
+        action="store_true",
+        default=True,
+        help="Save one aggregated poses.npz per video (default).",
+    )
+    parser.add_argument(
+        "--per-frame-npz",
+        dest="single_poses_npz",
+        action="store_false",
+        help="Save one NPZ file per frame under the npz directory.",
+    )
     return parser.parse_args()
 
 
@@ -125,10 +138,19 @@ def build_npz_payload(pose_data: Dict[str, np.ndarray], width: int, height: int)
     return payload
 
 
-def process_video(video_path: Path, dataset_dir: Path, fps: int, detector: DWposeDetector, tmp_root: Path, force: bool) -> None:
+def process_video(
+    video_path: Path,
+    dataset_dir: Path,
+    fps: int,
+    detector: DWposeDetector,
+    tmp_root: Path,
+    force: bool,
+    single_poses_npz: bool,
+) -> None:
     video_id = video_path.stem
     output_npz_dir = dataset_dir / video_id / "npz"
     complete_marker = output_npz_dir / COMPLETE_MARKER_NAME
+    poses_npz_path = output_npz_dir / "poses.npz"
     if output_npz_dir.exists() and complete_marker.exists() and not force:
         print(f"Skip {video_id}: NPZ files already exist")
         return
@@ -146,18 +168,39 @@ def process_video(
         total_frames = len(frame_paths)
         print(f"{video_id}: extracted {total_frames} frames at {fps} fps")
 
+        aggregated_payloads = []
+        frame_widths = []
+        frame_heights = []
+
         for frame_index, frame_path in enumerate(frame_paths, start=1):
             with Image.open(frame_path) as image:
                 frame = image.convert("RGB")
             width, height = frame.size
             pose_data = detector(frame, draw_pose=False, include_hands=True, include_face=True)
             payload = build_npz_payload(pose_data, width, height)
-            np.savez(output_npz_dir / f"{frame_index:08d}.npz", **payload)
+            if single_poses_npz:
+                aggregated_payloads.append(payload)
+                frame_widths.append(width)
+                frame_heights.append(height)
+            else:
+                np.savez(output_npz_dir / f"{frame_index:08d}.npz", **payload)
 
             if frame_index == 1 or frame_index % 100 == 0 or frame_index == total_frames:
                 print(f"{video_id}: processed {frame_index}/{total_frames} frames")
+
+        if single_poses_npz:
+            np.savez(
+                poses_npz_path,
+                video_id=np.asarray(video_id),
+                fps=np.asarray(fps, dtype=np.int32),
+                total_frames=np.asarray(total_frames, dtype=np.int32),
+                frame_widths=np.asarray(frame_widths, dtype=np.int32),
+                frame_heights=np.asarray(frame_heights, dtype=np.int32),
+                frame_payloads=np.asarray(aggregated_payloads, dtype=object),
+            )
+
         complete_marker.write_text(
-            f"video_id={video_id}\nfps={fps}\nframes={total_frames}\ncompleted_at={time.strftime('%Y-%m-%d %H:%M:%S')}\n",
+            f"video_id={video_id}\nfps={fps}\nframes={total_frames}\noutput_mode={'single_poses_npz' if single_poses_npz else 'per_frame_npz'}\ncompleted_at={time.strftime('%Y-%m-%d %H:%M:%S')}\n",
             encoding="utf-8",
         )
     finally:
@@ -194,6 +237,7 @@ def worker(rank: int, worker_count: int, video_paths: Sequence[Path], args: argparse.Namespace) -> None:
             detector=detector,
             tmp_root=args.tmp_root,
             force=args.force,
+            single_poses_npz=args.single_poses_npz,
         )
         update_video_stats(
             args.stats_npz,
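
Note: because frame_payloads is stored as an object array of per-frame dicts, NumPy pickles it inside the archive, so readers must opt in with allow_pickle=True. A minimal consumer sketch (the path is a placeholder; keys follow the writer above):

import numpy as np

data = np.load("dataset/<video_id>/npz/poses.npz", allow_pickle=True)
total_frames = int(data["total_frames"])
frame_payloads = data["frame_payloads"]  # object array, one dict per frame

for frame_index in range(total_frames):
    payload = frame_payloads[frame_index]  # keypoint arrays from build_npz_payload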
scripts/pipeline03_upload_to_huggingface.py CHANGED
@@ -7,6 +7,8 @@ import shutil
 import subprocess
 import sys
 import tarfile
+import threading
+import traceback
 import time
 from pathlib import Path
 from typing import Dict, List, Sequence, Tuple
@@ -18,7 +20,7 @@ REPO_ROOT = Path(__file__).resolve().parents[1]
 if str(REPO_ROOT) not in sys.path:
     sys.path.insert(0, str(REPO_ROOT))
 
-from utils.stats_npz import update_many_video_stats
+from utils.stats_npz import update_many_video_stats_with_retry
 
 
 DEFAULT_DATASET_DIR = REPO_ROOT / "dataset"
@@ -27,9 +29,12 @@ DEFAULT_RAW_CAPTION_DIR = REPO_ROOT / "raw_caption"
 DEFAULT_RAW_METADATA_DIR = REPO_ROOT / "raw_metadata"
 DEFAULT_ARCHIVE_DIR = REPO_ROOT / "archives"
 DEFAULT_PROGRESS_PATH = REPO_ROOT / "archive_upload_progress.json"
+DEFAULT_STATUS_JOURNAL_PATH = REPO_ROOT / "upload_status_journal.jsonl"
 DEFAULT_STATS_NPZ = REPO_ROOT / "stats.npz"
 DEFAULT_GIT_CLONE_DIR = DEFAULT_ARCHIVE_DIR / ".hf_git_repo"
-DEFAULT_TARGET_BYTES = 14 * 1024 * 1024 * 1024
+DEFAULT_TOKEN_PATH = Path.home() / ".hf_token.txt"
+DEFAULT_TARGET_BYTES = 10 * 1024 * 1024 * 1024
+DEFAULT_TARGET_FOLDERS = 40
 COMPLETE_MARKER_NAME = ".complete"
 
 
@@ -44,17 +49,54 @@ def parse_args() -> argparse.Namespace:
     parser.add_argument("--archive-dir", type=Path, default=DEFAULT_ARCHIVE_DIR)
     parser.add_argument("--progress-path", type=Path, default=DEFAULT_PROGRESS_PATH)
     parser.add_argument("--stats-npz", type=Path, default=DEFAULT_STATS_NPZ)
+    parser.add_argument("--status-journal-path", type=Path, default=DEFAULT_STATUS_JOURNAL_PATH)
     parser.add_argument("--repo-id", default="SignerX/Sign-DWPose-2M")
     parser.add_argument("--repo-type", default="dataset")
     parser.add_argument("--target-bytes", type=int, default=DEFAULT_TARGET_BYTES)
+    parser.add_argument("--target-folders", type=int, default=DEFAULT_TARGET_FOLDERS)
     parser.add_argument("--require-target-bytes", action="store_true")
     parser.add_argument("--dry-run", action="store_true")
-    parser.add_argument("--upload-mode", choices=["git-ssh", "api"], default=os.environ.get("HF_UPLOAD_MODE", "git-ssh"))
+    parser.add_argument("--upload-mode", choices=["git-ssh", "api", "api-stream"], default=os.environ.get("HF_UPLOAD_MODE", "api"))
     parser.add_argument("--git-clone-dir", type=Path, default=DEFAULT_GIT_CLONE_DIR)
-    parser.add_argument("--token", default=os.environ.get("HF_TOKEN") or os.environ.get("HUGGINGFACE_HUB_TOKEN"))
+    parser.add_argument("--token", default=None)
     return parser.parse_args()
 
 
+def resolve_token(cli_token: str | None) -> str | None:
+    if cli_token:
+        return cli_token
+    env_token = os.environ.get("HF_TOKEN") or os.environ.get("HUGGINGFACE_HUB_TOKEN")
+    if env_token:
+        return env_token
+    if DEFAULT_TOKEN_PATH.exists():
+        token = DEFAULT_TOKEN_PATH.read_text(encoding="utf-8").strip()
+        return token or None
+    return None
+
+
+def append_status_journal(journal_path: Path, video_ids: Sequence[str], **updates: str) -> None:
+    journal_path.parent.mkdir(parents=True, exist_ok=True)
+    payload = {
+        "video_ids": list(video_ids),
+        "updates": {k: ("" if v is None else str(v)) for k, v in updates.items()},
+        "recorded_at": time.strftime("%Y-%m-%d %H:%M:%S"),
+    }
+    with journal_path.open("a", encoding="utf-8") as handle:
+        handle.write(json.dumps(payload, ensure_ascii=False) + "\n")
+
+
+def update_many_video_stats_best_effort(stats_path: Path, journal_path: Path, video_ids: Sequence[str], **updates: str) -> None:
+    try:
+        update_many_video_stats_with_retry(stats_path, video_ids, **updates)
+    except Exception as exc:
+        payload = dict(updates)
+        payload["last_error"] = str(exc) if not payload.get("last_error") else payload["last_error"]
+        append_status_journal(journal_path, video_ids, **payload)
+        print(f"Warning: stats.npz update deferred to journal due to: {exc}")
+
+
 def load_progress(progress_path: Path) -> Dict[str, object]:
     if progress_path.exists():
         with progress_path.open("r", encoding="utf-8") as handle:
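
Note: when stats.npz cannot be updated (for example, while another writer holds it), the failed batch update is appended to a JSONL journal instead of being lost. The commit does not show the replay side; a hypothetical replayer consistent with the journal format written above might look like:

import json
from pathlib import Path

def replay_status_journal(journal_path: Path, apply_update) -> int:
    """Re-apply deferred updates; apply_update(video_ids, **updates) matches
    the update_many_video_stats_with_retry signature. Returns entry count."""
    if not journal_path.exists():
        return 0
    replayed = 0
    for line in journal_path.read_text(encoding="utf-8").splitlines():
        if not line.strip():
            continue
        entry = json.loads(line)
        apply_update(entry["video_ids"], **entry["updates"])
        replayed += 1
    return replayed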
@@ -71,12 +113,18 @@ def save_progress(progress_path: Path, progress: Dict[str, object]) -> None:
 def folder_size_bytes(folder_path: Path) -> int:
     total = 0
     for path in folder_path.rglob("*"):
-        if path.is_file():
-            total += path.stat().st_size
+        if not path.is_file():
+            continue
+        stat_result = path.stat()
+        # Use allocated blocks so upload triggering follows quota pressure,
+        # not just logical payload bytes. This avoids undercounting millions
+        # of tiny NPZ files.
+        allocated_bytes = getattr(stat_result, "st_blocks", 0) * 512
+        total += allocated_bytes if allocated_bytes > 0 else stat_result.st_size
     return total
 
 
-def list_unuploaded_folders(dataset_dir: Path, progress: Dict[str, object]) -> List[Tuple[str, Path, int]]:
+def list_unuploaded_folder_paths(dataset_dir: Path, progress: Dict[str, object]) -> List[Tuple[str, Path]]:
     uploaded_folders = progress.get("uploaded_folders", {})
     folders = []
     for folder_path in sorted(dataset_dir.iterdir()):
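
Note: st_blocks counts 512-byte units on POSIX systems regardless of the filesystem's actual block size, so allocated size is st_blocks * 512; on a filesystem with 4 KiB blocks, a 100-byte file typically allocates a full 4 KiB, which is why logical sizes badly undercount directories full of tiny NPZ files. A quick illustrative check:

import os

def logical_and_allocated(path: str) -> tuple[int, int]:
    st = os.stat(path)
    return st.st_size, getattr(st, "st_blocks", 0) * 512  # st_blocks: POSIX only

# A 100-byte file on a 4 KiB-block filesystem commonly reports (100, 4096).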
@@ -88,10 +136,14 @@ def list_unuploaded_folder_paths(dataset_dir: Path, progress: Dict[str, object])
         npz_dir = folder_path / "npz"
         if not (npz_dir / COMPLETE_MARKER_NAME).exists():
             continue
-        folders.append((folder_name, folder_path, folder_size_bytes(folder_path)))
+        folders.append((folder_name, folder_path))
     return folders
 
 
+def enrich_folder_sizes(folders: Sequence[Tuple[str, Path]]) -> List[Tuple[str, Path, int]]:
+    return [(folder_name, folder_path, folder_size_bytes(folder_path)) for folder_name, folder_path in folders]
+
+
 def build_batch(folders: Sequence[Tuple[str, Path, int]], target_bytes: int) -> List[Tuple[str, Path, int]]:
     batch = []
     total_bytes = 0
@@ -122,6 +174,19 @@ def next_archive_index(progress: Dict[str, object], repo_files: Sequence[str]) -> int:
     return (max(indices) + 1) if indices else 1
 
 
+def preferred_temp_archive_dir() -> Path:
+    for key in ("SLURM_TMPDIR", "TMPDIR"):
+        value = os.environ.get(key)
+        if value:
+            path = Path(value)
+            path.mkdir(parents=True, exist_ok=True)
+            return path
+    path = Path("/tmp")
+    path.mkdir(parents=True, exist_ok=True)
+    return path
+
+
 def create_tar_archive(archive_path: Path, dataset_dir: Path, folder_names: Sequence[str]) -> None:
     archive_path.parent.mkdir(parents=True, exist_ok=True)
     with tarfile.open(archive_path, mode="w") as tar:
@@ -132,10 +197,63 @@ def create_tar_archive(archive_path: Path, dataset_dir: Path, folder_names: Sequence[str]) -> None:
 def upload_archive(api: HfApi, repo_id: str, repo_type: str, archive_path: Path) -> None:
     api.upload_file(
         path_or_fileobj=str(archive_path),
-        path_in_repo=archive_path.name,
+        path_in_repo=f"dataset/{archive_path.name}",
         repo_id=repo_id,
         repo_type=repo_type,
     )
+
+
+def upload_runtime_state_files(api: HfApi | None, repo_id: str, repo_type: str, progress_path: Path, journal_path: Path) -> None:
+    if api is None:
+        return
+    api.upload_file(
+        path_or_fileobj=str(progress_path),
+        path_in_repo="runtime_state/archive_upload_progress.json",
+        repo_id=repo_id,
+        repo_type=repo_type,
+    )
+    if journal_path.exists():
+        api.upload_file(
+            path_or_fileobj=str(journal_path),
+            path_in_repo="runtime_state/upload_status_journal.jsonl",
+            repo_id=repo_id,
+            repo_type=repo_type,
+        )
+
+
+def upload_archive_streaming(api: HfApi, repo_id: str, repo_type: str, dataset_dir: Path, folder_names: Sequence[str], archive_name: str) -> None:
@@ -218,6 +336,29 @@ def cleanup_local_assets(
218
  metadata_path.unlink()
219
 
220
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
221
  def format_size(num_bytes: int) -> str:
222
  size = float(num_bytes)
223
  for unit in ["B", "KB", "MB", "GB", "TB"]:
@@ -229,36 +370,61 @@ def format_size(num_bytes: int) -> str:
229
 
230
  def main() -> None:
231
  args = parse_args()
 
232
  progress = load_progress(args.progress_path)
233
- api = HfApi(token=args.token) if args.upload_mode == "api" else None
 
 
 
234
  args.dataset_dir.mkdir(parents=True, exist_ok=True)
235
 
236
  try:
237
- if args.upload_mode == "api":
 
238
  repo_files = api.list_repo_files(repo_id=args.repo_id, repo_type=args.repo_type)
239
  else:
 
240
  repo_files = list_repo_files_via_git(args.git_clone_dir, args.repo_id, args.repo_type)
241
- except Exception:
 
 
242
  repo_files = []
243
 
244
  while True:
245
- remaining_folders = list_unuploaded_folders(args.dataset_dir, progress)
246
- if not remaining_folders:
 
 
 
 
 
 
 
247
  print("No unuploaded dataset folders remain.")
248
  break
249
- remaining_bytes = total_batchable_bytes(remaining_folders)
250
- if args.require_target_bytes and remaining_bytes < args.target_bytes:
251
- print(
252
- f"Skip upload: only {format_size(remaining_bytes)} of completed NPZ folders available, below target {format_size(args.target_bytes)}."
253
- )
254
- break
255
-
256
- batch = build_batch(remaining_folders, args.target_bytes)
257
- batch_names = [name for name, _, _ in batch]
258
- batch_bytes = sum(folder_bytes for _, _, folder_bytes in batch)
 
 
 
 
 
 
 
 
 
259
  archive_index = next_archive_index(progress, repo_files)
260
  archive_name = f"Sign_DWPose_NPZ_{archive_index:06d}.tar"
261
- archive_path = args.archive_dir / archive_name
 
262
 
263
  print(f"Create archive {archive_name} with {len(batch_names)} folders ({format_size(batch_bytes)})")
264
  for folder_name in batch_names:
@@ -268,8 +434,9 @@ def main() -> None:
268
  break
269
 
270
  args.archive_dir.mkdir(parents=True, exist_ok=True)
271
- update_many_video_stats(
272
  args.stats_npz,
 
273
  batch_names,
274
  upload_status="uploading",
275
  archive_name=archive_name,
@@ -277,14 +444,18 @@ def main() -> None:
277
  updated_at=time.strftime("%Y-%m-%d %H:%M:%S"),
278
  )
279
  try:
280
- create_tar_archive(archive_path, args.dataset_dir, batch_names)
281
- if args.upload_mode == "api":
282
- upload_archive(api, args.repo_id, args.repo_type, archive_path)
283
  else:
284
- upload_archive_via_git(args.git_clone_dir, args.repo_id, args.repo_type, archive_path)
 
 
 
 
285
  except Exception as exc:
286
- update_many_video_stats(
287
  args.stats_npz,
 
288
  batch_names,
289
  upload_status="failed",
290
  local_cleanup_status="pending",
@@ -292,6 +463,8 @@ def main() -> None:
292
  last_error=str(exc),
293
  updated_at=time.strftime("%Y-%m-%d %H:%M:%S"),
294
  )
 
 
295
  raise
296
 
297
  progress["archives"][archive_name] = {
@@ -312,12 +485,14 @@ def main() -> None:
312
  args.raw_caption_dir,
313
  args.raw_metadata_dir,
314
  )
315
- archive_path.unlink(missing_ok=True)
 
316
  except Exception as exc:
317
  cleanup_error = str(exc)
318
- repo_files.append(archive_name)
319
- update_many_video_stats(
320
  args.stats_npz,
 
321
  batch_names,
322
  upload_status="uploaded",
323
  local_cleanup_status="deleted" if not cleanup_error else "failed",
@@ -325,10 +500,23 @@ def main() -> None:
325
  last_error=cleanup_error,
326
  updated_at=time.strftime("%Y-%m-%d %H:%M:%S"),
327
  )
 
 
 
 
 
 
 
 
328
  if cleanup_error:
329
  raise RuntimeError(f"Uploaded {archive_name} but local cleanup failed: {cleanup_error}")
330
  print(f"Uploaded {archive_name} and cleaned raw assets for {len(batch_names)} videos.")
331
 
332
 
333
  if __name__ == "__main__":
334
- main()
 
 
 
 
 
 
7
  import subprocess
8
  import sys
9
  import tarfile
10
+ import threading
11
+ import traceback
12
  import time
13
  from pathlib import Path
14
  from typing import Dict, List, Sequence, Tuple
 
20
  if str(REPO_ROOT) not in sys.path:
21
  sys.path.insert(0, str(REPO_ROOT))
22
 
23
+ from utils.stats_npz import update_many_video_stats_with_retry
24
 
25
 
26
  DEFAULT_DATASET_DIR = REPO_ROOT / "dataset"
 
29
  DEFAULT_RAW_METADATA_DIR = REPO_ROOT / "raw_metadata"
30
  DEFAULT_ARCHIVE_DIR = REPO_ROOT / "archives"
31
  DEFAULT_PROGRESS_PATH = REPO_ROOT / "archive_upload_progress.json"
32
+ DEFAULT_STATUS_JOURNAL_PATH = REPO_ROOT / "upload_status_journal.jsonl"
33
  DEFAULT_STATS_NPZ = REPO_ROOT / "stats.npz"
34
  DEFAULT_GIT_CLONE_DIR = DEFAULT_ARCHIVE_DIR / ".hf_git_repo"
35
+ DEFAULT_TOKEN_PATH = Path.home() / ".hf_token.txt"
36
+ DEFAULT_TARGET_BYTES = 10 * 1024 * 1024 * 1024
37
+ DEFAULT_TARGET_FOLDERS = 40
38
  COMPLETE_MARKER_NAME = ".complete"
39
 
40
 
 
49
  parser.add_argument("--archive-dir", type=Path, default=DEFAULT_ARCHIVE_DIR)
50
  parser.add_argument("--progress-path", type=Path, default=DEFAULT_PROGRESS_PATH)
51
  parser.add_argument("--stats-npz", type=Path, default=DEFAULT_STATS_NPZ)
52
+ parser.add_argument("--status-journal-path", type=Path, default=DEFAULT_STATUS_JOURNAL_PATH)
53
  parser.add_argument("--repo-id", default="SignerX/Sign-DWPose-2M")
54
  parser.add_argument("--repo-type", default="dataset")
55
  parser.add_argument("--target-bytes", type=int, default=DEFAULT_TARGET_BYTES)
56
+ parser.add_argument("--target-folders", type=int, default=DEFAULT_TARGET_FOLDERS)
57
  parser.add_argument("--require-target-bytes", action="store_true")
58
  parser.add_argument("--dry-run", action="store_true")
59
+ parser.add_argument("--upload-mode", choices=["git-ssh", "api", "api-stream"], default=os.environ.get("HF_UPLOAD_MODE", "api"))
60
  parser.add_argument("--git-clone-dir", type=Path, default=DEFAULT_GIT_CLONE_DIR)
61
+ parser.add_argument("--token", default=None)
62
  return parser.parse_args()
63
 
64
 
65
+
66
+
67
+ def resolve_token(cli_token: str | None) -> str | None:
68
+ if cli_token:
69
+ return cli_token
70
+ env_token = os.environ.get("HF_TOKEN") or os.environ.get("HUGGINGFACE_HUB_TOKEN")
71
+ if env_token:
72
+ return env_token
73
+ if DEFAULT_TOKEN_PATH.exists():
74
+ token = DEFAULT_TOKEN_PATH.read_text(encoding="utf-8").strip()
75
+ return token or None
76
+ return None
77
+
78
+
79
+
80
+ def append_status_journal(journal_path: Path, video_ids: Sequence[str], **updates: str) -> None:
81
+ journal_path.parent.mkdir(parents=True, exist_ok=True)
82
+ payload = {
83
+ "video_ids": list(video_ids),
84
+ "updates": {k: ("" if v is None else str(v)) for k, v in updates.items()},
85
+ "recorded_at": time.strftime("%Y-%m-%d %H:%M:%S"),
86
+ }
87
+ with journal_path.open("a", encoding="utf-8") as handle:
88
+ handle.write(json.dumps(payload, ensure_ascii=False) + "\n")
89
+
90
+
91
+ def update_many_video_stats_best_effort(stats_path: Path, journal_path: Path, video_ids: Sequence[str], **updates: str) -> None:
92
+ try:
93
+ update_many_video_stats_with_retry(stats_path, video_ids, **updates)
94
+ except Exception as exc:
95
+ payload = dict(updates)
96
+ payload["last_error"] = payload.get("last_error") or str(exc)
97
+ append_status_journal(journal_path, video_ids, **payload)
98
+ print(f"Warning: stats.npz update deferred to journal due to: {exc}")
99
+
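
Each journal line is self-contained JSON, so a later maintenance pass can replay deferred updates in order once stats.npz is writable again. A sketch of such a replay, assuming the journal format written above; the helper name replay_status_journal is illustrative, not part of this commit:

import json
from pathlib import Path

from utils.stats_npz import update_many_video_stats_with_retry

def replay_status_journal(stats_path: Path, journal_path: Path) -> int:
    # Apply each journaled batch in write order, then drop the journal.
    if not journal_path.exists():
        return 0
    applied = 0
    for line in journal_path.read_text(encoding="utf-8").splitlines():
        if not line.strip():
            continue
        entry = json.loads(line)
        update_many_video_stats_with_retry(stats_path, entry["video_ids"], **entry["updates"])
        applied += 1
    journal_path.unlink()
    return applied
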
100
  def load_progress(progress_path: Path) -> Dict[str, object]:
101
  if progress_path.exists():
102
  with progress_path.open("r", encoding="utf-8") as handle:
 
113
  def folder_size_bytes(folder_path: Path) -> int:
114
  total = 0
115
  for path in folder_path.rglob("*"):
116
+ if not path.is_file():
117
+ continue
118
+ stat_result = path.stat()
119
+ # Use allocated blocks so upload triggering follows quota pressure,
120
+ # not just logical payload bytes. This avoids undercounting millions
121
+ # of tiny NPZ files.
122
+ allocated_bytes = getattr(stat_result, "st_blocks", 0) * 512
123
+ total += allocated_bytes if allocated_bytes > 0 else stat_result.st_size
124
  return total
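
The st_blocks accounting matters because a folder of many small NPZ files can allocate far more disk than its logical byte count suggests. A quick illustration (exact numbers depend on the filesystem; st_blocks is always reported in 512-byte units):

import os
from pathlib import Path

def compare_sizes(folder: Path) -> tuple[int, int]:
    # Returns (logical_bytes, allocated_bytes) over every regular file.
    logical = allocated = 0
    for path in folder.rglob("*"):
        if not path.is_file():
            continue
        st = path.stat()
        logical += st.st_size
        allocated += getattr(st, "st_blocks", 0) * 512
    return logical, allocated

# On a filesystem with 4 KiB blocks, 1000 files of 100 bytes each give
# logical=100_000 but allocated=4_096_000: a 40x gap that st_size misses.
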
125
 
126
 
127
+ def list_unuploaded_folder_paths(dataset_dir: Path, progress: Dict[str, object]) -> List[Tuple[str, Path]]:
128
  uploaded_folders = progress.get("uploaded_folders", {})
129
  folders = []
130
  for folder_path in sorted(dataset_dir.iterdir()):
 
136
  npz_dir = folder_path / "npz"
137
  if not (npz_dir / COMPLETE_MARKER_NAME).exists():
138
  continue
139
+ folders.append((folder_name, folder_path))
140
  return folders
141
 
142
 
143
+ def enrich_folder_sizes(folders: Sequence[Tuple[str, Path]]) -> List[Tuple[str, Path, int]]:
144
+ return [(folder_name, folder_path, folder_size_bytes(folder_path)) for folder_name, folder_path in folders]
145
+
146
+
147
  def build_batch(folders: Sequence[Tuple[str, Path, int]], target_bytes: int) -> List[Tuple[str, Path, int]]:
148
  batch = []
149
  total_bytes = 0
 
174
  return (max(indices) + 1) if indices else 1
175
 
176
 
177
+
178
+
179
+ def preferred_temp_archive_dir() -> Path:
180
+ for key in ("SLURM_TMPDIR", "TMPDIR"):
181
+ value = os.environ.get(key)
182
+ if value:
183
+ path = Path(value)
184
+ path.mkdir(parents=True, exist_ok=True)
185
+ return path
186
+ path = Path("/tmp")
187
+ path.mkdir(parents=True, exist_ok=True)
188
+ return path
189
+
190
  def create_tar_archive(archive_path: Path, dataset_dir: Path, folder_names: Sequence[str]) -> None:
191
  archive_path.parent.mkdir(parents=True, exist_ok=True)
192
  with tarfile.open(archive_path, mode="w") as tar:
 
197
  def upload_archive(api: HfApi, repo_id: str, repo_type: str, archive_path: Path) -> None:
198
  api.upload_file(
199
  path_or_fileobj=str(archive_path),
200
+ path_in_repo=f"dataset/{archive_path.name}",
201
+ repo_id=repo_id,
202
+ repo_type=repo_type,
203
+ )
204
+
205
+
206
+ def upload_runtime_state_files(api: HfApi | None, repo_id: str, repo_type: str, progress_path: Path, journal_path: Path) -> None:
207
+ if api is None:
208
+ return
209
+ api.upload_file(
210
+ path_or_fileobj=str(progress_path),
211
+ path_in_repo="runtime_state/archive_upload_progress.json",
212
  repo_id=repo_id,
213
  repo_type=repo_type,
214
  )
215
+ if journal_path.exists():
216
+ api.upload_file(
217
+ path_or_fileobj=str(journal_path),
218
+ path_in_repo="runtime_state/upload_status_journal.jsonl",
219
+ repo_id=repo_id,
220
+ repo_type=repo_type,
221
+ )
222
+
223
+
224
+ def upload_archive_streaming(api: HfApi, repo_id: str, repo_type: str, dataset_dir: Path, folder_names: Sequence[str], archive_name: str) -> None:
225
+ if api is None:
226
+ raise RuntimeError('api-stream upload requires a Hugging Face token')
227
+ command = ['tar', '-cf', '-', '-C', str(dataset_dir), *folder_names]
228
+ process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
229
+ assert process.stdout is not None
230
+ stderr_chunks = []
231
+
232
+ def _read_stderr() -> None:
233
+ assert process.stderr is not None
234
+ data = process.stderr.read()
235
+ if data:
236
+ stderr_chunks.append(data.decode('utf-8', errors='replace'))
237
+
238
+ stderr_thread = threading.Thread(target=_read_stderr, daemon=True)
239
+ stderr_thread.start()
240
+ try:
241
+ api.upload_file(
242
+ path_or_fileobj=process.stdout,
243
+ path_in_repo=f"dataset/{archive_name}",
244
+ repo_id=repo_id,
245
+ repo_type=repo_type,
246
+ )
247
+ finally:
248
+ try:
249
+ process.stdout.close()
250
+ except Exception:
251
+ pass
252
+ return_code = process.wait()
253
+ stderr_thread.join(timeout=5)
254
+ if return_code != 0:
255
+ stderr_text = ''.join(stderr_chunks).strip()
256
+ raise RuntimeError(stderr_text or f'tar streaming command failed with exit code {return_code}')
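
The streaming path pipes tar straight into HfApi.upload_file, which accepts a file-like object as path_or_fileobj, so no archive ever lands on local disk; the stderr reader thread keeps tar from blocking if it fills its stderr pipe. A usage sketch, with repo id, folder names, and token as placeholders:

from pathlib import Path
from huggingface_hub import HfApi

api = HfApi(token="hf_placeholder")
upload_archive_streaming(
    api,
    repo_id="SignerX/Sign-DWPose-2M",
    repo_type="dataset",
    dataset_dir=Path("dataset"),
    folder_names=["vid_000001", "vid_000002"],
    archive_name="Sign_DWPose_NPZ_000001.tar",
)
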
257
 
258
 
259
  def repo_git_url(repo_id: str, repo_type: str) -> str:
 
336
  metadata_path.unlink()
337
 
338
 
339
+ def prune_uploaded_runtime_residue(
340
+ progress: Dict[str, object],
341
+ dataset_dir: Path,
342
+ raw_video_dir: Path,
343
+ raw_caption_dir: Path,
344
+ raw_metadata_dir: Path,
345
+ ) -> None:
346
+ uploaded = set(progress.get("uploaded_folders", {}))
347
+ for video_id in uploaded:
348
+ for path in raw_video_dir.glob(f"{video_id}.*"):
349
+ if path.is_file():
350
+ path.unlink(missing_ok=True)
351
+ caption_dir = raw_caption_dir / video_id
352
+ if caption_dir.exists():
353
+ shutil.rmtree(caption_dir, ignore_errors=True)
354
+ metadata_path = raw_metadata_dir / f"{video_id}.json"
355
+ if metadata_path.exists():
356
+ metadata_path.unlink()
357
+ dataset_video_dir = dataset_dir / video_id
358
+ if dataset_video_dir.exists() and not (dataset_video_dir / "npz" / COMPLETE_MARKER_NAME).exists():
359
+ shutil.rmtree(dataset_video_dir, ignore_errors=True)
360
+
361
+
362
  def format_size(num_bytes: int) -> str:
363
  size = float(num_bytes)
364
  for unit in ["B", "KB", "MB", "GB", "TB"]:
 
370
 
371
  def main() -> None:
372
  args = parse_args()
373
+ print(f"[pipeline03] start upload_mode={args.upload_mode} repo_id={args.repo_id}", flush=True)
374
  progress = load_progress(args.progress_path)
375
+ print(f"[pipeline03] loaded progress archives={len(progress.get('archives', {}))} uploaded_folders={len(progress.get('uploaded_folders', {}))}", flush=True)
376
+ resolved_token = resolve_token(args.token)
377
+ print(f"[pipeline03] token_present={bool(resolved_token)}", flush=True)
378
+ api = HfApi(token=resolved_token) if args.upload_mode in {"api", "api-stream"} else None
379
  args.dataset_dir.mkdir(parents=True, exist_ok=True)
380
 
381
  try:
382
+ if args.upload_mode in {"api", "api-stream"}:
383
+ print("[pipeline03] listing repo files via api", flush=True)
384
  repo_files = api.list_repo_files(repo_id=args.repo_id, repo_type=args.repo_type)
385
  else:
386
+ print("[pipeline03] listing repo files via git", flush=True)
387
  repo_files = list_repo_files_via_git(args.git_clone_dir, args.repo_id, args.repo_type)
388
+ except Exception as exc:
389
+ print(f"[pipeline03] repo file listing failed: {exc}", flush=True)
390
+ traceback.print_exc()
391
  repo_files = []
392
 
393
  while True:
394
+ prune_uploaded_runtime_residue(
395
+ progress,
396
+ args.dataset_dir,
397
+ args.raw_video_dir,
398
+ args.raw_caption_dir,
399
+ args.raw_metadata_dir,
400
+ )
401
+ remaining_folder_paths = list_unuploaded_folder_paths(args.dataset_dir, progress)
402
+ if not remaining_folder_paths:
403
  print("No unuploaded dataset folders remain.")
404
  break
405
+ remaining_count = len(remaining_folder_paths)
406
+ print(f"[pipeline03] remaining completed folders available={remaining_count}", flush=True)
407
+ if remaining_count >= args.target_folders:
408
+ selected_folder_paths = remaining_folder_paths[: args.target_folders]
409
+ batch = enrich_folder_sizes(selected_folder_paths)
410
+ batch_names = [name for name, _, _ in batch]
411
+ batch_bytes = sum(folder_bytes for _, _, folder_bytes in batch)
412
+ print(f"[pipeline03] folder threshold reached; selecting first {len(batch_names)} folders without global size scan", flush=True)
413
+ else:
414
+ remaining_folders = enrich_folder_sizes(remaining_folder_paths)
415
+ remaining_bytes = total_batchable_bytes(remaining_folders)
416
+ if args.require_target_bytes and remaining_bytes < args.target_bytes:
417
+ print(
418
+ f"Skip upload: only {format_size(remaining_bytes)} across {remaining_count} completed NPZ folders available, below both the {format_size(args.target_bytes)} size target and the {args.target_folders}-folder target."
419
+ )
420
+ break
421
+ batch = build_batch(remaining_folders, args.target_bytes)
422
+ batch_names = [name for name, _, _ in batch]
423
+ batch_bytes = sum(folder_bytes for _, _, folder_bytes in batch)
424
  archive_index = next_archive_index(progress, repo_files)
425
  archive_name = f"Sign_DWPose_NPZ_{archive_index:06d}.tar"
426
+ archive_root = args.archive_dir if args.upload_mode == "git-ssh" else preferred_temp_archive_dir()
427
+ archive_path = archive_root / archive_name
428
 
429
  print(f"Create archive {archive_name} with {len(batch_names)} folders ({format_size(batch_bytes)})")
430
  for folder_name in batch_names:
 
434
  break
435
 
436
  args.archive_dir.mkdir(parents=True, exist_ok=True)
437
+ update_many_video_stats_best_effort(
438
  args.stats_npz,
439
+ args.status_journal_path,
440
  batch_names,
441
  upload_status="uploading",
442
  archive_name=archive_name,
 
444
  updated_at=time.strftime("%Y-%m-%d %H:%M:%S"),
445
  )
446
  try:
447
+ if args.upload_mode == "api-stream":
448
+ upload_archive_streaming(api, args.repo_id, args.repo_type, args.dataset_dir, batch_names, archive_name)
 
449
  else:
450
+ create_tar_archive(archive_path, args.dataset_dir, batch_names)
451
+ if args.upload_mode == "api":
452
+ upload_archive(api, args.repo_id, args.repo_type, archive_path)
453
+ else:
454
+ upload_archive_via_git(args.git_clone_dir, args.repo_id, args.repo_type, archive_path)
455
  except Exception as exc:
456
+ update_many_video_stats_best_effort(
457
  args.stats_npz,
458
+ args.status_journal_path,
459
  batch_names,
460
  upload_status="failed",
461
  local_cleanup_status="pending",
 
463
  last_error=str(exc),
464
  updated_at=time.strftime("%Y-%m-%d %H:%M:%S"),
465
  )
466
+ if archive_path.exists():
467
+ archive_path.unlink(missing_ok=True)
468
  raise
469
 
470
  progress["archives"][archive_name] = {
 
485
  args.raw_caption_dir,
486
  args.raw_metadata_dir,
487
  )
488
+ if archive_path.exists():
489
+ archive_path.unlink(missing_ok=True)
490
  except Exception as exc:
491
  cleanup_error = str(exc)
492
+ repo_files.append(f"dataset/{archive_name}")
493
+ update_many_video_stats_best_effort(
494
  args.stats_npz,
495
+ args.status_journal_path,
496
  batch_names,
497
  upload_status="uploaded",
498
  local_cleanup_status="deleted" if not cleanup_error else "failed",
 
500
  last_error=cleanup_error,
501
  updated_at=time.strftime("%Y-%m-%d %H:%M:%S"),
502
  )
503
+ upload_runtime_state_files(api, args.repo_id, args.repo_type, args.progress_path, args.status_journal_path)
504
+ prune_uploaded_runtime_residue(
505
+ progress,
506
+ args.dataset_dir,
507
+ args.raw_video_dir,
508
+ args.raw_caption_dir,
509
+ args.raw_metadata_dir,
510
+ )
511
  if cleanup_error:
512
  raise RuntimeError(f"Uploaded {archive_name} but local cleanup failed: {cleanup_error}")
513
  print(f"Uploaded {archive_name} and cleaned raw assets for {len(batch_names)} videos.")
514
 
515
 
516
  if __name__ == "__main__":
517
+ try:
518
+ main()
519
+ except Exception as exc:
520
+ print(f"[pipeline03] fatal: {exc}", file=sys.stderr, flush=True)
521
+ traceback.print_exc()
522
+ raise
scripts/visualize_dwpose_npz.py CHANGED
@@ -93,12 +93,26 @@ def get_stablesigner_openpose_draw():
93
  return _STABLE_SIGNER_OPENPOSE_DRAW
94
 
95
 
96
- def load_npz_frame(npz_path: Path) -> Dict[str, object]:
97
  payload = np.load(npz_path, allow_pickle=True)
98
- frame: Dict[str, object] = {}
99
- frame["num_persons"] = int(payload["num_persons"])
100
- frame["frame_width"] = int(payload["frame_width"])
101
- frame["frame_height"] = int(payload["frame_height"])
102
 
103
  for person_idx in range(frame["num_persons"]):
104
  source_prefix = f"person_{person_idx:03d}"
@@ -115,8 +129,8 @@ def load_npz_frame(npz_path: Path) -> Dict[str, object]:
115
  "right_hand_scores",
116
  ):
117
  key = f"{source_prefix}_{suffix}"
118
- if key in payload.files:
119
- person_data[suffix] = payload[key]
120
  if person_data:
121
  frame[target_prefix] = person_data
122
  return frame
@@ -385,7 +399,7 @@ def render_pose_image(frame: Dict[str, object], draw_style: str, transparent: bo
385
  def save_frame_previews(npz_paths: Iterable[Path], single_frame_dir: Path, draw_style: str, conf_threshold: float) -> None:
386
  single_frame_dir.mkdir(parents=True, exist_ok=True)
387
  for npz_path in npz_paths:
388
- frame = load_npz_frame(npz_path)
389
  image = render_pose_image(frame, draw_style=draw_style, transparent=False, conf_threshold=conf_threshold)
390
  image.save(single_frame_dir / f"{npz_path.stem}.png")
391
 
@@ -394,7 +408,7 @@ def render_pose_frames(npz_paths: List[Path], pose_frame_dir: Path, draw_style:
394
  pose_frame_dir.mkdir(parents=True, exist_ok=True)
395
  total = len(npz_paths)
396
  for index, npz_path in enumerate(npz_paths, start=1):
397
- frame = load_npz_frame(npz_path)
398
  image = render_pose_image(frame, draw_style=draw_style, transparent=False, conf_threshold=conf_threshold)
399
  image.save(pose_frame_dir / f"{npz_path.stem}.png")
400
  if index == 1 or index % 100 == 0 or index == total:
@@ -463,7 +477,7 @@ def render_overlay_frames(
463
  overlay_dir.mkdir(parents=True, exist_ok=True)
464
  frame_count = min(len(npz_paths), len(raw_frame_paths))
465
  for index, (npz_path, raw_frame_path) in enumerate(zip(npz_paths[:frame_count], raw_frame_paths[:frame_count]), start=1):
466
- frame = load_npz_frame(npz_path)
467
  pose_rgba = render_pose_image(frame, draw_style=draw_style, transparent=True, conf_threshold=conf_threshold)
468
  with Image.open(raw_frame_path) as raw_image:
469
  base = raw_image.convert("RGBA")
@@ -488,7 +502,11 @@ def main() -> None:
488
  if not npz_dir.exists():
489
  raise FileNotFoundError(f"NPZ directory not found: {npz_dir}")
490
 
491
- npz_paths = sorted(npz_dir.glob("*.npz"))
492
  if args.max_frames is not None:
493
  npz_paths = npz_paths[: args.max_frames]
494
  if not npz_paths:
 
93
  return _STABLE_SIGNER_OPENPOSE_DRAW
94
 
95
 
96
+ def load_npz_frame(npz_path: Path, aggregated_index: int = 0) -> Dict[str, object]:
97
  payload = np.load(npz_path, allow_pickle=True)
98
+ if "frame_payloads" in payload.files:
99
+ frame_payloads = payload["frame_payloads"]
100
+ if aggregated_index >= len(frame_payloads):
101
+ raise IndexError(f"Aggregated frame index {aggregated_index} out of range for {npz_path}")
102
+ payload_dict = frame_payloads[aggregated_index]
103
+ if hasattr(payload_dict, "item"):
104
+ payload_dict = payload_dict.item()
105
+ frame: Dict[str, object] = {}
106
+ frame["num_persons"] = int(payload_dict["num_persons"])
107
+ frame["frame_width"] = int(payload_dict["frame_width"])
108
+ frame["frame_height"] = int(payload_dict["frame_height"])
109
+ source = payload_dict
110
+ else:
111
+ frame = {}
112
+ frame["num_persons"] = int(payload["num_persons"])
113
+ frame["frame_width"] = int(payload["frame_width"])
114
+ frame["frame_height"] = int(payload["frame_height"])
115
+ source = payload
116
 
117
  for person_idx in range(frame["num_persons"]):
118
  source_prefix = f"person_{person_idx:03d}"
 
129
  "right_hand_scores",
130
  ):
131
  key = f"{source_prefix}_{suffix}"
132
+ if key in source:
133
+ person_data[suffix] = source[key]
134
  if person_data:
135
  frame[target_prefix] = person_data
136
  return frame
 
399
  def save_frame_previews(npz_paths: Iterable[Path], single_frame_dir: Path, draw_style: str, conf_threshold: float) -> None:
400
  single_frame_dir.mkdir(parents=True, exist_ok=True)
401
  for index, npz_path in enumerate(npz_paths, start=1):
402
+ frame = load_npz_frame(npz_path, aggregated_index=index - 1 if npz_path.name == "poses.npz" else 0)
403
  image = render_pose_image(frame, draw_style=draw_style, transparent=False, conf_threshold=conf_threshold)
404
  image.save(single_frame_dir / f"{npz_path.stem}.png")
405
 
 
408
  pose_frame_dir.mkdir(parents=True, exist_ok=True)
409
  total = len(npz_paths)
410
  for index, npz_path in enumerate(npz_paths, start=1):
411
+ frame = load_npz_frame(npz_path, aggregated_index=index - 1 if npz_path.name == "poses.npz" else 0)
412
  image = render_pose_image(frame, draw_style=draw_style, transparent=False, conf_threshold=conf_threshold)
413
  image.save(pose_frame_dir / f"{npz_path.stem}.png")
414
  if index == 1 or index % 100 == 0 or index == total:
 
477
  overlay_dir.mkdir(parents=True, exist_ok=True)
478
  frame_count = min(len(npz_paths), len(raw_frame_paths))
479
  for index, (npz_path, raw_frame_path) in enumerate(zip(npz_paths[:frame_count], raw_frame_paths[:frame_count]), start=1):
480
+ frame = load_npz_frame(npz_path, aggregated_index=index - 1 if npz_path.name == "poses.npz" else 0)
481
  pose_rgba = render_pose_image(frame, draw_style=draw_style, transparent=True, conf_threshold=conf_threshold)
482
  with Image.open(raw_frame_path) as raw_image:
483
  base = raw_image.convert("RGBA")
 
502
  if not npz_dir.exists():
503
  raise FileNotFoundError(f"NPZ directory not found: {npz_dir}")
504
 
505
+ poses_npz_path = npz_dir / "poses.npz"
506
+ if poses_npz_path.exists():
507
+ npz_paths = [poses_npz_path]
508
+ else:
509
+ npz_paths = sorted(npz_dir.glob("*.npz"))
510
  if args.max_frames is not None:
511
  npz_paths = npz_paths[: args.max_frames]
512
  if not npz_paths:
slurm/process_download_array.slurm ADDED
@@ -0,0 +1,100 @@
1
+ #!/usr/bin/env bash
2
+ #SBATCH --job-name=download
3
+ #SBATCH --nodes=1
4
+ #SBATCH --ntasks=1
5
+ #SBATCH --cpus-per-task=1
6
+ #SBATCH --mem=4G
7
+ #SBATCH --time=04:00:00
8
+ #SBATCH --output=%x_%A_%a.out
9
+ #SBATCH --error=%x_%A_%a.err
10
+
11
+ set -euo pipefail
12
+
13
+ ROOT_DIR="${ROOT_DIR:-/home/sf895/Sign-DWPose-2M}"
14
+ RUNTIME_ROOT="${RUNTIME_ROOT:-/home/sf895/Sign-DWPose-2M-runtime}"
15
+ STATE_ROOT="${STATE_ROOT:-/home/sf895/Sign-DWPose-2M-runtime}"
16
+ CONDA_SH="${CONDA_SH:-/home/sf895/miniconda3/etc/profile.d/conda.sh}"
17
+ CONDA_ENV="${CONDA_ENV:-signx2}"
18
+ SOURCE_METADATA_CSV="${SOURCE_METADATA_CSV:-$RUNTIME_ROOT/Sign-DWPose-2M-metadata_ori.csv}"
19
+ OUTPUT_METADATA_CSV="${OUTPUT_METADATA_CSV:-$RUNTIME_ROOT/Sign-DWPose-2M-metadata_processed.csv}"
20
+ RAW_VIDEO_DIR="${RAW_VIDEO_DIR:-$RUNTIME_ROOT/raw_video}"
21
+ RAW_CAPTION_DIR="${RAW_CAPTION_DIR:-$RUNTIME_ROOT/raw_caption}"
22
+ RAW_METADATA_DIR="${RAW_METADATA_DIR:-$RUNTIME_ROOT/raw_metadata}"
23
+ DATASET_DIR="${DATASET_DIR:-$RUNTIME_ROOT/dataset}"
24
+ STATS_NPZ="${STATS_NPZ:-$RUNTIME_ROOT/stats.npz}"
25
+ PIPELINE01="${PIPELINE01:-$ROOT_DIR/scripts/pipeline01_download_video_fix_caption.py}"
26
+ DOWNLOAD_CLAIM_DIR="${DOWNLOAD_CLAIM_DIR:-$STATE_ROOT/slurm/state/download_claims}"
27
+ DOWNLOAD_CSV_LOCK_PATH="${DOWNLOAD_CSV_LOCK_PATH:-$STATE_ROOT/Sign-DWPose-2M-metadata_processed.csv.lock}"
28
+ MANIFEST="${MANIFEST:-${1:-}}"
29
+
30
+ if [[ -z "$MANIFEST" ]]; then
31
+ echo "MANIFEST is required (env var or first positional arg)." >&2
32
+ exit 1
33
+ fi
34
+ if [[ ! -f "$MANIFEST" ]]; then
35
+ echo "Manifest not found: $MANIFEST" >&2
36
+ exit 1
37
+ fi
38
+ if [[ -z "${SLURM_ARRAY_TASK_ID:-}" ]]; then
39
+ echo "SLURM_ARRAY_TASK_ID is required." >&2
40
+ exit 1
41
+ fi
42
+ if [[ ! -f "$CONDA_SH" ]]; then
43
+ echo "Missing conda init script: $CONDA_SH" >&2
44
+ exit 1
45
+ fi
46
+
47
+ VIDEO_ID="$(sed -n "$((SLURM_ARRAY_TASK_ID + 1))p" "$MANIFEST")"
48
+ if [[ -z "$VIDEO_ID" ]]; then
49
+ echo "No video id found for task index ${SLURM_ARRAY_TASK_ID} in manifest $MANIFEST" >&2
50
+ exit 1
51
+ fi
52
+
53
+ mkdir -p "$DOWNLOAD_CLAIM_DIR"
54
+ CLAIM_PATH="$DOWNLOAD_CLAIM_DIR/${VIDEO_ID}.claim"
55
+ cleanup_claim() {
56
+ rm -f "$CLAIM_PATH"
57
+ }
58
+ trap cleanup_claim EXIT
59
+
60
+ echo "[$(date '+%F %T')] job=${SLURM_JOB_ID:-na} task=${SLURM_ARRAY_TASK_ID} host=$(hostname) video_id=$VIDEO_ID"
61
+
62
+ # shellcheck disable=SC1090
63
+ source "$CONDA_SH"
64
+ cmd=(python "$PIPELINE01"
65
+ --source-metadata-csv "$SOURCE_METADATA_CSV"
66
+ --output-metadata-csv "$OUTPUT_METADATA_CSV"
67
+ --raw-video-dir "$RAW_VIDEO_DIR"
68
+ --raw-caption-dir "$RAW_CAPTION_DIR"
69
+ --raw-metadata-dir "$RAW_METADATA_DIR"
70
+ --dataset-dir "$DATASET_DIR"
71
+ --stats-npz "$STATS_NPZ"
72
+ --csv-lock-path "$DOWNLOAD_CSV_LOCK_PATH"
73
+ --limit 1
74
+ --video-ids "$VIDEO_ID"
75
+ )
76
+ if [[ "${FORCE_METADATA:-0}" == "1" ]]; then
77
+ cmd+=(--force-metadata)
78
+ fi
79
+ if [[ "${FORCE_SUBTITLES:-0}" == "1" ]]; then
80
+ cmd+=(--force-subtitles)
81
+ fi
82
+ if [[ "${FORCE_DOWNLOAD:-0}" == "1" ]]; then
83
+ cmd+=(--force-download)
84
+ fi
85
+ if [[ "${SKIP_VIDEO_DOWNLOAD:-0}" == "1" ]]; then
86
+ cmd+=(--skip-video-download)
87
+ fi
88
+ if [[ "${SKIP_SUBTITLES:-0}" == "1" ]]; then
89
+ cmd+=(--skip-subtitles)
90
+ fi
91
+ if [[ -n "${COOKIES_FROM_BROWSER:-}" ]]; then
92
+ cmd+=(--cookies-from-browser "$COOKIES_FROM_BROWSER")
93
+ fi
94
+ if [[ -n "${COOKIES_FILE:-}" ]]; then
95
+ cmd+=(--cookies "$COOKIES_FILE")
96
+ fi
97
+ if [[ -n "${EXTRACTOR_ARGS:-}" ]]; then
98
+ cmd+=(--extractor-args "$EXTRACTOR_ARGS")
99
+ fi
100
+ CONDA_NO_PLUGINS=true conda run -n "$CONDA_ENV" "${cmd[@]}"
slurm/process_dwpose_array.slurm CHANGED
@@ -25,6 +25,8 @@ TMP_ROOT="${TMP_ROOT:-${SLURM_TMPDIR:-/tmp}}"
25
  FORCE_PROCESS="${FORCE_PROCESS:-0}"
26
  DELETE_SOURCE_ON_SUCCESS="${DELETE_SOURCE_ON_SUCCESS:-0}"
27
  CLAIM_DIR="${CLAIM_DIR:-$STATE_ROOT/slurm/state/claims}"
 
 
28
 
29
  MANIFEST="${MANIFEST:-${1:-}}"
30
  if [[ -z "$MANIFEST" ]]; then
@@ -50,7 +52,9 @@ if [[ -z "$VIDEO_ID" ]]; then
50
  exit 1
51
  fi
52
 
 
53
  CLAIM_PATH="$CLAIM_DIR/${VIDEO_ID}.claim"
 
54
  cleanup_claim() {
55
  rm -f "$CLAIM_PATH"
56
  }
@@ -63,6 +67,91 @@ echo "[$(date '+%F %T')] job=${SLURM_JOB_ID:-na} task=${SLURM_ARRAY_TASK_ID} hos
63
 
64
  # shellcheck disable=SC1090
65
  source "$CONDA_SH"
66
 
67
  cmd=(python -u "$PIPELINE02"
68
  --raw-video-dir "$RAW_VIDEO_DIR"
@@ -80,4 +169,28 @@ if [[ "$DELETE_SOURCE_ON_SUCCESS" == "1" ]]; then
80
  cmd+=(--delete-source-on-success)
81
  fi
82
 
83
- CONDA_NO_PLUGINS=true conda run -n "$CONDA_ENV" "${cmd[@]}"
25
  FORCE_PROCESS="${FORCE_PROCESS:-0}"
26
  DELETE_SOURCE_ON_SUCCESS="${DELETE_SOURCE_ON_SUCCESS:-0}"
27
  CLAIM_DIR="${CLAIM_DIR:-$STATE_ROOT/slurm/state/claims}"
28
+ RETRY_DIR="${RETRY_DIR:-$STATE_ROOT/slurm/state/gpu_init_retries}"
29
+ MAX_GPU_INIT_RETRIES="${MAX_GPU_INIT_RETRIES:-3}"
30
 
31
  MANIFEST="${MANIFEST:-${1:-}}"
32
  if [[ -z "$MANIFEST" ]]; then
 
52
  exit 1
53
  fi
54
 
55
+ mkdir -p "$CLAIM_DIR" "$RETRY_DIR"
56
  CLAIM_PATH="$CLAIM_DIR/${VIDEO_ID}.claim"
57
+ RETRY_STATE_PATH="$RETRY_DIR/${VIDEO_ID}.state"
58
  cleanup_claim() {
59
  rm -f "$CLAIM_PATH"
60
  }
 
67
 
68
  # shellcheck disable=SC1090
69
  source "$CONDA_SH"
70
+ CONDA_ENV_PREFIX="$(conda env list | awk '$1 == env {print $NF}' env="$CONDA_ENV")"
71
+ if [[ -z "$CONDA_ENV_PREFIX" ]]; then
72
+ echo "Unable to resolve conda env prefix for $CONDA_ENV" >&2
73
+ exit 1
74
+ fi
75
+ export LD_LIBRARY_PATH="$CONDA_ENV_PREFIX/lib${LD_LIBRARY_PATH:+:$LD_LIBRARY_PATH}"
76
+ export PATH="$CONDA_ENV_PREFIX/bin:$PATH"
77
+ LIBSTDCXX_PATH="$CONDA_ENV_PREFIX/lib/libstdc++.so.6"
78
+ LIBGCC_PATH="$CONDA_ENV_PREFIX/lib/libgcc_s.so.1"
79
+ LD_PRELOAD_VALUE=""
80
+ if [[ -f "$LIBSTDCXX_PATH" ]]; then
81
+ LD_PRELOAD_VALUE="$LIBSTDCXX_PATH"
82
+ fi
83
+ if [[ -f "$LIBGCC_PATH" ]]; then
84
+ LD_PRELOAD_VALUE="${LD_PRELOAD_VALUE:+$LD_PRELOAD_VALUE:}$LIBGCC_PATH"
85
+ fi
86
+ export LD_PRELOAD="${LD_PRELOAD_VALUE}${LD_PRELOAD:+:$LD_PRELOAD}"
87
+ echo "Using conda env prefix=$CONDA_ENV_PREFIX"
88
+ echo "Using LD_PRELOAD=$LD_PRELOAD"
89
+
90
+ gpu_init_retry_state() {
91
+ local attempts="$1"
92
+ local host_name="$2"
93
+ local gpu_id="$3"
94
+ local reason="$4"
95
+ cat > "$RETRY_STATE_PATH" <<STATE
96
+ attempts=$attempts
97
+ host=$host_name
98
+ gpu=$gpu_id
99
+ updated_at=$(date '+%F %T')
100
+ reason=$reason
101
+ STATE
102
+ }
103
+
104
+ get_retry_attempts() {
105
+ if [[ -f "$RETRY_STATE_PATH" ]]; then
106
+ awk -F'=' '/^attempts=/{print $2}' "$RETRY_STATE_PATH" | tail -n 1
107
+ else
108
+ echo 0
109
+ fi
110
+ }
111
+
112
+ mark_retry_exhausted() {
113
+ local attempts="$1"
114
+ local reason="$2"
115
+ python - <<PY
116
+ from pathlib import Path
117
+ import sys, time
118
+ root = Path(r"$ROOT_DIR")
119
+ stats_path = Path(r"$STATS_NPZ")
120
+ video_id = r"$VIDEO_ID"
121
+ attempts = int(r"$attempts")
122
+ reason = r'''$reason'''
123
+ sys.path.insert(0, str(root))
124
+ from utils.stats_npz import update_video_stats
125
+ update_video_stats(
126
+ stats_path,
127
+ video_id,
128
+ process_status="skipped",
129
+ last_error=f"gpu_init_retry_exhausted after {attempts} attempts: {reason}",
130
+ updated_at=time.strftime("%Y-%m-%d %H:%M:%S"),
131
+ )
132
+ PY
133
+ rm -f "$RAW_VIDEO_DIR/$VIDEO_ID.mp4" "$RAW_VIDEO_DIR/$VIDEO_ID.mkv" "$RAW_VIDEO_DIR/$VIDEO_ID.webm" "$RAW_VIDEO_DIR/$VIDEO_ID.mov"
134
+ }
135
+
136
+ should_retry_gpu_init_failure() {
137
+ local log_path="$1"
138
+ local npz_dir="$2"
139
+ local npz_count="0"
140
+ if [[ -d "$npz_dir" ]]; then
141
+ npz_count="$(find "$npz_dir" -maxdepth 1 -name '*.npz' | wc -l | tr -d '[:space:]')"
142
+ fi
143
+ if [[ "$npz_count" != "0" ]]; then
144
+ return 1
145
+ fi
146
+ grep -Eiq 'CUDA failure 2: out of memory|Failed to create CUDAExecutionProvider|libcudnn\.so\.8|CUDA is not available|CUDAExecutionProvider|onnxruntime' "$log_path"
147
+ }
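
The helper above retries only when the task produced no NPZ output and the log matches a known GPU/onnxruntime initialization signature. A Python rendering of the same classification rule, convenient for testing the pattern against captured logs offline; the regex is copied from the grep invocation:

import re
from pathlib import Path

GPU_INIT_PATTERN = re.compile(
    r"CUDA failure 2: out of memory|Failed to create CUDAExecutionProvider"
    r"|libcudnn\.so\.8|CUDA is not available|CUDAExecutionProvider|onnxruntime",
    re.IGNORECASE,
)

def should_retry(log_path: Path, npz_dir: Path) -> bool:
    # A task that already wrote NPZ frames failed for some other reason.
    if npz_dir.is_dir() and any(npz_dir.glob("*.npz")):
        return False
    return bool(GPU_INIT_PATTERN.search(log_path.read_text(errors="replace")))
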
148
+
149
+ env_cmd=(env
150
+ "LD_LIBRARY_PATH=$LD_LIBRARY_PATH"
151
+ "LD_PRELOAD=$LD_PRELOAD"
152
+ "PATH=$PATH"
153
+ "CONDA_NO_PLUGINS=true"
154
+ )
155
 
156
  cmd=(python -u "$PIPELINE02"
157
  --raw-video-dir "$RAW_VIDEO_DIR"
 
169
  cmd+=(--delete-source-on-success)
170
  fi
171
 
172
+ TMP_LOG="$(mktemp "${TMP_ROOT%/}/dwpose_${VIDEO_ID}_XXXX.log")"
173
+ set +e
174
+ "${env_cmd[@]}" conda run -n "$CONDA_ENV" "${cmd[@]}" 2>&1 | tee "$TMP_LOG"
175
+ cmd_status=${PIPESTATUS[0]}
176
+ set -e
177
+
178
+ if [[ "$cmd_status" -ne 0 ]]; then
179
+ npz_dir="$DATASET_DIR/$VIDEO_ID/npz"
180
+ if should_retry_gpu_init_failure "$TMP_LOG" "$npz_dir"; then
181
+ attempts="$(get_retry_attempts)"
182
+ attempts="$((attempts + 1))"
183
+ last_reason="$(tail -n 80 "$TMP_LOG" | tr '\n' ' ' | sed 's/[[:space:]]\+/ /g' | cut -c1-1200)"
184
+ gpu_init_retry_state "$attempts" "$(hostname)" "${CUDA_VISIBLE_DEVICES:-unset}" "$last_reason"
185
+ if [[ "$attempts" -ge "$MAX_GPU_INIT_RETRIES" ]]; then
186
+ echo "GPU init failed on multiple GPUs; marking $VIDEO_ID as skipped after $attempts attempts." >&2
187
+ mark_retry_exhausted "$attempts" "$last_reason"
188
+ exit 0
189
+ fi
190
+ echo "GPU init failure for $VIDEO_ID on host=$(hostname) gpu=${CUDA_VISIBLE_DEVICES:-unset}; retry attempt $attempts/$MAX_GPU_INIT_RETRIES will be resubmitted later." >&2
191
+ exit 0
192
+ fi
193
+ exit "$cmd_status"
194
+ fi
195
+
196
+ rm -f "$RETRY_STATE_PATH"
slurm/submit_download_slurm.sh ADDED
@@ -0,0 +1,265 @@
1
+ #!/usr/bin/env bash
2
+ set -euo pipefail
3
+
4
+ ROOT_DIR="${ROOT_DIR:-/home/sf895/Sign-DWPose-2M}"
5
+ RUNTIME_ROOT="${RUNTIME_ROOT:-/home/sf895/Sign-DWPose-2M-runtime}"
6
+ STATE_ROOT="${STATE_ROOT:-/home/sf895/Sign-DWPose-2M-runtime}"
7
+ SOURCE_METADATA_CSV="${SOURCE_METADATA_CSV:-$RUNTIME_ROOT/Sign-DWPose-2M-metadata_ori.csv}"
8
+ OUTPUT_METADATA_CSV="${OUTPUT_METADATA_CSV:-$RUNTIME_ROOT/Sign-DWPose-2M-metadata_processed.csv}"
9
+ RAW_VIDEO_DIR="${RAW_VIDEO_DIR:-$RUNTIME_ROOT/raw_video}"
10
+ RAW_CAPTION_DIR="${RAW_CAPTION_DIR:-$RUNTIME_ROOT/raw_caption}"
11
+ RAW_METADATA_DIR="${RAW_METADATA_DIR:-$RUNTIME_ROOT/raw_metadata}"
12
+ DATASET_DIR="${DATASET_DIR:-$RUNTIME_ROOT/dataset}"
13
+ STATS_NPZ="${STATS_NPZ:-$RUNTIME_ROOT/stats.npz}"
14
+ SLURM_SCRIPT="$ROOT_DIR/slurm/process_download_array.slurm"
15
+ MANIFEST_DIR="${MANIFEST_DIR:-$STATE_ROOT/slurm/manifests}"
16
+ LOG_DIR="${LOG_DIR:-$STATE_ROOT/slurm/logs}"
17
+ STATE_DIR="${STATE_DIR:-$STATE_ROOT/slurm/state}"
18
+ DOWNLOAD_CLAIM_DIR="${DOWNLOAD_CLAIM_DIR:-$STATE_DIR/download_claims}"
19
+ LOCK_FILE="$STATE_DIR/submit_download.lock"
20
+ DOWNLOAD_CSV_LOCK_PATH="${DOWNLOAD_CSV_LOCK_PATH:-$STATE_ROOT/Sign-DWPose-2M-metadata_processed.csv.lock}"
21
+ PARTITIONS="main"
22
+ ACCOUNT=""
23
+ TIME_LIMIT="04:00:00"
24
+ CPUS_PER_TASK="1"
25
+ MEMORY="4G"
26
+ LIMIT=""
27
+ ARRAY_PARALLEL=""
28
+ MAX_BACKLOG_VIDEOS="180"
29
+ WORKERS="4"
30
+ VIDEO_IDS=()
31
+ FORCE_METADATA=0
32
+ FORCE_SUBTITLES=0
33
+ FORCE_DOWNLOAD=0
34
+ SKIP_VIDEO_DOWNLOAD=0
35
+ SKIP_SUBTITLES=0
36
+ COOKIES_FROM_BROWSER="${COOKIES_FROM_BROWSER:-}"
37
+ COOKIES_FILE="${COOKIES_FILE:-}"
38
+ EXTRACTOR_ARGS="${EXTRACTOR_ARGS:-}"
39
+
40
+ usage() {
41
+ cat <<USAGE
42
+ Usage:
43
+ bash slurm/submit_download_slurm.sh [options]
44
+
45
+ Options:
46
+ --partitions P1[,P2,...] Comma-separated CPU partitions. Default: main
47
+ --runtime-root DIR Shared runtime root
48
+ --state-root DIR Shared state/log root
49
+ --account NAME Optional Slurm account
50
+ --time HH:MM:SS Default: 04:00:00
51
+ --cpus-per-task N Default: 1
52
+ --mem SIZE Default: 4G
53
+ --limit N Only submit the first N pending, unclaimed videos this cycle
54
+ --array-parallel N Add a %N cap to the array
55
+ --max-backlog-videos N Max raw backlog + active download claims allowed. Default: 180
56
+ --workers N Max download tasks to submit in one cycle. Default: 4
57
+ --claim-dir DIR Download claim directory
58
+ --csv-lock-path PATH CSV lock path
59
+ --video-ids ID [ID ...] Restrict this cycle to specific videos
60
+ --force-metadata
61
+ --force-subtitles
62
+ --force-download
63
+ --skip-video-download
64
+ --skip-subtitles
65
+ --cookies PATH
66
+ --cookies-from-browser SPEC
67
+ --extractor-args TEXT
68
+ --help
69
+ USAGE
70
+ }
71
+
72
+ while [[ $# -gt 0 ]]; do
73
+ case "$1" in
74
+ --partitions) PARTITIONS="$2"; shift 2 ;;
75
+ --runtime-root) RUNTIME_ROOT="$2"; shift 2 ;;
76
+ --state-root) STATE_ROOT="$2"; shift 2 ;;
77
+ --account) ACCOUNT="$2"; shift 2 ;;
78
+ --time) TIME_LIMIT="$2"; shift 2 ;;
79
+ --cpus-per-task) CPUS_PER_TASK="$2"; shift 2 ;;
80
+ --mem) MEMORY="$2"; shift 2 ;;
81
+ --limit) LIMIT="$2"; shift 2 ;;
82
+ --array-parallel) ARRAY_PARALLEL="$2"; shift 2 ;;
83
+ --max-backlog-videos) MAX_BACKLOG_VIDEOS="$2"; shift 2 ;;
84
+ --workers) WORKERS="$2"; shift 2 ;;
85
+ --claim-dir) DOWNLOAD_CLAIM_DIR="$2"; shift 2 ;;
86
+ --csv-lock-path) DOWNLOAD_CSV_LOCK_PATH="$2"; shift 2 ;;
87
+ --video-ids)
88
+ shift
89
+ while [[ $# -gt 0 && "$1" != --* ]]; do
90
+ VIDEO_IDS+=("$1")
91
+ shift
92
+ done
93
+ ;;
94
+ --force-metadata) FORCE_METADATA=1; shift ;;
95
+ --force-subtitles) FORCE_SUBTITLES=1; shift ;;
96
+ --force-download) FORCE_DOWNLOAD=1; shift ;;
97
+ --skip-video-download) SKIP_VIDEO_DOWNLOAD=1; shift ;;
98
+ --skip-subtitles) SKIP_SUBTITLES=1; shift ;;
99
+ --cookies) COOKIES_FILE="$2"; shift 2 ;;
100
+ --cookies-from-browser) COOKIES_FROM_BROWSER="$2"; shift 2 ;;
101
+ --extractor-args) EXTRACTOR_ARGS="$2"; shift 2 ;;
102
+ -h|--help) usage; exit 0 ;;
103
+ *) echo "Unknown argument: $1" >&2; usage >&2; exit 1 ;;
104
+ esac
105
+ done
106
+
107
+ mkdir -p "$MANIFEST_DIR" "$LOG_DIR" "$DOWNLOAD_CLAIM_DIR" "$STATE_DIR"
108
+ exec 9>"$LOCK_FILE"
109
+ if ! flock -n 9; then
110
+ echo "Another submit_download_slurm.sh instance is running; skip this cycle."
111
+ exit 0
112
+ fi
113
+
114
+ TIMESTAMP="$(date '+%Y%m%d_%H%M%S')"
115
+ BASE_MANIFEST="$MANIFEST_DIR/pending_download_${TIMESTAMP}.txt"
116
+ ACTIVE_JOBS_FILE="$STATE_DIR/active_download_jobs_${TIMESTAMP}.txt"
117
+
118
+ squeue -h -u "$USER" -o "%A" | sed 's/_.*//' | sort -u > "$ACTIVE_JOBS_FILE"
119
+
120
+ SELECTED_COUNT="$(python - "$SOURCE_METADATA_CSV" "$OUTPUT_METADATA_CSV" "$RAW_VIDEO_DIR" "$DOWNLOAD_CLAIM_DIR" "$ACTIVE_JOBS_FILE" "$MAX_BACKLOG_VIDEOS" "$LIMIT" "$WORKERS" "$BASE_MANIFEST" "${VIDEO_IDS[*]:-}" <<'PY'
121
+ import csv
122
+ import sys
123
+ from pathlib import Path
124
+
125
+ source_csv = Path(sys.argv[1])
126
+ processed_csv = Path(sys.argv[2])
127
+ raw_video_dir = Path(sys.argv[3])
128
+ claim_dir = Path(sys.argv[4])
129
+ active_jobs_path = Path(sys.argv[5])
130
+ max_backlog = int(sys.argv[6])
131
+ limit_arg = sys.argv[7]
132
+ workers = int(sys.argv[8])
133
+ manifest_path = Path(sys.argv[9])
134
+ video_ids_joined = sys.argv[10].strip()
135
+ limit = int(limit_arg) if limit_arg else None
136
+ video_filter = set(video_ids_joined.split()) if video_ids_joined else None
137
+
138
+ video_extensions = {'.mp4', '.mkv', '.webm', '.mov'}
139
+ active_jobs = set()
140
+ if active_jobs_path.exists():
141
+ active_jobs = {line.strip() for line in active_jobs_path.read_text(encoding='utf-8').splitlines() if line.strip()}
142
+
143
+ claim_dir.mkdir(parents=True, exist_ok=True)
144
+ active_claims = set()
145
+ for claim_path in claim_dir.glob('*.claim'):
146
+ try:
147
+ lines = claim_path.read_text(encoding='utf-8').splitlines()
148
+ except OSError:
149
+ continue
150
+ job_id = ''
151
+ pid = None
152
+ for line in lines:
153
+ if line.startswith('job_id='):
154
+ job_id = line.split('=', 1)[1].strip()
155
+ elif line.startswith('pid='):
156
+ try:
157
+ pid = int(line.split('=', 1)[1].strip())
158
+ except ValueError:
159
+ pid = None
160
+ alive = False
161
+ if job_id:
162
+ alive = job_id in active_jobs
163
+ elif pid is not None:
164
+ try:
165
+ import os
166
+ os.kill(pid, 0)
167
+ alive = True
168
+ except OSError:
169
+ alive = False
170
+ if alive:
171
+ active_claims.add(claim_path.stem)
172
+ else:
173
+ claim_path.unlink(missing_ok=True)
174
+
175
+ raw_backlog = 0
176
+ existing_raw = set()
177
+ if raw_video_dir.exists():
178
+ for path in raw_video_dir.iterdir():
179
+ if path.is_file() and path.suffix.lower() in video_extensions:
180
+ raw_backlog += 1
181
+ existing_raw.add(path.stem)
182
+
183
+ remaining_slots = max(0, max_backlog - raw_backlog - len(active_claims))
184
+ remaining_slots = min(remaining_slots, workers)
185
+ if limit is not None:
186
+ remaining_slots = min(remaining_slots, limit)
187
+ if remaining_slots <= 0:
188
+ manifest_path.write_text('', encoding='utf-8')
189
+ print(0)
190
+ raise SystemExit
191
+
192
+ rows = []
193
+ fieldnames = None
194
+ csv_path = processed_csv if processed_csv.exists() else source_csv
195
+ with csv_path.open('r', encoding='utf-8', newline='') as handle:
196
+ reader = csv.DictReader(handle)
197
+ fieldnames = reader.fieldnames or []
198
+ for row in reader:
199
+ rows.append(row)
200
+
201
+ selected = []
202
+ for row in rows:
203
+ video_id = (row.get('video_id') or '').strip()
204
+ if not video_id:
205
+ continue
206
+ if video_filter is not None and video_id not in video_filter:
207
+ continue
208
+ if video_id in active_claims or video_id in existing_raw:
209
+ continue
210
+ download_status = (row.get('download_status') or '').strip()
211
+ if download_status in {'ok', 'skipped'}:
212
+ continue
213
+ selected.append(video_id)
214
+ if len(selected) >= remaining_slots:
215
+ break
216
+
217
+ manifest_path.write_text(''.join(f'{video_id}\n' for video_id in selected), encoding='utf-8')
218
+ print(len(selected))
219
+ PY
220
+ )"
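
Three caps bound each cycle: the raw-backlog budget, the per-cycle worker count, and the optional --limit. A worked example of the arithmetic above, with illustrative numbers:

# max_backlog=180, raw_backlog=150 on disk, active_claims=20, workers=4
remaining_slots = max(0, 180 - 150 - 20)   # 10 backlog slots left
remaining_slots = min(remaining_slots, 4)  # capped by --workers -> 4
# an explicit --limit would cap it further; 4 videos are submitted this cycle
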
221
+ rm -f "$ACTIVE_JOBS_FILE"
222
+
223
+ if [[ "$SELECTED_COUNT" == "0" ]]; then
224
+ echo "No pending videos to download, or download backlog cap already reached."
225
+ rm -f "$BASE_MANIFEST"
226
+ exit 0
227
+ fi
228
+
229
+ echo "Created manifest: $BASE_MANIFEST"
230
+ echo "Pending videos selected this cycle: $SELECTED_COUNT"
231
+
232
+ ARRAY_SPEC="0-$((SELECTED_COUNT - 1))"
233
+ if [[ -n "$ARRAY_PARALLEL" ]]; then
234
+ ARRAY_SPEC+="%$ARRAY_PARALLEL"
235
+ fi
236
+
237
+ cmd=(sbatch
238
+ --parsable
239
+ --partition "$PARTITIONS"
240
+ --time "$TIME_LIMIT"
241
+ --cpus-per-task "$CPUS_PER_TASK"
242
+ --mem "$MEMORY"
243
+ --array "$ARRAY_SPEC"
244
+ --output "$LOG_DIR/download_%A_%a.out"
245
+ --error "$LOG_DIR/download_%A_%a.err"
246
+ --export "ALL,ROOT_DIR=$ROOT_DIR,RUNTIME_ROOT=$RUNTIME_ROOT,STATE_ROOT=$STATE_ROOT,CONDA_SH=/home/sf895/miniconda3/etc/profile.d/conda.sh,CONDA_ENV=signx2,SOURCE_METADATA_CSV=$SOURCE_METADATA_CSV,OUTPUT_METADATA_CSV=$OUTPUT_METADATA_CSV,RAW_VIDEO_DIR=$RAW_VIDEO_DIR,RAW_CAPTION_DIR=$RAW_CAPTION_DIR,RAW_METADATA_DIR=$RAW_METADATA_DIR,DATASET_DIR=$DATASET_DIR,STATS_NPZ=$STATS_NPZ,PIPELINE01=$ROOT_DIR/scripts/pipeline01_download_video_fix_caption.py,DOWNLOAD_CLAIM_DIR=$DOWNLOAD_CLAIM_DIR,DOWNLOAD_CSV_LOCK_PATH=$DOWNLOAD_CSV_LOCK_PATH,MANIFEST=$BASE_MANIFEST,FORCE_METADATA=$FORCE_METADATA,FORCE_SUBTITLES=$FORCE_SUBTITLES,FORCE_DOWNLOAD=$FORCE_DOWNLOAD,SKIP_VIDEO_DOWNLOAD=$SKIP_VIDEO_DOWNLOAD,SKIP_SUBTITLES=$SKIP_SUBTITLES,COOKIES_FILE=$COOKIES_FILE,COOKIES_FROM_BROWSER=$COOKIES_FROM_BROWSER,EXTRACTOR_ARGS=$EXTRACTOR_ARGS"
247
+ )
248
+ if [[ -n "$ACCOUNT" ]]; then
249
+ cmd+=(--account "$ACCOUNT")
250
+ fi
251
+ cmd+=("$SLURM_SCRIPT")
252
+
253
+ JOB_ID="$("${cmd[@]}")"
254
+ echo "Submitted download array job: $JOB_ID"
255
+
256
+ while IFS= read -r video_id; do
257
+ [[ -z "$video_id" ]] && continue
258
+ cat > "$DOWNLOAD_CLAIM_DIR/${video_id}.claim" <<CLAIM
259
+ job_id=$JOB_ID
260
+ video_id=$video_id
261
+ submitted_at=$(date '+%F %T')
262
+ CLAIM
263
+ done < "$BASE_MANIFEST"
264
+
265
+ echo "SUBMITTED_DOWNLOAD_COUNT=$SELECTED_COUNT"
slurm/submit_dwpose_slurm.sh CHANGED
@@ -15,7 +15,7 @@ LOG_DIR="${LOG_DIR:-$STATE_ROOT/slurm/logs}"
15
  STATE_DIR="${STATE_DIR:-$STATE_ROOT/slurm/state}"
16
  CLAIM_DIR="${CLAIM_DIR:-$STATE_DIR/claims}"
17
  LOCK_FILE="$STATE_DIR/submit.lock"
18
- PARTITIONS="gpu"
19
  ACCOUNT=""
20
  TIME_LIMIT="24:00:00"
21
  CPUS_PER_TASK="8"
@@ -23,9 +23,10 @@ MEMORY="32G"
23
  FPS="24"
24
  LIMIT=""
25
  ARRAY_PARALLEL=""
26
- MAX_BACKLOG_VIDEOS="340"
27
  FORCE_PROCESS=0
28
  DELETE_SOURCE_ON_SUCCESS=0
 
29
 
30
  usage() {
31
  cat <<USAGE
@@ -33,98 +34,42 @@ Usage:
33
  bash slurm/submit_dwpose_slurm.sh [options]
34
 
35
  Options:
36
- --partitions P1[,P2,...] Comma-separated partitions. Default: gpu
37
  --runtime-root DIR Shared runtime root for data/state
38
  --state-root DIR Shared state/log root (default: runtime root)
39
- --account NAME Optional Slurm account
40
- --time HH:MM:SS Default: 24:00:00
41
- --cpus-per-task N Default: 8
42
- --mem SIZE Default: 32G
43
- --fps N Default: 24
44
- --limit N Only submit the first N pending, unclaimed videos this cycle
45
- --max-backlog-videos N Max claimed queued/running videos allowed at once. Default: 340
46
- --array-parallel N Add a %N cap to each array
47
- --force-process Re-run videos even if marked complete
 
48
  --delete-source-on-success Delete raw videos after successful processing
49
  --help
50
-
51
- Behavior:
52
- - Uses a claim directory to avoid resubmitting videos that are already queued/running.
53
- - Cleans stale claims whose Slurm jobs are no longer active.
54
- - Builds a manifest of pending raw videos.
55
- - Submits one Slurm array per partition.
56
- - Each array task uses 1 GPU and processes exactly 1 video.
57
-
58
- Examples:
59
- bash slurm/submit_dwpose_slurm.sh
60
- bash slurm/submit_dwpose_slurm.sh --partitions gpu --array-parallel 32
61
- bash slurm/submit_dwpose_slurm.sh --partitions gpu --limit 500
62
  USAGE
63
  }
64
 
65
  while [[ $# -gt 0 ]]; do
66
  case "$1" in
67
- --partitions)
68
- PARTITIONS="$2"
69
- shift 2
70
- ;;
71
- --runtime-root)
72
- RUNTIME_ROOT="$2"
73
- shift 2
74
- ;;
75
- --state-root)
76
- STATE_ROOT="$2"
77
- shift 2
78
- ;;
79
- --account)
80
- ACCOUNT="$2"
81
- shift 2
82
- ;;
83
- --time)
84
- TIME_LIMIT="$2"
85
- shift 2
86
- ;;
87
- --cpus-per-task)
88
- CPUS_PER_TASK="$2"
89
- shift 2
90
- ;;
91
- --mem)
92
- MEMORY="$2"
93
- shift 2
94
- ;;
95
- --fps)
96
- FPS="$2"
97
- shift 2
98
- ;;
99
- --limit)
100
- LIMIT="$2"
101
- shift 2
102
- ;;
103
- --max-backlog-videos)
104
- MAX_BACKLOG_VIDEOS="$2"
105
- shift 2
106
- ;;
107
- --array-parallel)
108
- ARRAY_PARALLEL="$2"
109
- shift 2
110
- ;;
111
- --force-process)
112
- FORCE_PROCESS=1
113
- shift
114
- ;;
115
- --delete-source-on-success)
116
- DELETE_SOURCE_ON_SUCCESS=1
117
- shift
118
- ;;
119
- -h|--help)
120
- usage
121
- exit 0
122
- ;;
123
- *)
124
- echo "Unknown argument: $1" >&2
125
- usage >&2
126
- exit 1
127
- ;;
128
  esac
129
  done
130
 
@@ -135,51 +80,43 @@ if ! flock -n 9; then
135
  exit 0
136
  fi
137
 
138
- IFS=',' read -r -a PARTITION_LIST <<< "$PARTITIONS"
139
-
140
  TIMESTAMP="$(date '+%Y%m%d_%H%M%S')"
141
  BASE_MANIFEST="$MANIFEST_DIR/pending_videos_${TIMESTAMP}.txt"
142
  ACTIVE_JOBS_FILE="$STATE_DIR/active_jobs_${TIMESTAMP}.txt"
 
 
143
  squeue -h -u "$USER" -o "%A" | sed 's/_.*//' | sort -u > "$ACTIVE_JOBS_FILE"
144
 
145
- PENDING_COUNT="$({
146
- cd "$ROOT_DIR"
147
- python - "$ROOT_DIR" "$RAW_VIDEO_DIR" "$DATASET_DIR" "$STATS_NPZ" "$LIMIT" "$FORCE_PROCESS" "$BASE_MANIFEST" "$CLAIM_DIR" "$ACTIVE_JOBS_FILE" "$MAX_BACKLOG_VIDEOS" <<'PY'
148
  import sys
149
  from pathlib import Path
150
 
151
- root_dir = Path(sys.argv[1])
152
- raw_video_dir = Path(sys.argv[2])
153
- dataset_dir = Path(sys.argv[3])
154
- stats_npz = Path(sys.argv[4])
155
- limit_arg = sys.argv[5]
156
- force = sys.argv[6] == "1"
157
- manifest_path = Path(sys.argv[7])
158
- claim_dir = Path(sys.argv[8])
159
- active_jobs_path = Path(sys.argv[9])
160
- max_backlog = int(sys.argv[10])
161
  limit = int(limit_arg) if limit_arg else None
162
 
163
- sys.path.insert(0, str(root_dir))
164
- from utils.stats_npz import load_stats
165
-
166
- video_extensions = {".mp4", ".mkv", ".webm", ".mov"}
167
- stats = load_stats(stats_npz)
168
  claim_dir.mkdir(parents=True, exist_ok=True)
169
  active_jobs = set()
170
  if active_jobs_path.exists():
171
- active_jobs = {line.strip() for line in active_jobs_path.read_text(encoding="utf-8").splitlines() if line.strip()}
172
 
173
  active_claims = set()
174
- for claim_path in claim_dir.glob("*.claim"):
175
  try:
176
- lines = claim_path.read_text(encoding="utf-8").splitlines()
177
  except OSError:
178
  continue
179
- job_id = ""
180
  for line in lines:
181
- if line.startswith("job_id="):
182
- job_id = line.split("=", 1)[1].strip()
183
  break
184
  video_id = claim_path.stem
185
  if job_id and job_id in active_jobs:
@@ -188,32 +125,26 @@ for claim_path in claim_dir.glob("*.claim"):
188
  claim_path.unlink(missing_ok=True)
189
 
190
  remaining_slots = max(0, max_backlog - len(active_claims))
191
- if remaining_slots == 0:
192
- manifest_path.write_text("", encoding="utf-8")
193
- print(0)
194
- raise SystemExit(0)
195
-
196
  selected = []
197
- if raw_video_dir.exists():
198
  for path in sorted(raw_video_dir.iterdir()):
199
  if not path.is_file() or path.suffix.lower() not in video_extensions:
200
  continue
201
  video_id = path.stem
202
  if video_id in active_claims:
203
  continue
204
- npz_dir = dataset_dir / video_id / "npz"
205
- complete_marker = npz_dir / ".complete"
206
- if not force and npz_dir.exists() and complete_marker.exists() and stats.get(video_id, {}).get("process_status") == "ok":
207
  continue
208
  selected.append(video_id)
209
  if len(selected) >= remaining_slots:
210
  break
211
  if limit is not None and len(selected) >= limit:
212
  break
213
- manifest_path.write_text("".join(f"{video_id}\n" for video_id in selected), encoding="utf-8")
214
  print(len(selected))
215
  PY
216
- })"
217
  rm -f "$ACTIVE_JOBS_FILE"
218
 
219
  if [[ "$PENDING_COUNT" == "0" ]]; then
@@ -222,8 +153,65 @@ if [[ "$PENDING_COUNT" == "0" ]]; then
222
  exit 0
223
  fi
224
 
225
  echo "Created manifest: $BASE_MANIFEST"
226
  echo "Pending videos selected this cycle: $PENDING_COUNT"
 
 
227
 
228
  write_claims() {
229
  local manifest="$1"
@@ -238,32 +226,32 @@ CLAIM
238
  done < "$manifest"
239
  }
240
 
241
- submit_partition() {
242
  local partition="$1"
243
- local manifest="$2"
244
- local count="$3"
 
245
  local array_spec="0-$((count - 1))"
246
  local job_output job_id
247
  if [[ -n "$ARRAY_PARALLEL" ]]; then
248
  array_spec+="%${ARRAY_PARALLEL}"
249
  fi
250
-
251
  local -a cmd=(sbatch
252
  --partition "$partition"
 
253
  --array "$array_spec"
254
  --cpus-per-task "$CPUS_PER_TASK"
255
  --mem "$MEMORY"
256
  --time "$TIME_LIMIT"
257
- --output "$LOG_DIR/dwpose_${partition}_%A_%a.out"
258
- --error "$LOG_DIR/dwpose_${partition}_%A_%a.err"
259
  --export "ALL,ROOT_DIR=$ROOT_DIR,RUNTIME_ROOT=$RUNTIME_ROOT,STATE_ROOT=$STATE_ROOT,CONDA_SH=$CONDA_SH,CONDA_ENV=$CONDA_ENV,RAW_VIDEO_DIR=$RAW_VIDEO_DIR,DATASET_DIR=$DATASET_DIR,STATS_NPZ=$STATS_NPZ,FPS=$FPS,FORCE_PROCESS=$FORCE_PROCESS,DELETE_SOURCE_ON_SUCCESS=$DELETE_SOURCE_ON_SUCCESS,MANIFEST=$manifest,CLAIM_DIR=$CLAIM_DIR"
260
  )
261
  if [[ -n "$ACCOUNT" ]]; then
262
  cmd+=(--account "$ACCOUNT")
263
  fi
264
  cmd+=("$SLURM_SCRIPT")
265
-
266
- echo "Submitting partition=$partition array=$array_spec manifest=$manifest"
267
  job_output="$("${cmd[@]}")"
268
  echo "$job_output"
269
  job_id="$(awk '/Submitted batch job/ {print $4}' <<< "$job_output" | tail -n 1)"
@@ -274,21 +262,37 @@ submit_partition() {
274
  write_claims "$manifest" "$job_id"
275
  }
276
 
277
- if [[ ${#PARTITION_LIST[@]} -eq 1 ]]; then
278
- submit_partition "${PARTITION_LIST[0]}" "$BASE_MANIFEST" "$PENDING_COUNT"
279
- echo "SUBMITTED_VIDEO_COUNT=$PENDING_COUNT"
280
- exit 0
281
- fi
282
-
283
- for idx in "${!PARTITION_LIST[@]}"; do
284
- shard_manifest="$MANIFEST_DIR/pending_videos_${TIMESTAMP}.part${idx}.txt"
285
- awk -v mod="${#PARTITION_LIST[@]}" -v rem="$idx" '((NR-1) % mod) == rem { print }' "$BASE_MANIFEST" > "$shard_manifest"
286
- shard_count="$(wc -l < "$shard_manifest" | tr -d '[:space:]')"
 
287
  if [[ "$shard_count" == "0" ]]; then
288
  rm -f "$shard_manifest"
289
  continue
290
  fi
291
- submit_partition "${PARTITION_LIST[$idx]}" "$shard_manifest" "$shard_count"
292
- done
 
293
 
294
- echo "SUBMITTED_VIDEO_COUNT=$PENDING_COUNT"
 
15
  STATE_DIR="${STATE_DIR:-$STATE_ROOT/slurm/state}"
16
  CLAIM_DIR="${CLAIM_DIR:-$STATE_DIR/claims}"
17
  LOCK_FILE="$STATE_DIR/submit.lock"
18
+ PARTITIONS="gpu,gpu-redhat,cgpu"
19
  ACCOUNT=""
20
  TIME_LIMIT="24:00:00"
21
  CPUS_PER_TASK="8"
 
23
  FPS="24"
24
  LIMIT=""
25
  ARRAY_PARALLEL=""
26
+ MAX_BACKLOG_VIDEOS="180"
27
  FORCE_PROCESS=0
28
  DELETE_SOURCE_ON_SUCCESS=0
29
+ MAX_PER_NODE=""
30
 
31
  usage() {
32
  cat <<USAGE
 
34
  bash slurm/submit_dwpose_slurm.sh [options]
35
 
36
  Options:
37
+ --partitions P1[,P2,...] Comma-separated partitions. Default: gpu,gpu-redhat,cgpu
38
  --runtime-root DIR Shared runtime root for data/state
39
  --state-root DIR Shared state/log root (default: runtime root)
40
+ --account NAME Optional Slurm account
41
+ --time HH:MM:SS Default: 24:00:00
42
+ --cpus-per-task N Default: 8
43
+ --mem SIZE Default: 32G
44
+ --fps N Default: 24
45
+ --limit N Only submit the first N pending, unclaimed videos this cycle
46
+ --max-backlog-videos N Max claimed queued/running videos allowed at once. Default: 180
47
+ --array-parallel N Add a %N cap to each node-local array
48
+ --max-per-node N Cap submissions per node in a cycle
49
+ --force-process Re-run videos even if marked complete
50
  --delete-source-on-success Delete raw videos after successful processing
51
  --help
 
52
  USAGE
53
  }
54
 
55
  while [[ $# -gt 0 ]]; do
56
  case "$1" in
57
+ --partitions) PARTITIONS="$2"; shift 2 ;;
58
+ --runtime-root) RUNTIME_ROOT="$2"; shift 2 ;;
59
+ --state-root) STATE_ROOT="$2"; shift 2 ;;
60
+ --account) ACCOUNT="$2"; shift 2 ;;
61
+ --time) TIME_LIMIT="$2"; shift 2 ;;
62
+ --cpus-per-task) CPUS_PER_TASK="$2"; shift 2 ;;
63
+ --mem) MEMORY="$2"; shift 2 ;;
64
+ --fps) FPS="$2"; shift 2 ;;
65
+ --limit) LIMIT="$2"; shift 2 ;;
66
+ --max-backlog-videos) MAX_BACKLOG_VIDEOS="$2"; shift 2 ;;
67
+ --array-parallel) ARRAY_PARALLEL="$2"; shift 2 ;;
68
+ --max-per-node) MAX_PER_NODE="$2"; shift 2 ;;
69
+ --force-process) FORCE_PROCESS=1; shift ;;
70
+ --delete-source-on-success) DELETE_SOURCE_ON_SUCCESS=1; shift ;;
71
+ -h|--help) usage; exit 0 ;;
72
+ *) echo "Unknown argument: $1" >&2; usage >&2; exit 1 ;;
 
73
  esac
74
  done
75
 
 
80
  exit 0
81
  fi
82
 
 
 
83
  TIMESTAMP="$(date '+%Y%m%d_%H%M%S')"
84
  BASE_MANIFEST="$MANIFEST_DIR/pending_videos_${TIMESTAMP}.txt"
85
  ACTIVE_JOBS_FILE="$STATE_DIR/active_jobs_${TIMESTAMP}.txt"
86
+ NODE_SLOTS_FILE="$STATE_DIR/node_slots_${TIMESTAMP}.txt"
87
+
88
  squeue -h -u "$USER" -o "%A" | sed 's/_.*//' | sort -u > "$ACTIVE_JOBS_FILE"
89
 
90
+ PENDING_COUNT="$(python - "$RAW_VIDEO_DIR" "$DATASET_DIR" "$LIMIT" "$FORCE_PROCESS" "$BASE_MANIFEST" "$CLAIM_DIR" "$ACTIVE_JOBS_FILE" "$MAX_BACKLOG_VIDEOS" <<'PY'
 
 
91
  import sys
92
  from pathlib import Path
93
 
94
+ raw_video_dir = Path(sys.argv[1])
95
+ dataset_dir = Path(sys.argv[2])
96
+ limit_arg = sys.argv[3]
97
+ force = sys.argv[4] == '1'
98
+ manifest_path = Path(sys.argv[5])
99
+ claim_dir = Path(sys.argv[6])
100
+ active_jobs_path = Path(sys.argv[7])
101
+ max_backlog = int(sys.argv[8])
 
 
102
  limit = int(limit_arg) if limit_arg else None
103
 
104
+ video_extensions = {'.mp4', '.mkv', '.webm', '.mov'}
 
105
  claim_dir.mkdir(parents=True, exist_ok=True)
106
  active_jobs = set()
107
  if active_jobs_path.exists():
108
+ active_jobs = {line.strip() for line in active_jobs_path.read_text(encoding='utf-8').splitlines() if line.strip()}
109
 
110
  active_claims = set()
111
+ for claim_path in claim_dir.glob('*.claim'):
112
  try:
113
+ lines = claim_path.read_text(encoding='utf-8').splitlines()
114
  except OSError:
115
  continue
116
+ job_id = ''
117
  for line in lines:
118
+ if line.startswith('job_id='):
119
+ job_id = line.split('=', 1)[1].strip()
120
  break
121
  video_id = claim_path.stem
122
  if job_id and job_id in active_jobs:
 
125
  claim_path.unlink(missing_ok=True)
126
 
127
  remaining_slots = max(0, max_backlog - len(active_claims))
 
128
  selected = []
129
+ if remaining_slots > 0 and raw_video_dir.exists():
130
  for path in sorted(raw_video_dir.iterdir()):
131
  if not path.is_file() or path.suffix.lower() not in video_extensions:
132
  continue
133
  video_id = path.stem
134
  if video_id in active_claims:
135
  continue
136
+ complete_marker = dataset_dir / video_id / 'npz' / '.complete'
137
+ if not force and complete_marker.exists():
 
138
  continue
139
  selected.append(video_id)
140
  if len(selected) >= remaining_slots:
141
  break
142
  if limit is not None and len(selected) >= limit:
143
  break
144
+ manifest_path.write_text(''.join(f'{video_id}\n' for video_id in selected), encoding='utf-8')
145
  print(len(selected))
146
  PY
147
+ )"
148
  rm -f "$ACTIVE_JOBS_FILE"
149
 
150
  if [[ "$PENDING_COUNT" == "0" ]]; then
 
153
  exit 0
154
  fi
155
 
156
+ AVAILABLE_SLOTS="$(python - "$PARTITIONS" "$NODE_SLOTS_FILE" "$MAX_PER_NODE" <<'PY'
157
+ import re
158
+ import subprocess
159
+ import sys
160
+ from pathlib import Path
161
+
162
+ parts = [p for p in sys.argv[1].split(',') if p]
163
+ out_path = Path(sys.argv[2])
164
+ max_per_node_arg = sys.argv[3]
165
+ max_per_node = int(max_per_node_arg) if max_per_node_arg else None
166
+ slots = []
167
+ for part in parts:
168
+ proc = subprocess.run(['sinfo', '-h', '-N', '-p', part, '-o', '%N'], capture_output=True, text=True, check=False)
169
+ if proc.returncode != 0:
170
+ continue
171
+ nodes = [line.strip() for line in proc.stdout.splitlines() if line.strip()]
172
+ for node in nodes:
173
+ node_proc = subprocess.run(['scontrol', 'show', 'node', node, '-o'], capture_output=True, text=True, check=False)
174
+ if node_proc.returncode != 0:
175
+ continue
176
+ line = node_proc.stdout.strip()
177
+ if not line:
178
+ continue
179
+ state_m = re.search(r'\bState=([^ ]+)', line)
180
+ state = state_m.group(1).lower() if state_m else ''
181
+ if any(flag in state for flag in ('drain', 'drained', 'down', 'fail', 'inval')):
182
+ continue
183
+ cfg_m = re.search(r'\bCfgTRES=(?:[^ ]*,)?gres/gpu=(\d+)', line)
184
+ alloc_m = re.search(r'\bAllocTRES=(?:[^ ]*,)?gres/gpu=(\d+)', line)
185
+ total = int(cfg_m.group(1)) if cfg_m else 0
186
+ used = int(alloc_m.group(1)) if alloc_m else 0
187
+ free = total - used
188
+ if max_per_node is not None:
189
+ free = min(free, max_per_node)
190
+ if free > 0:
191
+ slots.append((part, node, free))
192
+ slots.sort(key=lambda item: (-item[2], item[0], item[1]))
193
+ out_path.write_text(''.join(f'{part}\t{node}\t{free}\n' for part, node, free in slots), encoding='utf-8')
194
+ print(sum(free for _, _, free in slots))
195
+ PY
196
+ )"
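
Free capacity per node is the CfgTRES GPU count minus the AllocTRES GPU count, taken from scontrol show node -o. A sketch of the extraction on a sample line (the line is typical scontrol output, not captured from a real cluster):

import re

line = "NodeName=gpu-node01 State=MIXED CfgTRES=cpu=48,mem=512000M,gres/gpu=4 AllocTRES=cpu=16,mem=128000M,gres/gpu=1"
cfg_m = re.search(r"\bCfgTRES=(?:[^ ]*,)?gres/gpu=(\d+)", line)
alloc_m = re.search(r"\bAllocTRES=(?:[^ ]*,)?gres/gpu=(\d+)", line)
total = int(cfg_m.group(1)) if cfg_m else 0
used = int(alloc_m.group(1)) if alloc_m else 0
print(total - used)  # 3 free GPUs on this node
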
197
+
198
+ if [[ -z "$AVAILABLE_SLOTS" || "$AVAILABLE_SLOTS" == "0" ]]; then
199
+ echo "No free GPU slots detected across requested partitions."
200
+ rm -f "$BASE_MANIFEST" "$NODE_SLOTS_FILE"
201
+ exit 0
202
+ fi
203
+
204
+ TARGET_COUNT="$PENDING_COUNT"
205
+ if [[ "$TARGET_COUNT" -gt "$AVAILABLE_SLOTS" ]]; then
206
+ TARGET_COUNT="$AVAILABLE_SLOTS"
207
+ head -n "$TARGET_COUNT" "$BASE_MANIFEST" > "$BASE_MANIFEST.tmp"
208
+ mv "$BASE_MANIFEST.tmp" "$BASE_MANIFEST"
209
+ fi
210
+
211
  echo "Created manifest: $BASE_MANIFEST"
212
  echo "Pending videos selected this cycle: $PENDING_COUNT"
213
+ echo "Available GPU slots right now: $AVAILABLE_SLOTS"
214
+ echo "Submitting now: $TARGET_COUNT"
215
 
216
  write_claims() {
217
  local manifest="$1"
 
226
  done < "$manifest"
227
  }
228
 
229
+ submit_node() {
230
  local partition="$1"
231
+ local node="$2"
232
+ local manifest="$3"
233
+ local count="$4"
234
  local array_spec="0-$((count - 1))"
235
  local job_output job_id
236
  if [[ -n "$ARRAY_PARALLEL" ]]; then
237
  array_spec+="%${ARRAY_PARALLEL}"
238
  fi
 
239
  local -a cmd=(sbatch
240
  --partition "$partition"
241
+ --nodelist "$node"
242
  --array "$array_spec"
243
  --cpus-per-task "$CPUS_PER_TASK"
244
  --mem "$MEMORY"
245
  --time "$TIME_LIMIT"
246
+ --output "$LOG_DIR/dwpose_${partition}_${node}_%A_%a.out"
247
+ --error "$LOG_DIR/dwpose_${partition}_${node}_%A_%a.err"
248
  --export "ALL,ROOT_DIR=$ROOT_DIR,RUNTIME_ROOT=$RUNTIME_ROOT,STATE_ROOT=$STATE_ROOT,CONDA_SH=$CONDA_SH,CONDA_ENV=$CONDA_ENV,RAW_VIDEO_DIR=$RAW_VIDEO_DIR,DATASET_DIR=$DATASET_DIR,STATS_NPZ=$STATS_NPZ,FPS=$FPS,FORCE_PROCESS=$FORCE_PROCESS,DELETE_SOURCE_ON_SUCCESS=$DELETE_SOURCE_ON_SUCCESS,MANIFEST=$manifest,CLAIM_DIR=$CLAIM_DIR"
249
  )
250
  if [[ -n "$ACCOUNT" ]]; then
251
  cmd+=(--account "$ACCOUNT")
252
  fi
253
  cmd+=("$SLURM_SCRIPT")
254
+ echo "Submitting partition=$partition node=$node array=$array_spec manifest=$manifest"
 
255
  job_output="$("${cmd[@]}")"
256
  echo "$job_output"
257
  job_id="$(awk '/Submitted batch job/ {print $4}' <<< "$job_output" | tail -n 1)"
 
262
  write_claims "$manifest" "$job_id"
263
  }
264
 
265
+ submitted_total=0
266
+ failed_nodes=0
267
+ line_no=1
268
+ while IFS=$'\t' read -r partition node free_slots; do
269
+ [[ -z "$partition" ]] && continue
270
+ if [[ "$submitted_total" -ge "$TARGET_COUNT" ]]; then
271
+ break
272
+ fi
273
+ remaining=$((TARGET_COUNT - submitted_total))
274
+ assign_count="$free_slots"
275
+ if [[ "$assign_count" -gt "$remaining" ]]; then
276
+ assign_count="$remaining"
277
+ fi
278
+ shard_manifest="$MANIFEST_DIR/pending_videos_${TIMESTAMP}.${partition}.${node}.txt"
279
+ sed -n "${line_no},$((line_no + assign_count - 1))p" "$BASE_MANIFEST" > "$shard_manifest"
280
+ shard_count="$(wc -l < "$shard_manifest" | tr -d '[:space:]')"
281
  if [[ "$shard_count" == "0" ]]; then
282
  rm -f "$shard_manifest"
283
  continue
284
  fi
285
+ if submit_node "$partition" "$node" "$shard_manifest" "$shard_count"; then
286
+ submitted_total=$((submitted_total + shard_count))
287
+ line_no=$((line_no + shard_count))
288
+ else
289
+ echo "Node-local submit failed for partition=$partition node=$node; leaving those videos unclaimed for later nodes or the next cycle." >&2
290
+ rm -f "$shard_manifest"
291
+ failed_nodes=$((failed_nodes + 1))
292
+ continue
293
+ fi
294
+ done < "$NODE_SLOTS_FILE"
295
 
296
+ echo "SUBMITTED_VIDEO_COUNT=$submitted_total"
297
+ echo "FAILED_NODE_SUBMITS=$failed_nodes"
298
+ rm -f "$NODE_SLOTS_FILE"
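Note on the scheduling logic above: the heredoc derives free GPUs per node by subtracting the AllocTRES count from the CfgTRES count (cfg_m is set up earlier in the heredoc, outside this hunk), and the submit loop then walks nodes in descending free-slot order, handing each node at most as many array tasks as it has idle GPUs. The following is a minimal, self-contained Python sketch of that behaviour, not part of the commit; the function names, node names, and scontrol line are illustrative:

    import re
    from typing import List, Tuple

    # Illustrative equivalents of the regexes in the heredoc above
    CFG_RE = re.compile(r'\bCfgTRES=.*?\bgres/gpu=(\d+)')
    ALLOC_RE = re.compile(r'\bAllocTRES=.*?\bgres/gpu=(\d+)')

    def free_gpus(scontrol_line: str) -> int:
        # CfgTRES carries the configured GPU count, AllocTRES the in-use count
        cfg = CFG_RE.search(scontrol_line)
        alloc = ALLOC_RE.search(scontrol_line)
        total = int(cfg.group(1)) if cfg else 0
        used = int(alloc.group(1)) if alloc else 0
        return max(total - used, 0)

    def assign_shards(slots: List[Tuple[str, str, int]],
                      pending: int) -> List[Tuple[str, str, int]]:
        # Largest free count first, mirroring the slots.sort key above
        shards, remaining = [], pending
        for partition, node, free in sorted(slots, key=lambda s: (-s[2], s[0], s[1])):
            if remaining <= 0:
                break
            take = min(free, remaining)
            shards.append((partition, node, take))
            remaining -= take
        return shards

    line = "NodeName=node-a CfgTRES=cpu=64,mem=490000M,gres/gpu=4 AllocTRES=cpu=8,mem=64G,gres/gpu=1"
    print(free_gpus(line))                                             # -> 3
    print(assign_shards([("gpu", "node-a", 3), ("gpu", "node-b", 2)], 4))
    # -> [('gpu', 'node-a', 3), ('gpu', 'node-b', 1)]

Greedy assignment in sorted order fills the nodes with the most idle GPUs first, so a cycle never submits more array tasks to a node than it can start immediately.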
slurm/watch_submit_dwpose.slurm CHANGED
@@ -12,12 +12,12 @@ set -euo pipefail
 
  ROOT_DIR="${ROOT_DIR:-$(cd "$(dirname "$0")/.." && pwd)}"
  SUBMIT_SCRIPT="${SUBMIT_SCRIPT:-$ROOT_DIR/slurm/submit_dwpose_slurm.sh}"
- GPU_PARTITIONS="${GPU_PARTITIONS:-gpu}"
+ GPU_PARTITIONS="${GPU_PARTITIONS:-gpu,gpu-redhat,cgpu}"
  GPU_ACCOUNT="${GPU_ACCOUNT:-}"
  SCAN_INTERVAL_SECONDS="${SCAN_INTERVAL_SECONDS:-60}"
- SUBMIT_LIMIT="${SUBMIT_LIMIT:-200}"
+ SUBMIT_LIMIT="${SUBMIT_LIMIT:-180}"
  ARRAY_PARALLEL="${ARRAY_PARALLEL:-}"
- MAX_BACKLOG_VIDEOS="${MAX_BACKLOG_VIDEOS:-340}"
+ MAX_BACKLOG_VIDEOS="${MAX_BACKLOG_VIDEOS:-180}"
  FPS="${FPS:-24}"
  MAX_ITERATIONS="${MAX_ITERATIONS:-0}"
  FORCE_PROCESS="${FORCE_PROCESS:-0}"
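The retuned defaults trade per-cycle throughput for a smaller steady-state backlog: the watcher now fans out across three partitions but caps both the per-scan submissions and the total backlog at 180. A hypothetical sketch of how knobs like these typically interact; the real gating lives in the body of the watch script, which this hunk does not show, and the function below is purely illustrative:

    def videos_to_submit(pending: int, in_flight: int,
                         submit_limit: int = 180, max_backlog: int = 180) -> int:
        """Cap one scan cycle at submit_limit without pushing the backlog past max_backlog."""
        headroom = max(0, max_backlog - in_flight)
        return min(pending, submit_limit, headroom)

    print(videos_to_submit(pending=500, in_flight=40))  # -> 140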
utils/stats_npz.py CHANGED
@@ -1,4 +1,7 @@
  import fcntl
+ import os
+ import time
+ import zipfile
  from pathlib import Path
  from typing import Dict, Iterable
 
@@ -28,8 +31,12 @@ STATUS_FIELDS = [
  ]
 
 
- def load_stats(stats_path: Path) -> Dict[str, Dict[str, str]]:
-     if not stats_path.exists():
+ def _lock_path(stats_path: Path) -> Path:
+     return stats_path.with_suffix(stats_path.suffix + ".lock")
+
+
+ def _load_stats_unlocked(stats_path: Path) -> Dict[str, Dict[str, str]]:
+     if not stats_path.exists() or stats_path.stat().st_size == 0:
          return {}
 
      data = np.load(stats_path, allow_pickle=True)
@@ -44,15 +51,37 @@ def load_stats(stats_path: Path) -> Dict[str, Dict[str, str]]:
      return stats
 
 
+ def load_stats(stats_path: Path, retries: int = 8, retry_delay: float = 0.2) -> Dict[str, Dict[str, str]]:
+     if not stats_path.exists():
+         return {}
+
+     lock_path = _lock_path(stats_path)
+     lock_path.parent.mkdir(parents=True, exist_ok=True)
+     last_error: Exception | None = None
+     for _ in range(retries):
+         with lock_path.open("a+", encoding="utf-8") as handle:
+             fcntl.flock(handle.fileno(), fcntl.LOCK_SH)
+             try:
+                 return _load_stats_unlocked(stats_path)
+             except (EOFError, ValueError, OSError, zipfile.BadZipFile) as exc:
+                 last_error = exc
+             finally:
+                 fcntl.flock(handle.fileno(), fcntl.LOCK_UN)
+         time.sleep(retry_delay)
+     if last_error is not None:
+         raise last_error
+     return {}
+
+
  def save_stats(stats_path: Path, stats: Dict[str, Dict[str, str]]) -> None:
      stats_path.parent.mkdir(parents=True, exist_ok=True)
      video_ids = sorted(stats)
      payload = {"video_ids": np.asarray(video_ids, dtype=object)}
      for field in STATUS_FIELDS:
          payload[field] = np.asarray([stats[video_id].get(field, "") for video_id in video_ids], dtype=object)
-     tmp_path = stats_path.parent / f".{stats_path.stem}.tmp.npz"
+     tmp_path = stats_path.parent / f".{stats_path.stem}.{os.getpid()}.tmp.npz"
      np.savez(tmp_path, **payload)
-     tmp_path.replace(stats_path)
+     os.replace(tmp_path, stats_path)
 
 
  def ensure_record(stats: Dict[str, Dict[str, str]], video_id: str) -> Dict[str, str]:
@@ -61,16 +90,12 @@ def ensure_record(stats: Dict[str, Dict[str, str]], video_id: str) -> Dict[str,
      return stats[video_id]
 
 
- def _lock_path(stats_path: Path) -> Path:
-     return stats_path.with_suffix(stats_path.suffix + ".lock")
-
-
  def update_video_stats(stats_path: Path, video_id: str, **updates: str) -> Dict[str, str]:
      lock_path = _lock_path(stats_path)
      lock_path.parent.mkdir(parents=True, exist_ok=True)
-     with lock_path.open("w", encoding="utf-8") as handle:
+     with lock_path.open("a+", encoding="utf-8") as handle:
          fcntl.flock(handle.fileno(), fcntl.LOCK_EX)
-         stats = load_stats(stats_path)
+         stats = _load_stats_unlocked(stats_path)
          record = ensure_record(stats, video_id)
          for key, value in updates.items():
              if key in STATUS_FIELDS:
@@ -83,9 +108,9 @@ def update_video_stats(stats_path: Path, video_id: str, **updates: str) -> Dict[
  def update_many_video_stats(stats_path: Path, video_ids: Iterable[str], **updates: str) -> None:
      lock_path = _lock_path(stats_path)
      lock_path.parent.mkdir(parents=True, exist_ok=True)
-     with lock_path.open("w", encoding="utf-8") as handle:
+     with lock_path.open("a+", encoding="utf-8") as handle:
          fcntl.flock(handle.fileno(), fcntl.LOCK_EX)
-         stats = load_stats(stats_path)
+         stats = _load_stats_unlocked(stats_path)
          for video_id in video_ids:
              record = ensure_record(stats, video_id)
              for key, value in updates.items():
@@ -93,3 +118,17 @@ def update_many_video_stats(stats_path: Path, video_ids: Iterable[str], **update
                  record[key] = "" if value is None else str(value)
          save_stats(stats_path, stats)
          fcntl.flock(handle.fileno(), fcntl.LOCK_UN)
+
+
+ def update_many_video_stats_with_retry(stats_path: Path, video_ids: Iterable[str], retries: int = 8, retry_delay: float = 0.2, **updates: str) -> None:
+     last_error: Exception | None = None
+     for _ in range(retries):
+         try:
+             update_many_video_stats(stats_path, video_ids, **updates)
+             return
+         except (EOFError, ValueError, OSError, zipfile.BadZipFile) as exc:
+             last_error = exc
+         time.sleep(retry_delay)
+     if last_error is not None:
+         raise last_error
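The stats_npz.py rework targets two failure modes that show up with many concurrent Slurm tasks: readers observing a half-written .npz (fixed by writing a pid-unique temp file and publishing it with os.replace), and lock-file truncation (open("w") truncated the lock file other processes may be holding; open("a+") does not). A hypothetical usage sketch, assuming the repo root is on sys.path; the stats path, video ids, and field name below are illustrative, and any field you set must appear in STATUS_FIELDS or it is silently ignored:

    from pathlib import Path

    from utils.stats_npz import load_stats, update_many_video_stats_with_retry

    stats_path = Path("state/stats.npz")  # illustrative path

    # Writers serialize on stats.npz.lock; the batch update either lands
    # in full or raises once the retries are exhausted.
    update_many_video_stats_with_retry(
        stats_path,
        ["video_001", "video_002"],   # illustrative video ids
        process_status="done",        # illustrative field name
    )

    # Readers take a shared lock and retry transient EOFError/BadZipFile,
    # covering the window where a writer died before os.replace() ran.
    stats = load_stats(stats_path)
    print(stats.get("video_001", {}))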
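For reference, the save path now follows the standard write-temp-then-rename recipe. The standalone sketch below (illustrative, not part of the commit; atomic_savez is a hypothetical name) shows why a crash at any point leaves either the old file or the new file on disk, never a torn one:

    import os
    from pathlib import Path

    import numpy as np

    def atomic_savez(path: Path, **arrays) -> None:
        # A pid-unique temp name keeps concurrent writers from clobbering
        # each other's in-progress temp files.
        tmp = path.parent / f".{path.stem}.{os.getpid()}.tmp.npz"
        np.savez(tmp, **arrays)   # a crash here leaves `path` untouched
        os.replace(tmp, path)     # atomic on POSIX when tmp and path share a filesystem

    atomic_savez(Path("demo.npz"), video_ids=np.asarray(["a", "b"], dtype=object))

Because the rename is atomic, a concurrent reader opens either the old inode or the new one; the shared-lock read retries mainly guard against filesystems where rename visibility can lag (NFS, for example) and against files left behind by writers that predate this fix.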