""" |
|
|
Debug wrapper for DeepSpeed training |
|
|
This script allows debugging the training process step by step |
|
|
""" |

import os
import sys
import subprocess
import argparse
from pathlib import Path


def setup_environment():
    """Setup environment variables for debugging."""
    env_vars = {
        # 'RANK' is also reused as the local GPU index for `deepspeed --include` in run_training()
        'RANK': '1',
        'MASTER_PORT': '29571',
        'LOCAL_BATCH_SIZE': '2',
        'GRADIENT_ACCUMULATION_STEPS': '4',
        'TRANSFORMERS_OFFLINE': '1',
        'WANDB_PROJECT': 'vtimellm',
        'MODEL_VERSION': 'vicuna-v1-5-7b',
        'OUTPUT_DIR': './outputs/',
        'STAGE4': './outputs/vtimellm-vicuna-v1-5-7b-activitynet-stage4',
        'PYTHONPATH': f"{os.getcwd()}:{os.environ.get('PYTHONPATH', '')}",
        'CUDA_VISIBLE_DEVICES': '1',
        'TORCH_USE_CUDA_DSA': '1',
        'TRANSFORMERS_VERBOSITY': 'info',
        'TOKENIZERS_PARALLELISM': 'false'
    }

    for key, value in env_vars.items():
        os.environ[key] = value

    return env_vars


def check_required_files():
    """Check if all required files exist."""
    required_files = [
        "./checkpoints/vicuna-7b-v1.5",
        "./data/activitynet/mdpo-train.json",
        "./data/activitynet/videos/train",
        "./data/activitynet/clipvitl14-vtimellm.pth",
        "./checkpoints/vtimellm-vicuna-v1-5-7b-stage1/mm_projector.bin",
        "./checkpoints/vtimellm-vicuna-v1-5-7b-stage2",
        "./checkpoints/vtimellm-vicuna-v1-5-7b-stage3",
        "./checkpoints/vtimellm-vicuna-v1-5-7b-activitynet-stage4",
        "./scripts/zero2.json"
    ]

    missing_files = []
    for file_path in required_files:
        if not Path(file_path).exists():
            missing_files.append(file_path)
        else:
            print(f"✓ Found: {file_path}")

    if missing_files:
        print("✗ Missing files:")
        for file_path in missing_files:
            print(f"  {file_path}")
        return False

    return True


def check_gpu():
    """Check GPU availability."""
    try:
        result = subprocess.run(
            ['nvidia-smi', '--query-gpu=name,memory.total,memory.free', '--format=csv,noheader,nounits'],
            capture_output=True, text=True
        )
        if result.returncode == 0:
            print("=== GPU Information ===")
            print(result.stdout)
            print("========================")
            return True
        else:
            print("Warning: nvidia-smi not available or no GPU found")
            return False
    except FileNotFoundError:
        print("Warning: nvidia-smi not found")
        return False


def create_output_dir():
    """Create the output directory."""
    output_dir = "./outputs/vtimellm-vicuna-v1-5-7b-activitynet-stage5"
    Path(output_dir).mkdir(parents=True, exist_ok=True)
    print(f"Created output directory: {output_dir}")
    return output_dir


def run_training():
    """Run the training with DeepSpeed."""
    env_vars = setup_environment()

    print("=== Debug Environment Setup ===")
    for key, value in env_vars.items():
        print(f"{key}: {value}")
    print("================================")

    print("=== Checking Required Files ===")
    if not check_required_files():
        print("Error: Missing required files. Please check the paths.")
        return False

    print("=== Checking GPU ===")
    check_gpu()

    print("=== Creating Output Directory ===")
    create_output_dir()
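
    # Assemble the single-GPU DeepSpeed launch command for vtimellm/train/train_dpo_mem.py:
    # LoRA fine-tuning plus the DPO hyperparameters (--gamma, --beta, --dpo_alpha) for this
    # stage-5 debug run, capped at --max_steps 100.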
    cmd = [
        "deepspeed",
        "--include", f"localhost:{env_vars['RANK']}",
        "--master_port", env_vars['MASTER_PORT'],
        "vtimellm/train/train_dpo_mem.py",
        "--deepspeed", "./scripts/zero2.json",
        "--lora_enable", "True",
        "--lora_r", "8",
        "--lora_alpha", "128",
        "--training_stage", "3",
        "--finetuning", "True",
        "--model_name_or_path", "./checkpoints/vicuna-7b-v1.5",
        "--version", "v1",
        "--data_path", "./data/activitynet/mdpo-train.json",
        "--data_folder", "./data/activitynet/videos/train",
        "--feat_folder", "./data/activitynet/clipvitl14-vtimellm.pth",
        "--pretrain_mm_mlp_adapter", "./checkpoints/vtimellm-vicuna-v1-5-7b-stage1/mm_projector.bin",
        "--stage2_path", "./checkpoints/vtimellm-vicuna-v1-5-7b-stage2",
        "--stage3_path", "./checkpoints/vtimellm-vicuna-v1-5-7b-stage3",
        "--stage4_path", "./checkpoints/vtimellm-vicuna-v1-5-7b-activitynet-stage4",
        "--output_dir", "./outputs/vtimellm-vicuna-v1-5-7b-activitynet-stage5",
        "--bf16", "True",
        "--max_steps", "100",
        "--per_device_train_batch_size", env_vars['LOCAL_BATCH_SIZE'],
        "--gradient_accumulation_steps", env_vars['GRADIENT_ACCUMULATION_STEPS'],
        "--evaluation_strategy", "no",
        "--save_strategy", "no",
        "--save_steps", "50000",
        "--save_total_limit", "10",
        "--learning_rate", "1e-6",
        "--freeze_mm_mlp_adapter", "True",
        "--weight_decay", "0.",
        "--warmup_ratio", "0.1",
        "--lr_scheduler_type", "cosine",
        "--logging_steps", "1",
        "--tf32", "True",
        "--model_max_length", "2048",
        "--gradient_checkpointing", "True",
        "--dataloader_num_workers", "4",
        "--lazy_preprocess", "True",
        "--report_to", "none",
        "--run_name", "vtimellm-vicuna-v1-5-7b-activitynet-stage5",
        "--gamma", "0.0",
        "--beta", "0.5",
        "--dpo_alpha", "1.0",
        "--train4dpo"
    ]

    print("=== Starting Debug Training ===")
    print(f"Command: {' '.join(cmd)}")
    print("================================")

    try:
        subprocess.run(cmd, check=True)
        print("=== Training Completed Successfully ===")
        return True
    except subprocess.CalledProcessError as e:
        print(f"=== Training Failed with Error Code: {e.returncode} ===")
        return False
    except KeyboardInterrupt:
        print("=== Training Interrupted by User ===")
        return False


def main():
    parser = argparse.ArgumentParser(description="Debug wrapper for MDPO training")
    parser.add_argument("--check-only", action="store_true", help="Only check environment and files")
    parser.add_argument("--dry-run", action="store_true", help="Show command without executing")

    args = parser.parse_args()

    if args.check_only:
        setup_environment()
        check_required_files()
        check_gpu()
        create_output_dir()
        return

    if args.dry_run:
        env_vars = setup_environment()
        # The dry run previews only the launcher prefix; the full argument list
        # is assembled inside run_training().
        cmd = [
            "deepspeed",
            "--include", f"localhost:{env_vars['RANK']}",
            "--master_port", env_vars['MASTER_PORT'],
            "vtimellm/train/train_dpo_mem.py",
        ]
        print("Command that would be executed:")
        print(" ".join(cmd))
        return

    success = run_training()
    sys.exit(0 if success else 1)


if __name__ == "__main__":
    main()