# training / submit_training_job.py
# From the Hugging Face "uv-scripts/training" repo, author: davanstrien (HF Staff)
# Commit ee342e6 — "Switch to pure UV approach without Docker image"
#!/usr/bin/env python
"""
Submit Unsloth VLM fine-tuning job to HF Jobs.
This script submits a training job using the Unsloth Docker image with UV script execution.
Simplifies the process of running iconclass-vlm-sft.py on cloud GPUs.
"""
import os
from huggingface_hub import HfApi
from dotenv import load_dotenv
# Allow HF_TOKEN (and other settings) to be supplied via a local .env file
# instead of the shell environment.
load_dotenv()  # Load environment variables from .env file if present
# =============================================================================
# CONFIGURATION
# =============================================================================
# Model and dataset configuration
BASE_MODEL = "Qwen/Qwen3-VL-8B-Instruct"  # Hub repo of the model to fine-tune
DATASET = "davanstrien/iconclass-vlm-sft"  # Hub dataset used for SFT
OUTPUT_MODEL = "davanstrien/Qwen3-VL-8B-iconclass-vlm"  # Destination repo for the trained model
# Training hyperparameters (forwarded to the training script as CLI flags)
BATCH_SIZE = 2  # Per-device batch size (--batch-size)
GRADIENT_ACCUMULATION = 8  # Effective batch = BATCH_SIZE * GRADIENT_ACCUMULATION
MAX_STEPS = None  # Set to None to use full dataset (1 epoch); computed below from dataset size
NUM_EPOCHS = 1.0  # Only used if MAX_STEPS is None
LEARNING_RATE = 2e-5
# LoRA configuration (forwarded as --lora-r / --lora-alpha / --lora-dropout)
LORA_R = 16
LORA_ALPHA = 32
LORA_DROPOUT = 0.1
# Training infrastructure
GPU_FLAVOR = "a100-large"  # Options: a100-large, a100, a10g-large
TIMEOUT = "12h"  # Adjust based on dataset size
# Script location (fetched by HF Jobs at run time)
SCRIPT_URL = "https://huggingface.co/datasets/uv-scripts/training/raw/main/iconclass-vlm-sft.py"
# For local testing, you can also use a local path:
# SCRIPT_PATH = "/path/to/iconclass-vlm-sft.py"
# Optional: derive MAX_STEPS from the dataset size when it is not set
# explicitly (runs at import time and downloads the dataset metadata).
if MAX_STEPS is None:
    from datasets import load_dataset

    print("Calculating max_steps for full dataset...")
    dataset = load_dataset(DATASET, split="train")
    # Floor division: any partial batch at the end of an epoch is dropped.
    steps_per_epoch = len(dataset) // (BATCH_SIZE * GRADIENT_ACCUMULATION)
    # Guard against a zero step count when the dataset is smaller than one
    # effective batch — floor division would otherwise yield MAX_STEPS == 0,
    # which would be passed to the job as "--max-steps 0".
    MAX_STEPS = max(1, int(steps_per_epoch * NUM_EPOCHS))
    print(f"Dataset size: {len(dataset):,} samples")
    print(f"Steps per epoch: {steps_per_epoch:,}")
    print(f"Total steps ({NUM_EPOCHS} epoch(s)): {MAX_STEPS:,}")
    print()
# =============================================================================
# SUBMISSION FUNCTION
# =============================================================================
def submit_training_job():
    """Submit the VLM fine-tuning job to HF Jobs and return the job handle.

    All settings come from the module-level configuration constants.
    Returns ``None`` (after printing guidance) when no HF token is found.
    """
    # Without a token we can neither submit the job nor push the trained
    # model, so fail fast with instructions instead of erroring in the API.
    hf_token = os.environ.get("HF_TOKEN")
    if not hf_token:
        print("⚠️ HF_TOKEN not found in environment")
        print("Please set: export HF_TOKEN=your_token_here")
        print("Or add it to a .env file in this directory")
        return

    api = HfApi(token=hf_token)

    # CLI flag/value pairs consumed by iconclass-vlm-sft.py; kept in the
    # order the training script documents them.
    flag_pairs = [
        ("--base-model", BASE_MODEL),
        ("--dataset", DATASET),
        ("--output-model", OUTPUT_MODEL),
        ("--lora-r", LORA_R),
        ("--lora-alpha", LORA_ALPHA),
        ("--lora-dropout", LORA_DROPOUT),
        ("--learning-rate", LEARNING_RATE),
        ("--batch-size", BATCH_SIZE),
        ("--gradient-accumulation", GRADIENT_ACCUMULATION),
        ("--max-steps", MAX_STEPS),
        ("--logging-steps", 10),
        ("--save-steps", 100),
        ("--eval-steps", 100),
    ]
    script_args = []
    for flag, value in flag_pairs:
        script_args.extend((flag, str(value)))

    banner = "=" * 80
    print(banner)
    print("Submitting Unsloth VLM Fine-tuning Job to HF Jobs")
    print(banner)
    print("\n📦 Configuration:")
    print(f" Base Model: {BASE_MODEL}")
    print(f" Dataset: {DATASET}")
    print(f" Output: {OUTPUT_MODEL}")
    print("\n🎛️ Training Settings:")
    print(f" Max Steps: {MAX_STEPS:,}")
    print(f" Batch Size: {BATCH_SIZE}")
    print(f" Grad Accum: {GRADIENT_ACCUMULATION}")
    print(f" Effective BS: {BATCH_SIZE * GRADIENT_ACCUMULATION}")
    print(f" Learning Rate: {LEARNING_RATE}")
    print("\n🔧 LoRA Settings:")
    print(f" Rank (r): {LORA_R}")
    print(f" Alpha: {LORA_ALPHA}")
    print(f" Dropout: {LORA_DROPOUT}")
    print("\n💻 Infrastructure:")
    print(f" GPU: {GPU_FLAVOR}")
    print(f" Timeout: {TIMEOUT}")
    print("\n🚀 Submitting job...")

    # Submit as a UV script: UV resolves dependencies from the script's
    # inline metadata header, so no explicit dependency list is needed.
    job = api.run_uv_job(
        script=SCRIPT_URL,  # Can also be a local path
        script_args=script_args,
        dependencies=[],  # UV handles all dependencies from script header
        flavor=GPU_FLAVOR,
        timeout=TIMEOUT,
        env={"HF_HUB_ENABLE_HF_TRANSFER": "1"},  # Fast downloads
        secrets={"HF_TOKEN": hf_token},
    )

    job_url = f"https://huggingface.co/jobs/{job.id}"
    print("\n✅ Job submitted successfully!")
    print("\n📊 Job Details:")
    print(f" Job ID: {job.id}")
    print(f" Status: {job.status}")
    print(f" URL: {job_url}")
    print("\n💡 Monitor your job:")
    print(f" • Web: {job_url}")
    print(f" • CLI: hfjobs status {job.id}")
    print(f" • Logs: hfjobs logs {job.id} --follow")
    print("\n🎯 Your model will be available at:")
    print(f" https://huggingface.co/{OUTPUT_MODEL}")
    print("\n" + banner)
    return job
# =============================================================================
# MAIN
# =============================================================================
def main():
    """Entry point: submit the training job, then print a monitoring snippet."""
    submitted = submit_training_job()
    if submitted:
        # Optional: show Python code the user can paste to poll this job.
        print("\n📝 To monitor this job programmatically:")
        print(f"""
from huggingface_hub import HfApi
api = HfApi()
job = api.get_job("{submitted.id}")
print(job.status) # Check status
print(job.logs()) # View logs
""")


if __name__ == "__main__":
    main()