#!/usr/bin/env python
"""
Submit an Unsloth VLM fine-tuning job to HF Jobs.

This script submits a training job via HfApi.run_uv_job, which executes the
self-contained UV script (iconclass-vlm-sft.py) on cloud GPUs; the script's
inline metadata header declares its own dependencies.
"""

import os
from huggingface_hub import HfApi
from dotenv import load_dotenv

load_dotenv()  # Load environment variables from .env file if present
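
# A minimal .env file for this directory would contain just the token
# (the value below is a placeholder, not a real token):
#   HF_TOKEN=hf_xxxxxxxxxxxxxxxxxxxx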


# =============================================================================
# CONFIGURATION
# =============================================================================

# Model and dataset configuration
BASE_MODEL = "Qwen/Qwen3-VL-8B-Instruct"
DATASET = "davanstrien/iconclass-vlm-sft"
OUTPUT_MODEL = "davanstrien/Qwen3-VL-8B-iconclass-vlm"

# Training hyperparameters
BATCH_SIZE = 2
GRADIENT_ACCUMULATION = 8
MAX_STEPS = None  # Set to None to derive steps from the full dataset and NUM_EPOCHS
NUM_EPOCHS = 1.0  # Only used if MAX_STEPS is None
LEARNING_RATE = 2e-5

# LoRA configuration
LORA_R = 16
LORA_ALPHA = 32
LORA_DROPOUT = 0.1
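# The effective LoRA update is scaled by LORA_ALPHA / LORA_R (= 2.0 here);
# setting alpha to twice the rank is a common convention.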

# Training infrastructure
GPU_FLAVOR = "a100-large"  # Options: a100-large, a100, a10g-large
TIMEOUT = "12h"  # Adjust based on dataset size

# Script location
SCRIPT_URL = "https://huggingface.co/datasets/uv-scripts/training/raw/main/iconclass-vlm-sft.py"
# For local testing, run_uv_job also accepts a local file path instead of a URL:
# SCRIPT_URL = "/path/to/iconclass-vlm-sft.py"

# Derive max_steps from the dataset size when MAX_STEPS is not set explicitly
if MAX_STEPS is None:
    from datasets import load_dataset

    print("Calculating max_steps for full dataset...")
    dataset = load_dataset(DATASET, split="train")
    steps_per_epoch = len(dataset) // (BATCH_SIZE * GRADIENT_ACCUMULATION)
    MAX_STEPS = int(steps_per_epoch * NUM_EPOCHS)
    print(f"Dataset size: {len(dataset):,} samples")
    print(f"Steps per epoch: {steps_per_epoch:,}")
    print(f"Total steps ({NUM_EPOCHS} epoch(s)): {MAX_STEPS:,}")
    print()
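
# Worked example (illustrative numbers, not the actual dataset size):
# 10,000 samples with BATCH_SIZE = 2 and GRADIENT_ACCUMULATION = 8 give an
# effective batch of 16, so steps_per_epoch = 10_000 // 16 = 625 and
# MAX_STEPS = 625 when NUM_EPOCHS = 1.0.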


# =============================================================================
# SUBMISSION FUNCTION
# =============================================================================


def submit_training_job():
    """Submit VLM training job using HF Jobs with Unsloth Docker image."""

    # Verify HF token is available
    HF_TOKEN = os.environ.get("HF_TOKEN")
    if not HF_TOKEN:
        print("⚠️  HF_TOKEN not found in environment")
        print("Please set: export HF_TOKEN=your_token_here")
        print("Or add it to a .env file in this directory")
        return

    api = HfApi(token=HF_TOKEN)

    # Build the script arguments
    script_args = [
        "--base-model",
        BASE_MODEL,
        "--dataset",
        DATASET,
        "--output-model",
        OUTPUT_MODEL,
        "--lora-r",
        str(LORA_R),
        "--lora-alpha",
        str(LORA_ALPHA),
        "--lora-dropout",
        str(LORA_DROPOUT),
        "--learning-rate",
        str(LEARNING_RATE),
        "--batch-size",
        str(BATCH_SIZE),
        "--gradient-accumulation",
        str(GRADIENT_ACCUMULATION),
        "--max-steps",
        str(MAX_STEPS),
        "--logging-steps",
        "10",
        "--save-steps",
        "100",
        "--eval-steps",
        "100",
    ]

    print("=" * 80)
    print("Submitting Unsloth VLM Fine-tuning Job to HF Jobs")
    print("=" * 80)
    print(f"\n📦 Configuration:")
    print(f"  Base Model:    {BASE_MODEL}")
    print(f"  Dataset:       {DATASET}")
    print(f"  Output:        {OUTPUT_MODEL}")
    print(f"\n🎛️  Training Settings:")
    print(f"  Max Steps:     {MAX_STEPS:,}")
    print(f"  Batch Size:    {BATCH_SIZE}")
    print(f"  Grad Accum:    {GRADIENT_ACCUMULATION}")
    print(f"  Effective BS:  {BATCH_SIZE * GRADIENT_ACCUMULATION}")
    print(f"  Learning Rate: {LEARNING_RATE}")
    print(f"\n🔧 LoRA Settings:")
    print(f"  Rank (r):      {LORA_R}")
    print(f"  Alpha:         {LORA_ALPHA}")
    print(f"  Dropout:       {LORA_DROPOUT}")
    print(f"\n💻 Infrastructure:")
    print(f"  GPU:           {GPU_FLAVOR}")
    print(f"  Timeout:       {TIMEOUT}")
    print(f"\n🚀 Submitting job...")

    # Submit the job using run_uv_job
    job = api.run_uv_job(
        script=SCRIPT_URL,  # Can also be a local path
        script_args=script_args,
        dependencies=[],  # UV handles all dependencies from script header
        flavor=GPU_FLAVOR,
        timeout=TIMEOUT,
        env={
            "HF_HUB_ENABLE_HF_TRANSFER": "1",  # Fast downloads
        },
        secrets={
            "HF_TOKEN": HF_TOKEN,
        },
    )
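
    # run_uv_job returns a JobInfo describing the queued job; the id/status
    # fields used below assume huggingface_hub's Jobs API (>= v0.34).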

    print("\n✅ Job submitted successfully!")
    print("\n📊 Job Details:")
    print(f"  Job ID:  {job.id}")
    print(f"  Status:  {job.status}")
    print(f"  URL:     https://huggingface.co/jobs/{job.id}")
    print("\n💡 Monitor your job:")
    print(f"  • Web: https://huggingface.co/jobs/{job.id}")
    print(f"  • CLI: hfjobs status {job.id}")
    print(f"  • Logs: hfjobs logs {job.id} --follow")
    print("\n🎯 Your model will be available at:")
    print(f"  https://huggingface.co/{OUTPUT_MODEL}")
    print("\n" + "=" * 80)

    return job
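
# To stop a submitted job early (assuming huggingface_hub's Jobs API):
#   HfApi().cancel_job(job_id=job.id)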


# =============================================================================
# MAIN
# =============================================================================


def main():
    """Main entry point."""
    job = submit_training_job()

    if job:
        # Optional: Show Python code to monitor the job
        print("\n📝 To monitor this job programmatically:")
        print("""
from huggingface_hub import HfApi

api = HfApi()
job = api.get_job("{}")
print(job.status)  # Check status
print(job.logs())  # View logs
        """.format(job.id))


if __name__ == "__main__":
    main()