#!/usr/bin/env bash
set -euo pipefail
# Verbose download progress in non-TTY environments (RunPod logs)
export HF_HUB_DISABLE_PROGRESS_BARS=0
export TRANSFORMERS_VERBOSITY=info
export HF_HUB_VERBOSITY=info
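# All hyperparameters below are overridable via environment variables, e.g.
# (script name illustrative):
#   TUNER_TYPE=lora NUM_EPOCHS=1 LR=1e-4 bash finetune_qwen.sh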
MODEL="${MODEL:-Qwen/Qwen3.5-4B}"
TRAIN_DATA="${TRAIN_DATA:-/workspace/data/train.jsonl}"
TEST_DATA="${TEST_DATA:-/workspace/data/test.jsonl}"
OUTPUT_DIR="${OUTPUT_DIR:-/workspace/output}"
NUM_EPOCHS="${NUM_EPOCHS:-3}"
BATCH_SIZE="${BATCH_SIZE:-1}"
GRAD_ACCUM="${GRAD_ACCUM:-4}"
LR="${LR:-2e-5}"
MAX_LENGTH="${MAX_LENGTH:-2048}"
SAVE_STEPS="${SAVE_STEPS:-10}"
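# Default eval cadence to the save cadence: HF Trainer requires save_steps to
# be a round multiple of eval_steps when load_best_model_at_end is enabled.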
EVAL_STEPS="${EVAL_STEPS:-${SAVE_STEPS}}"
SAVE_TOTAL_LIMIT="${SAVE_TOTAL_LIMIT:-5}"
LOGGING_STEPS="${LOGGING_STEPS:-5}"
USE_HF="${USE_HF:-true}"
TUNER_TYPE="${TUNER_TYPE:-full}"
WARMUP_RATIO="${WARMUP_RATIO:-0.1}"
LR_SCHEDULER_TYPE="${LR_SCHEDULER_TYPE:-cosine}"
WEIGHT_DECAY="${WEIGHT_DECAY:-0.1}"
MAX_GRAD_NORM="${MAX_GRAD_NORM:-1.0}"
OPTIMIZER="${OPTIMIZER:-adamw_torch}"
SEED="${SEED:-42}"
NEFTUNE_ALPHA="${NEFTUNE_ALPHA:-0}"
PACKING="${PACKING:-false}"
SHUFFLE_DATASET="${SHUFFLE_DATASET:-false}"
LAZY_TOKENIZE="${LAZY_TOKENIZE:-true}"
DATASET_NUM_PROC="${DATASET_NUM_PROC:-4}"
ATTN_IMPL="${ATTN_IMPL:-flash_attn}"
DEEPSPEED_CONFIG="${DEEPSPEED_CONFIG:-}"
WANDB_PROJECT="${WANDB_PROJECT:-}"
RESUME_FROM="${RESUME_FROM:-}"
USE_FLASH_CKPT="${USE_FLASH_CKPT:-false}"
EARLY_STOPPING_PATIENCE="${EARLY_STOPPING_PATIENCE:-}"
EARLY_STOPPING_THRESHOLD="${EARLY_STOPPING_THRESHOLD:-0.0}"
# Detect available GPUs; `|| true` keeps `set -o pipefail` from aborting the
# script when nvidia-smi is missing (wc still prints 0).
NUM_GPUS=$(nvidia-smi -L 2>/dev/null | wc -l || true)
if [ -z "${NUM_GPUS}" ] || [ "${NUM_GPUS}" -lt 1 ]; then
  NUM_GPUS=1
fi
NPROC_PER_NODE="${NPROC_PER_NODE:-${NUM_GPUS}}"
if [ "${NPROC_PER_NODE}" -gt 1 ] && [ -z "${DEEPSPEED_CONFIG}" ]; then
DEEPSPEED_CONFIG="zero3"
fi
GPU_IDS=$(seq -s, 0 $((NPROC_PER_NODE - 1)))
export CUDA_VISIBLE_DEVICES="${CUDA_VISIBLE_DEVICES:-${GPU_IDS}}"
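# e.g. NPROC_PER_NODE=4 -> GPU_IDS=0,1,2,3; an externally set
# CUDA_VISIBLE_DEVICES always takes precedence over the derived list.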
echo "============================================"
echo " Qwen 3.5 Fine-Tuning with ms-swift"
echo "============================================"
echo "Model: ${MODEL}"
echo "Train data: ${TRAIN_DATA}"
echo "Test data: ${TEST_DATA}"
echo "Output: ${OUTPUT_DIR}"
echo "Tuner: ${TUNER_TYPE}"
echo "Epochs: ${NUM_EPOCHS}"
echo "Batch size: ${BATCH_SIZE}"
echo "Grad accum: ${GRAD_ACCUM}"
echo "LR: ${LR}"
echo "Max length: ${MAX_LENGTH}"
echo "GPUs: ${NPROC_PER_NODE} (CUDA_VISIBLE_DEVICES=${CUDA_VISIBLE_DEVICES})"
echo "Eval steps: ${EVAL_STEPS}"
echo "Save limit: ${SAVE_TOTAL_LIMIT}"
echo "Warmup ratio: ${WARMUP_RATIO}"
echo "LR scheduler: ${LR_SCHEDULER_TYPE}"
echo "Weight decay: ${WEIGHT_DECAY}"
echo "Grad clip: ${MAX_GRAD_NORM}"
echo "Optimizer: ${OPTIMIZER}"
echo "Seed: ${SEED}"
echo "NEFTune: ${NEFTUNE_ALPHA}"
echo "Packing: ${PACKING}"
echo "Lazy tokenize:${LAZY_TOKENIZE}"
echo "Dataset procs:${DATASET_NUM_PROC}"
echo "Attn impl: ${ATTN_IMPL}"
echo "DeepSpeed: ${DEEPSPEED_CONFIG:-none}"
echo "W&B project: ${WANDB_PROJECT:-disabled}"
echo "Flash ckpt: ${USE_FLASH_CKPT}"
echo "Early stop: ${EARLY_STOPPING_PATIENCE:-disabled}"
echo "Resume from: ${RESUME_FROM:-none}"
echo "============================================"
EXTRA_ARGS=()
HAS_VAL=false
if [ -f "${TEST_DATA}" ] || [[ "${TEST_DATA}" == */* && ! "${TEST_DATA}" == /* ]]; then
EXTRA_ARGS+=(--val_dataset "${TEST_DATA}")
HAS_VAL=true
fi
if [ -n "${EARLY_STOPPING_PATIENCE}" ] && [ "${HAS_VAL}" = "true" ]; then
EXTRA_ARGS+=(
--load_best_model_at_end true
--metric_for_best_model eval_loss
--greater_is_better false
--early_stopping_patience "${EARLY_STOPPING_PATIENCE}"
)
if [ "${EARLY_STOPPING_THRESHOLD}" != "0.0" ] && [ -n "${EARLY_STOPPING_THRESHOLD}" ]; then
EXTRA_ARGS+=(--early_stopping_threshold "${EARLY_STOPPING_THRESHOLD}")
fi
if [ "${SAVE_TOTAL_LIMIT}" -lt 2 ]; then
SAVE_TOTAL_LIMIT=2
echo "Bumped SAVE_TOTAL_LIMIT to 2 (required for load_best_model_at_end)"
fi
elif [ -n "${EARLY_STOPPING_PATIENCE}" ]; then
echo "WARNING: EARLY_STOPPING_PATIENCE ignored — no validation data configured"
fi
if [ -n "${DEEPSPEED_CONFIG}" ]; then
EXTRA_ARGS+=(--deepspeed "${DEEPSPEED_CONFIG}")
fi
if [ -n "${WANDB_PROJECT}" ] || [ -n "${WANDB_API_KEY:-}" ]; then
EXTRA_ARGS+=(--report_to wandb)
fi
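# --report_to wandb only enables the logger; wandb itself picks up
# WANDB_PROJECT and WANDB_API_KEY from the environment.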
if [ -n "${RESUME_FROM}" ]; then
if [ "${RESUME_FROM}" = "auto" ]; then
LATEST_CKPT=$(ls -td "${OUTPUT_DIR}"/*/checkpoint-* "${OUTPUT_DIR}"/checkpoint-* 2>/dev/null | head -1)
if [ -n "${LATEST_CKPT}" ]; then
echo "Auto-resume: found ${LATEST_CKPT}"
EXTRA_ARGS+=(--resume_from_checkpoint "${LATEST_CKPT}")
else
echo "Auto-resume: no checkpoint found, starting fresh"
fi
else
EXTRA_ARGS+=(--resume_from_checkpoint "${RESUME_FROM}")
fi
fi
export NPROC_PER_NODE
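# ms-swift reads NPROC_PER_NODE from the environment and launches via
# torchrun with that many processes when it is > 1.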
# Pre-flight: verify flash-linear-attention is available.
# Qwen 3.5 silently falls back to O(n²) GatedDeltaNet without it.
echo ""
echo "Pre-flight checks..."
python3 -c "
try:
import fla
from fla.ops.gated_delta_rule import fused_recurrent_gated_delta_rule
print('[Pre-flight] flash-linear-attention OK — GatedDeltaNet uses FLA kernels')
except ImportError as e:
print(f'[Pre-flight] WARNING: flash-linear-attention not importable: {e}')
print(' GatedDeltaNet layers will use naive O(n^2) fallback — expect 2-3x VRAM')
try:
import causal_conv1d
print('[Pre-flight] causal-conv1d OK')
except ImportError:
print('[Pre-flight] WARNING: causal-conv1d not available')
"
# Pre-download model with visible progress before ms-swift starts.
# ms-swift/transformers download logging is minimal in non-TTY envs.
echo ""
echo "Pre-downloading model ${MODEL} (if not cached)..."
python3 -c "
import os
os.environ['HF_HUB_DISABLE_PROGRESS_BARS'] = '0'
from huggingface_hub import snapshot_download, logging
logging.set_verbosity_info()
snapshot_download('${MODEL}', ignore_patterns=['*.gguf', '*.ggml'])
print('Model download complete.', flush=True)
"
echo ""
CMD_ARGS=(
  --model "${MODEL}"
  --dataset "${TRAIN_DATA}"
  --tuner_type "${TUNER_TYPE}"
  --torch_dtype bfloat16
  --num_train_epochs "${NUM_EPOCHS}"
  --per_device_train_batch_size "${BATCH_SIZE}"
  --per_device_eval_batch_size "${BATCH_SIZE}"
  --learning_rate "${LR}"
  --gradient_accumulation_steps "${GRAD_ACCUM}"
  --eval_strategy steps
  --eval_steps "${EVAL_STEPS}"
  --save_steps "${SAVE_STEPS}"
  --save_total_limit "${SAVE_TOTAL_LIMIT}"
  --logging_steps "${LOGGING_STEPS}"
  --max_length "${MAX_LENGTH}"
  --output_dir "${OUTPUT_DIR}"
  --warmup_ratio "${WARMUP_RATIO}"
  --lr_scheduler_type "${LR_SCHEDULER_TYPE}"
  --weight_decay "${WEIGHT_DECAY}"
  --max_grad_norm "${MAX_GRAD_NORM}"
  --optim "${OPTIMIZER}"
  --seed "${SEED}"
  --dataloader_num_workers 4
  --lazy_tokenize "${LAZY_TOKENIZE}"
  --dataset_num_proc "${DATASET_NUM_PROC}"
  --attn_impl "${ATTN_IMPL}"
  --use_hf "${USE_HF}"
  --gradient_checkpointing true
  --use_flash_ckpt "${USE_FLASH_CKPT}"
)
if [ "${NEFTUNE_ALPHA}" != "0" ] && [ -n "${NEFTUNE_ALPHA}" ]; then
CMD_ARGS+=(--neftune_noise_alpha "${NEFTUNE_ALPHA}")
fi
if [ "${PACKING}" = "true" ]; then
CMD_ARGS+=(--packing true)
fi
if [ "${SHUFFLE_DATASET}" = "true" ]; then
CMD_ARGS+=(--dataset_shuffle true)
fi
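# Note: expanding the possibly-empty EXTRA_ARGS under `set -u` relies on
# bash >= 4.4; older bash treats an empty array expansion as unbound.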
swift sft "${CMD_ARGS[@]}" "${EXTRA_ARGS[@]}"
echo "============================================"
echo " Training complete!"
echo " Output saved to: ${OUTPUT_DIR}"
echo "============================================"