#!/usr/bin/env bash
set -euo pipefail
# Verbose download progress in non-TTY environments (RunPod logs)
export HF_HUB_DISABLE_PROGRESS_BARS=0
export TRANSFORMERS_VERBOSITY=info
export HF_HUB_VERBOSITY=info
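# Every setting below can be overridden via environment variables, for example
# (the script name here is illustrative):
#   MODEL=Qwen/Qwen3.5-4B TUNER_TYPE=lora NUM_EPOCHS=1 bash run_sft.sh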
MODEL="${MODEL:-Qwen/Qwen3.5-4B}"
TRAIN_DATA="${TRAIN_DATA:-/workspace/data/train.jsonl}"
TEST_DATA="${TEST_DATA:-/workspace/data/test.jsonl}"
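# TRAIN_DATA / TEST_DATA are expected to be jsonl files; an illustrative record in
# ms-swift's standard "messages" format (adapt if your schema differs):
#   {"messages": [{"role": "user", "content": "Hi"}, {"role": "assistant", "content": "Hello!"}]}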
OUTPUT_DIR="${OUTPUT_DIR:-/workspace/output}"
NUM_EPOCHS="${NUM_EPOCHS:-3}"
BATCH_SIZE="${BATCH_SIZE:-1}"
GRAD_ACCUM="${GRAD_ACCUM:-4}"
LR="${LR:-2e-5}"
MAX_LENGTH="${MAX_LENGTH:-2048}"
SAVE_STEPS="${SAVE_STEPS:-10}"
EVAL_STEPS="${EVAL_STEPS:-${SAVE_STEPS}}"
SAVE_TOTAL_LIMIT="${SAVE_TOTAL_LIMIT:-5}"
LOGGING_STEPS="${LOGGING_STEPS:-5}"
USE_HF="${USE_HF:-true}"
TUNER_TYPE="${TUNER_TYPE:-full}"
WARMUP_RATIO="${WARMUP_RATIO:-0.1}"
LR_SCHEDULER_TYPE="${LR_SCHEDULER_TYPE:-cosine}"
WEIGHT_DECAY="${WEIGHT_DECAY:-0.1}"
MAX_GRAD_NORM="${MAX_GRAD_NORM:-1.0}"
OPTIMIZER="${OPTIMIZER:-adamw_torch}"
SEED="${SEED:-42}"
NEFTUNE_ALPHA="${NEFTUNE_ALPHA:-0}"
PACKING="${PACKING:-false}"
SHUFFLE_DATASET="${SHUFFLE_DATASET:-false}"
LAZY_TOKENIZE="${LAZY_TOKENIZE:-true}"
DATASET_NUM_PROC="${DATASET_NUM_PROC:-4}"
ATTN_IMPL="${ATTN_IMPL:-flash_attn}"
DEEPSPEED_CONFIG="${DEEPSPEED_CONFIG:-}"
WANDB_PROJECT="${WANDB_PROJECT:-}"
RESUME_FROM="${RESUME_FROM:-}"
USE_FLASH_CKPT="${USE_FLASH_CKPT:-false}"
EARLY_STOPPING_PATIENCE="${EARLY_STOPPING_PATIENCE:-}"
EARLY_STOPPING_THRESHOLD="${EARLY_STOPPING_THRESHOLD:-0.0}"
# Count visible GPUs; "|| true" keeps set -e/pipefail from aborting when nvidia-smi is unavailable
NUM_GPUS=$(nvidia-smi -L 2>/dev/null | wc -l || true)
NUM_GPUS=${NUM_GPUS:-1}
if [ "${NUM_GPUS}" -lt 1 ]; then
  NUM_GPUS=1
fi
NPROC_PER_NODE="${NPROC_PER_NODE:-${NUM_GPUS}}"
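# With more than one process and no explicit config, fall back to ms-swift's bundled ZeRO-3 preset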
if [ "${NPROC_PER_NODE}" -gt 1 ] && [ -z "${DEEPSPEED_CONFIG}" ]; then
  DEEPSPEED_CONFIG="zero3"
fi
GPU_IDS=$(seq -s, 0 $((NPROC_PER_NODE - 1)))
export CUDA_VISIBLE_DEVICES="${CUDA_VISIBLE_DEVICES:-${GPU_IDS}}"
echo "============================================"
echo " Qwen 3.5 Fine-Tuning with ms-swift"
echo "============================================"
echo "Model: ${MODEL}"
echo "Train data: ${TRAIN_DATA}"
echo "Test data: ${TEST_DATA}"
echo "Output: ${OUTPUT_DIR}"
echo "Tuner: ${TUNER_TYPE}"
echo "Epochs: ${NUM_EPOCHS}"
echo "Batch size: ${BATCH_SIZE}"
echo "Grad accum: ${GRAD_ACCUM}"
echo "LR: ${LR}"
echo "Max length: ${MAX_LENGTH}"
echo "GPUs: ${NPROC_PER_NODE} (CUDA_VISIBLE_DEVICES=${CUDA_VISIBLE_DEVICES})"
echo "Eval steps: ${EVAL_STEPS}"
echo "Save limit: ${SAVE_TOTAL_LIMIT}"
echo "Warmup ratio: ${WARMUP_RATIO}"
echo "LR scheduler: ${LR_SCHEDULER_TYPE}"
echo "Weight decay: ${WEIGHT_DECAY}"
echo "Grad clip: ${MAX_GRAD_NORM}"
echo "Optimizer: ${OPTIMIZER}"
echo "Seed: ${SEED}"
echo "NEFTune: ${NEFTUNE_ALPHA}"
echo "Packing: ${PACKING}"
echo "Lazy tokenize:${LAZY_TOKENIZE}"
echo "Dataset procs:${DATASET_NUM_PROC}"
echo "Attn impl: ${ATTN_IMPL}"
echo "DeepSpeed: ${DEEPSPEED_CONFIG:-none}"
echo "W&B project: ${WANDB_PROJECT:-disabled}"
echo "Flash ckpt: ${USE_FLASH_CKPT}"
echo "Early stop: ${EARLY_STOPPING_PATIENCE:-disabled}"
echo "Resume from: ${RESUME_FROM:-none}"
echo "============================================"
EXTRA_ARGS=()
HAS_VAL=false
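# Use TEST_DATA for evaluation if the file exists, or if it looks like a hub
# dataset id (contains "/" but is not an absolute path)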
if [ -f "${TEST_DATA}" ] || [[ "${TEST_DATA}" == */* && ! "${TEST_DATA}" == /* ]]; then
  EXTRA_ARGS+=(--val_dataset "${TEST_DATA}")
  HAS_VAL=true
fi
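# Early stopping needs a validation set; it also implies load_best_model_at_end,
# which requires room for at least two checkpoints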
if [ -n "${EARLY_STOPPING_PATIENCE}" ] && [ "${HAS_VAL}" = "true" ]; then
EXTRA_ARGS+=(
--load_best_model_at_end true
--metric_for_best_model eval_loss
--greater_is_better false
--early_stopping_patience "${EARLY_STOPPING_PATIENCE}"
)
if [ "${EARLY_STOPPING_THRESHOLD}" != "0.0" ] && [ -n "${EARLY_STOPPING_THRESHOLD}" ]; then
EXTRA_ARGS+=(--early_stopping_threshold "${EARLY_STOPPING_THRESHOLD}")
fi
if [ "${SAVE_TOTAL_LIMIT}" -lt 2 ]; then
SAVE_TOTAL_LIMIT=2
echo "Bumped SAVE_TOTAL_LIMIT to 2 (required for load_best_model_at_end)"
fi
elif [ -n "${EARLY_STOPPING_PATIENCE}" ]; then
echo "WARNING: EARLY_STOPPING_PATIENCE ignored — no validation data configured"
fi
if [ -n "${DEEPSPEED_CONFIG}" ]; then
EXTRA_ARGS+=(--deepspeed "${DEEPSPEED_CONFIG}")
fi
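# Report to Weights & Biases whenever a project name or API key is provided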
if [ -n "${WANDB_PROJECT}" ] || [ -n "${WANDB_API_KEY:-}" ]; then
EXTRA_ARGS+=(--report_to wandb)
fi
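# RESUME_FROM=auto resumes from the newest checkpoint-* directory under OUTPUT_DIR;
# any other non-empty value is passed through as an explicit checkpoint path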
if [ -n "${RESUME_FROM}" ]; then
if [ "${RESUME_FROM}" = "auto" ]; then
LATEST_CKPT=$(ls -td "${OUTPUT_DIR}"/*/checkpoint-* "${OUTPUT_DIR}"/checkpoint-* 2>/dev/null | head -1)
if [ -n "${LATEST_CKPT}" ]; then
echo "Auto-resume: found ${LATEST_CKPT}"
EXTRA_ARGS+=(--resume_from_checkpoint "${LATEST_CKPT}")
else
echo "Auto-resume: no checkpoint found, starting fresh"
fi
else
EXTRA_ARGS+=(--resume_from_checkpoint "${RESUME_FROM}")
fi
fi
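# ms-swift picks up NPROC_PER_NODE from the environment and spawns one process per GPU (torchrun-style)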
export NPROC_PER_NODE
# Pre-flight: verify flash-linear-attention is available.
# Qwen 3.5 silently falls back to O(n²) GatedDeltaNet without it.
echo ""
echo "Pre-flight checks..."
python3 -c "
try:
    import fla
    from fla.ops.gated_delta_rule import fused_recurrent_gated_delta_rule
    print('[Pre-flight] flash-linear-attention OK — GatedDeltaNet uses FLA kernels')
except ImportError as e:
    print(f'[Pre-flight] WARNING: flash-linear-attention not importable: {e}')
    print('            GatedDeltaNet layers will use naive O(n^2) fallback — expect 2-3x VRAM')
try:
    import causal_conv1d
    print('[Pre-flight] causal-conv1d OK')
except ImportError:
    print('[Pre-flight] WARNING: causal-conv1d not available')
"
# Pre-download model with visible progress before ms-swift starts.
# ms-swift/transformers download logging is minimal in non-TTY envs.
echo ""
echo "Pre-downloading model ${MODEL} (if not cached)..."
python3 -c "
import os
os.environ['HF_HUB_DISABLE_PROGRESS_BARS'] = '0'
from huggingface_hub import snapshot_download, logging
logging.set_verbosity_info()
snapshot_download('${MODEL}', ignore_patterns=['*.gguf', '*.ggml'])
print('Model download complete.', flush=True)
"
echo ""
CMD_ARGS=(
  --model "${MODEL}"
  --dataset "${TRAIN_DATA}"
  --tuner_type "${TUNER_TYPE}"
  --torch_dtype bfloat16
  --num_train_epochs "${NUM_EPOCHS}"
  --per_device_train_batch_size "${BATCH_SIZE}"
  --per_device_eval_batch_size "${BATCH_SIZE}"
  --learning_rate "${LR}"
  --gradient_accumulation_steps "${GRAD_ACCUM}"
  --eval_strategy steps
  --eval_steps "${EVAL_STEPS}"
  --save_steps "${SAVE_STEPS}"
  --save_total_limit "${SAVE_TOTAL_LIMIT}"
  --logging_steps "${LOGGING_STEPS}"
  --max_length "${MAX_LENGTH}"
  --output_dir "${OUTPUT_DIR}"
  --warmup_ratio "${WARMUP_RATIO}"
  --lr_scheduler_type "${LR_SCHEDULER_TYPE}"
  --weight_decay "${WEIGHT_DECAY}"
  --max_grad_norm "${MAX_GRAD_NORM}"
  --optim "${OPTIMIZER}"
  --seed "${SEED}"
  --dataloader_num_workers 4
  --lazy_tokenize "${LAZY_TOKENIZE}"
  --dataset_num_proc "${DATASET_NUM_PROC}"
  --attn_impl "${ATTN_IMPL}"
  --use_hf "${USE_HF}"
  --gradient_checkpointing true
  --use_flash_ckpt "${USE_FLASH_CKPT}"
)
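# Optional extras, appended only when explicitly enabled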
if [ "${NEFTUNE_ALPHA}" != "0" ] && [ -n "${NEFTUNE_ALPHA}" ]; then
CMD_ARGS+=(--neftune_noise_alpha "${NEFTUNE_ALPHA}")
fi
if [ "${PACKING}" = "true" ]; then
CMD_ARGS+=(--packing true)
fi
if [ "${SHUFFLE_DATASET}" = "true" ]; then
CMD_ARGS+=(--dataset_shuffle true)
fi
swift sft "${CMD_ARGS[@]}" "${EXTRA_ARGS[@]}"
echo "============================================"
echo " Training complete!"
echo " Output saved to: ${OUTPUT_DIR}"
echo "============================================"