| | """ |
| | SFT (Supervised Fine-Tuning) script for the 1B Transformer. |
| | |
| | Takes the pretrained base model and fine-tunes it on instruction-response |
| | conversations from UltraChat 200K. |
| | |
| | Launch: torchrun --nproc_per_node=8 train_sft.py |
| | """ |
| |
|
import os
import sys
import math
import time
import json
import datetime

import torch
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.data.distributed import DistributedSampler

sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
from model.config import ModelConfig
from model.transformer import Transformer
from model.data import get_tokenizer
from model.sft_data import SFTDataset, sft_collate_fn

# Paths.
BASE_CHECKPOINT = "/jfs/deepak-kumar/checkpoints/step_19000.pt"
SFT_CHECKPOINT_DIR = "/jfs/deepak-kumar/checkpoints_sft"
LOG_DIR = "/home/jovyan/training/logs"
DATA_CACHE = "/jfs/deepak-kumar/data"

# Hyperparameters.
NUM_EPOCHS = 2
BATCH_SIZE_PER_GPU = 4
GRADIENT_ACCUMULATION = 4
MAX_SEQ_LEN = 2048
LEARNING_RATE = 2e-5
MIN_LR = 2e-6
WARMUP_STEPS = 200
WEIGHT_DECAY = 0.01
GRAD_CLIP = 1.0
LOG_INTERVAL = 10
SAVE_INTERVAL = 500
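
# With these defaults on 8 GPUs, each optimizer step sees
# BATCH_SIZE_PER_GPU * 8 * GRADIENT_ACCUMULATION = 4 * 8 * 4 = 128 sequences,
# i.e. up to 128 * MAX_SEQ_LEN = 262,144 tokens per step.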


def get_cosine_lr(step, warmup_steps, total_steps, max_lr, min_lr):
    """Linear warmup to max_lr, then cosine decay to min_lr."""
    if step < warmup_steps:
        return max_lr * step / max(warmup_steps, 1)
    progress = (step - warmup_steps) / max(total_steps - warmup_steps, 1)
    progress = min(progress, 1.0)  # clamp so the LR stays at min_lr past total_steps
    return min_lr + 0.5 * (max_lr - min_lr) * (1 + math.cos(math.pi * progress))
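
# Example: with WARMUP_STEPS=200, the LR at step 100 is 2e-5 * 100/200 = 1e-5;
# from step 200 it decays along a half-cosine, reaching MIN_LR at total_steps.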


def main():
    dist.init_process_group("nccl", timeout=datetime.timedelta(minutes=30))
    rank = int(os.environ.get("RANK", 0))
    local_rank = int(os.environ.get("LOCAL_RANK", 0))
    world_size = int(os.environ.get("WORLD_SIZE", 1))
    torch.cuda.set_device(local_rank)
    device = torch.device(f"cuda:{local_rank}")

    if rank == 0:
        os.makedirs(SFT_CHECKPOINT_DIR, exist_ok=True)
        os.makedirs(LOG_DIR, exist_ok=True)
        print("=" * 70)
        print(" SFT: INSTRUCTION FINE-TUNING 1B TRANSFORMER")
        print("=" * 70)

    # Tokenizer.
    tokenizer = get_tokenizer()

    # Model: construct with the pretraining config; fixed seed so every rank
    # builds identical weights before the checkpoint load.
    model_config = ModelConfig()
    torch.manual_seed(42)
    model = Transformer(model_config)

    if rank == 0:
        print(f"[Init] Loading base model from {BASE_CHECKPOINT}")
    ckpt = torch.load(BASE_CHECKPOINT, map_location="cpu", weights_only=False)
    model.load_state_dict(ckpt["model"])
    base_step = ckpt.get("step", 0)
    base_loss = ckpt.get("loss", "?")
    if rank == 0:
        print(f"[Init] Base model: step={base_step}, pretrain_loss={base_loss}")
    del ckpt

    # Chat-format special tokens; add only the ones the tokenizer lacks.
    special_tokens = ["<|user|>", "<|assistant|>", "<|end|>"]
    vocab = tokenizer.get_vocab()
    new_tokens = [t for t in special_tokens if t not in vocab]
    if new_tokens:
        tokenizer.add_tokens(new_tokens, special_tokens=True)

    new_vocab_size = len(tokenizer)
    if new_vocab_size > model_config.vocab_size:
        if rank == 0:
            print(f"[Init] Expanding vocab: {model_config.vocab_size} -> {new_vocab_size}")

        # Grow the embedding table: copy the old rows, initialize the new rows
        # to the mean of the existing embeddings so new tokens start "average".
        old_emb_weight = model.tok_embeddings.weight.data
        model.tok_embeddings = torch.nn.Embedding(new_vocab_size, model_config.hidden_dim)
        model.tok_embeddings.weight.data[:model_config.vocab_size] = old_emb_weight
        model.tok_embeddings.weight.data[model_config.vocab_size:] = old_emb_weight.mean(dim=0)

        # Grow the output head the same way, copying the old rows.
        old_output_weight = model.output.weight.data
        model.output = torch.nn.Linear(model_config.hidden_dim, new_vocab_size, bias=False)
        model.output.weight.data[:model_config.vocab_size] = old_output_weight
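
        # Assumption/optional tweak: rows past the old vocab in the new output
        # head keep PyTorch's default Linear init. Mirroring the embedding
        # mean-init is a reasonable alternative, e.g.:
        #   model.output.weight.data[model_config.vocab_size:] = old_output_weight.mean(dim=0)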

        model.config.vocab_size = new_vocab_size

    model = model.to(device)
    model = DDP(model, device_ids=[local_rank])
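
    # Performance note: with GRADIENT_ACCUMULATION > 1, DDP all-reduces
    # gradients on every micro-step's backward(). Wrapping all but the last
    # micro-step in `model.no_sync()` would skip the redundant syncs; the loop
    # below keeps the simpler always-sync form.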

    if rank == 0:
        n_params = sum(p.numel() for p in model.parameters())
        print(f"[Init] Params: {n_params:,} | GPUs: {world_size}x H100")

    # Dataset and distributed dataloader.
    dataset = SFTDataset(
        tokenizer=tokenizer,
        max_seq_len=MAX_SEQ_LEN,
        split="train_sft",
        cache_dir=DATA_CACHE,
    )
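
    # "train_sft" is UltraChat 200K's supervised fine-tuning split; tokenization
    # and truncation to MAX_SEQ_LEN are assumed to happen inside SFTDataset.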

    sampler = DistributedSampler(dataset, num_replicas=world_size, rank=rank, shuffle=True)
    dataloader = torch.utils.data.DataLoader(
        dataset,
        batch_size=BATCH_SIZE_PER_GPU,
        sampler=sampler,
        num_workers=4,
        pin_memory=True,
        collate_fn=lambda b: sft_collate_fn(b, pad_id=tokenizer.pad_token_id),
    )
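
    # Assumption about sft_collate_fn: it right-pads each batch with pad_id and
    # masks non-assistant label positions (prompt and padding) to -100, the
    # usual SFT convention so loss is computed only on responses.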

    steps_per_epoch = len(dataloader) // GRADIENT_ACCUMULATION
    total_steps = steps_per_epoch * NUM_EPOCHS
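
    # Floor division drops leftover micro-batches, so a partial optimizer step
    # at the end of an epoch can push global_step slightly past total_steps;
    # get_cosine_lr clamps its progress term, so the LR simply stays at MIN_LR.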

    if rank == 0:
        eff_batch = BATCH_SIZE_PER_GPU * world_size * GRADIENT_ACCUMULATION
        print(f"[Init] Dataset: {len(dataset):,} examples")
        print(f"[Init] Effective batch: {eff_batch} | Steps/epoch: {steps_per_epoch}")
        print(f"[Init] Total steps: {total_steps} | Epochs: {NUM_EPOCHS}")
        print(f"[Init] LR: {LEARNING_RATE} → {MIN_LR} (cosine)")
        print("-" * 70)

    # AdamW with the GPT-style decay split: matrices and embeddings (dim >= 2)
    # get weight decay; biases and norm parameters (dim < 2) do not.
    decay_params = [p for p in model.parameters() if p.dim() >= 2 and p.requires_grad]
    nodecay_params = [p for p in model.parameters() if p.dim() < 2 and p.requires_grad]
    optimizer = torch.optim.AdamW([
        {"params": decay_params, "weight_decay": WEIGHT_DECAY},
        {"params": nodecay_params, "weight_decay": 0.0},
    ], lr=LEARNING_RATE, betas=(0.9, 0.95), fused=True)
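
    # fused=True runs the AdamW update as a single CUDA kernel; it requires all
    # parameters to be CUDA tensors, which holds here because the model was
    # moved to the device before the optimizer was built.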

    # Training loop state.
    model.train()
    global_step = 0
    running_loss = 0.0
    t0 = time.time()

    # Rank 0 writes a JSONL training log.
    log_file = open(os.path.join(LOG_DIR, "sft_log.jsonl"), "w") if rank == 0 else None

    for epoch in range(NUM_EPOCHS):
        sampler.set_epoch(epoch)  # reshuffle the distributed sampler each epoch
        data_iter = iter(dataloader)

        if rank == 0:
            print(f"\n[Epoch {epoch + 1}/{NUM_EPOCHS}]")

        while True:
            optimizer.zero_grad(set_to_none=True)
            batch_loss = 0.0

            for _ in range(GRADIENT_ACCUMULATION):
                try:
                    input_ids, labels = next(data_iter)
                except StopIteration:
                    break

                input_ids = input_ids.to(device, non_blocking=True)
                labels = labels.to(device, non_blocking=True)

                # bf16 autocast: no GradScaler needed (unlike fp16).
                with torch.autocast(device_type="cuda", dtype=torch.bfloat16):
                    _, loss = model(input_ids, labels)
                    loss = loss / GRADIENT_ACCUMULATION

                loss.backward()
                batch_loss += loss.item()

            # Iterator exhausted with no micro-batch processed: epoch is done.
            if batch_loss == 0:
                break
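
            # If StopIteration hit mid-accumulation, the surviving micro-batches
            # were still scaled by 1/GRADIENT_ACCUMULATION, so the final step of
            # an epoch is slightly down-weighted; harmless, but visible in the
            # loss curve.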

            torch.nn.utils.clip_grad_norm_(model.parameters(), GRAD_CLIP)

            lr = get_cosine_lr(global_step, WARMUP_STEPS, total_steps, LEARNING_RATE, MIN_LR)
            for pg in optimizer.param_groups:
                pg["lr"] = lr

            optimizer.step()
            global_step += 1
            running_loss += batch_loss

            if global_step % LOG_INTERVAL == 0:
                avg = running_loss / LOG_INTERVAL
                elapsed = time.time() - t0
                pct = 100.0 * global_step / total_steps

                if rank == 0:
                    gpu_mem = torch.cuda.max_memory_allocated(device) / 1e9
                    eta = (elapsed / max(global_step, 1)) * (total_steps - global_step)
                    print(
                        f" [Step {global_step:>5d}/{total_steps}] "
                        f"loss={avg:.4f} | lr={lr:.2e} | "
                        f"GPU={gpu_mem:.1f}GB | {pct:.1f}% | ETA={eta/60:.0f}m",
                        flush=True,
                    )
                    if log_file:
                        log_file.write(json.dumps({
                            "step": global_step, "epoch": epoch + 1,
                            "loss": round(avg, 4), "lr": lr,
                            "elapsed_s": round(elapsed, 1),
                        }) + "\n")
                        log_file.flush()

                running_loss = 0.0

            if global_step % SAVE_INTERVAL == 0:
                dist.barrier()
                if rank == 0:
                    path = os.path.join(SFT_CHECKPOINT_DIR, f"sft_step_{global_step}.pt")
                    torch.save({
                        "step": global_step,
                        "model": model.module.state_dict(),
                        "config": model_config.__dict__,
                        "vocab_size": new_vocab_size,
                    }, path)
                    print(f" >> Checkpoint: {path}", flush=True)
                dist.barrier()
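
            # Note: these checkpoints hold weights and config only (no optimizer
            # or sampler state), so they are eval/deployment snapshots rather
            # than exact resume points.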

    # Final checkpoint and summary.
    dist.barrier()
    if rank == 0:
        final_path = os.path.join(SFT_CHECKPOINT_DIR, "sft_final.pt")
        torch.save({
            "step": global_step,
            "model": model.module.state_dict(),
            "config": model_config.__dict__,
            "vocab_size": new_vocab_size,
        }, final_path)
        total_time = time.time() - t0
        print("=" * 70)
        print(" SFT COMPLETE")
        print(f" Steps: {global_step:,} | Epochs: {NUM_EPOCHS}")
        print(f" Time: {total_time/60:.1f} minutes")
        print(f" Final model: {final_path}")
        print("=" * 70)
        if log_file:
            log_file.close()

    dist.destroy_process_group()


if __name__ == "__main__":
    main()