#!/usr/bin/env python3
"""Little Fig GPU Benchmark - fixed for PyTorch 2.11"""
import os, sys, subprocess, json, time, gc, traceback
import numpy as np
print("[SETUP] Installing...", flush=True)
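# Install Python deps, clone littlefig if absent, install it editable with the
# [train] extras, and put its src/ layout on sys.path.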
subprocess.check_call([sys.executable, "-m", "pip", "install", "-q",
"transformers", "accelerate", "peft", "bitsandbytes",
"datasets", "sentencepiece", "protobuf", "psutil", "numpy"])
if not os.path.exists("/app/littlefig"):
subprocess.check_call(["git", "clone", "https://github.com/ticketguy/littlefig.git", "/app/littlefig"])
subprocess.check_call([sys.executable, "-m", "pip", "install", "-q", "-e", "/app/littlefig[train]"])
sys.path.insert(0, "/app/littlefig/src")
import torch
import torch.nn.functional as F
MODEL = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"
LORA_R = 16; LORA_ALPHA = 32; GROUP_SIZE = 128
LORA_TARGETS = ["q_proj", "k_proj", "v_proj", "o_proj"]
MAX_SEQ = 512; TRAIN_STEPS = 100; BATCH_SIZE = 4; GRAD_ACCUM = 4; LR = 2e-4
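# One shared config for all runs: same LoRA rank/alpha and target projections,
# same sequence length, batch size, grad accumulation, and LR, so the three
# training methods are compared like-for-like. GROUP_SIZE is the quantization
# group size used by both figquant and the NF4 baseline.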
RESULTS = {}
def log(msg): print(f"[BENCH] {msg}", flush=True)
def gpu_mb(): return torch.cuda.max_memory_allocated()/1e6 if torch.cuda.is_available() else 0.0
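# reset() clears cached CUDA blocks and the peak-memory counter so gpu_mb()
# reports the peak of whatever runs next rather than a running maximum, e.g.
#   reset(); run_phase(); peak = gpu_mb()   # run_phase() is illustrative only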
def reset():
gc.collect()
if torch.cuda.is_available(): torch.cuda.empty_cache(); torch.cuda.reset_peak_memory_stats()
def safe_run(name, fn):
log(f"\n{'='*70}\n {name}\n{'='*70}")
try:
        r = fn(); RESULTS[name] = r; log(f" ✅ {name}"); return r
except Exception as e:
log(f" ❌ {name}: {e}"); traceback.print_exc(); RESULTS[name] = {"error": str(e)}; return None
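# meas() scores a dequantized tensor d against the original o: elementwise MSE,
# cosine similarity of the flattened vectors, and SNR in dB (10*log10 of signal
# power over quantization MSE; higher is better).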
def meas(o, d):
o, d = o.reshape(-1).float(), d.reshape(-1).float()
mse = F.mse_loss(d, o).item()
cos = F.cosine_similarity(o.unsqueeze(0), d.unsqueeze(0)).item()
snr = 10*np.log10(o.pow(2).mean().item()/max(mse,1e-20))
return {"mse": mse, "cos": cos, "snr": snr}
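# Reference NF4 round-trip used as the baseline: pad the flattened weight to a
# multiple of the group size, scale each group by its absmax, snap each value to
# the nearest of the 16 NF4 codebook levels (rounded constants from the QLoRA
# paper), then rescale and restore the original shape.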
def nf4_qd(W, gs=128):
s, n = W.shape, W.numel(); f = W.reshape(-1).float()
p = (gs-n%gs)%gs
    if p>0: f = torch.cat([f, torch.zeros(p, device=f.device)])  # keep pad on the same device as W
    g = f.reshape(-1,gs); sc = g.abs().amax(1).clamp(min=1e-10)
    cb = torch.tensor([-1.0,-0.6962,-0.5251,-0.3949,-0.2844,-0.1848,-0.0911,0.0,0.0796,0.1609,0.2461,0.3379,0.4407,0.5626,0.7230,1.0], device=f.device)
idx = ((g/sc.unsqueeze(1)).reshape(-1).unsqueeze(1)-cb.unsqueeze(0)).abs().argmin(1).reshape(-1,gs)
return (torch.gather(cb.unsqueeze(0).expand(idx.shape[0],-1),1,idx.long())*sc.unsqueeze(1)).reshape(-1)[:n].reshape(s)
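# Round-trip sanity check (sketch only, not executed by the benchmark):
#   W = torch.randn(256, 512)
#   print(meas(W, nf4_qd(W)))  # expect cosine similarity near 1, positive SNR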
def bench_quant():
from transformers import AutoModelForCausalLM
from little_fig.engine.figquant import figquant_quantize, figquant_dequantize
log("Loading TinyLlama FP32..."); reset()
model = AutoModelForCausalLM.from_pretrained(MODEL, torch_dtype=torch.float32, low_cpu_mem_usage=True)
methods = {"figquant":{},"nf4":{}}; n=0; fw_n=0
for name, param in model.named_parameters():
if param.ndim!=2 or param.numel()<1024: continue
W = param.data.float()
q = figquant_quantize(W, group_size=GROUP_SIZE, n_iters=8)
ef = meas(W, figquant_dequantize(q)); en = meas(W, nf4_qd(W,GROUP_SIZE))
for m,e in [("figquant",ef),("nf4",en)]:
for k,v in e.items(): methods[m].setdefault(k,[]).append(v)
if ef["mse"]<en["mse"]: fw_n+=1
n+=1
if n%20==0: log(f" {n} layers...")
avgs = {m:{k:float(np.mean(v)) for k,v in d.items()} for m,d in methods.items()}
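    # mvn > 0 means figquant achieved lower average MSE than the NF4 baseline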
mvn = (avgs["nf4"]["mse"]-avgs["figquant"]["mse"])/avgs["nf4"]["mse"]*100
log(f" FigQuant vs NF4: {mvn:+.1f}% MSE (wins {fw_n}/{n})")
del model; gc.collect()
return {"avgs":avgs,"n":n,"fw_nf4":fw_n,"mvn":mvn}
def load_data():
from datasets import load_dataset
return load_dataset("tatsu-lab/alpaca", split="train").select(range(1000))
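# Shared training loop for the two HF baselines (fp16 LoRA, BnB NF4): Alpaca-style
# prompts, bf16 autocast, gradient accumulation, loss/time recorded once per
# optimizer step so both are measured identically.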
def _hf_loop(model, tokenizer, dataset, name):
dev = next(model.parameters()).device
def tok_fn(ex):
inst=ex.get("instruction",""); inp=ex.get("input","").strip(); out=ex.get("output","")
txt = f"### Instruction:\n{inst}\n\n### Input:\n{inp}\n\n### Response:\n{out}" if inp else \
f"### Instruction:\n{inst}\n\n### Response:\n{out}"
e = tokenizer(txt, truncation=True, max_length=MAX_SEQ, padding="max_length")
e["labels"] = e["input_ids"].copy(); return e
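    # Labels are a straight copy of input_ids, so loss also covers prompt and
    # pad tokens (pad == eos). That inflates absolute loss but is applied the
    # same way in every run, so relative comparisons still hold.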
td = dataset.map(tok_fn, remove_columns=dataset.column_names); td.set_format("torch")
from torch.utils.data import DataLoader
dl = DataLoader(td, batch_size=BATCH_SIZE, shuffle=True,
collate_fn=lambda b: {k:torch.stack([x[k] for x in b]) for k in b[0] if isinstance(b[0][k],torch.Tensor)},
drop_last=True)
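    # Note: 1000 examples at BATCH_SIZE=4 give 250 micro-batches per pass, i.e.
    # ~62 optimizer steps at GRAD_ACCUM=4; TRAIN_STEPS is an upper bound, not a
    # guarantee (the figquant loop below has the same bound).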
opt = torch.optim.AdamW([p for p in model.parameters() if p.requires_grad], lr=LR, weight_decay=0.01)
    model.train(); losses=[]; times=[]; gs=0; al=0.0; reset(); t0=time.time(); ts=time.time()
for batch in dl:
if gs>=TRAIN_STEPS*GRAD_ACCUM: break
batch = {k:v.to(dev) for k,v in batch.items()}
with torch.autocast("cuda", dtype=torch.bfloat16):
loss = model(**batch).loss / GRAD_ACCUM
loss.backward(); al+=loss.item(); gs+=1
if gs%GRAD_ACCUM==0:
torch.nn.utils.clip_grad_norm_([p for p in model.parameters() if p.requires_grad], 1.0)
opt.step(); opt.zero_grad()
            # time covers the full optimizer step (all GRAD_ACCUM micro-batches)
            s=gs//GRAD_ACCUM; losses.append(al); times.append(time.time()-ts); al=0.0; ts=time.time()
if s%20==0: log(f" [{name}] step={s} loss={losses[-1]:.4f}")
tt=time.time()-t0; pm=gpu_mb()
del model,opt; gc.collect(); torch.cuda.empty_cache()
return {"method":name,"losses":[float(l) for l in losses],"final":float(losses[-1]) if losses else None,
"time_s":tt,"steps":len(losses),"ms_step":float(np.mean(times)*1000) if times else None,"gpu_mb":pm}
def train_fp16(ds):
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import LoraConfig, get_peft_model
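    # Baseline 1: fp16 base weights with LoRA adapters attached via peft;
    # gradient checkpointing trades recompute for activation memory.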
reset(); log("Training FP16 LoRA...")
m = AutoModelForCausalLM.from_pretrained(MODEL, torch_dtype=torch.float16, device_map="auto")
t = AutoTokenizer.from_pretrained(MODEL); t.pad_token=t.eos_token
m.gradient_checkpointing_enable(gradient_checkpointing_kwargs={"use_reentrant":False})
m = get_peft_model(m, LoraConfig(r=LORA_R, lora_alpha=LORA_ALPHA, target_modules=LORA_TARGETS, bias="none", task_type="CAUSAL_LM"))
return _hf_loop(m, t, ds, "fp16_lora")
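# Baseline 2: QLoRA-style setup with 4-bit NF4 base weights, double quantization,
# and bf16 compute; prepare_model_for_kbit_training applies the usual k-bit
# training fixes before the LoRA adapters are attached.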
def train_nf4(ds):
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
from peft import LoraConfig, get_peft_model, prepare_model_for_kbit_training
reset(); log("Training BnB NF4...")
m = AutoModelForCausalLM.from_pretrained(MODEL,
quantization_config=BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_quant_type="nf4",
bnb_4bit_use_double_quant=True, bnb_4bit_compute_dtype=torch.bfloat16), device_map="auto")
t = AutoTokenizer.from_pretrained(MODEL); t.pad_token=t.eos_token
m = prepare_model_for_kbit_training(m)
m = get_peft_model(m, LoraConfig(r=LORA_R, lora_alpha=LORA_ALPHA, target_modules=LORA_TARGETS, bias="none", task_type="CAUSAL_LM"))
return _hf_loop(m, t, ds, "bnb_nf4")
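# FigQuant run: littlefig's FigModel bundles the quantized base model, tokenizer,
# and LoRA setup, so the loop below hand-rolls tokenization and batching with the
# same Alpaca prompt format and otherwise mirrors _hf_loop step for step.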
def train_figquant(ds):
from little_fig.engine import FigModel
from little_fig.engine.tier import TrainingTier
from torch.utils.data import DataLoader
reset(); log("Training FigQuant LoRA...")
model = FigModel.from_pretrained(MODEL, lora_r=LORA_R, lora_alpha=LORA_ALPHA,
tier=TrainingTier.STREAMING_LORA, group_size=GROUP_SIZE, target_modules=LORA_TARGETS)
tok = model.tokenizer
examples = [dict(r) for r in ds]
def tok_fn(ex):
inst=ex.get("instruction",""); inp=ex.get("input","").strip(); out=ex.get("output","")
txt = f"### Instruction:\n{inst}\n\n### Input:\n{inp}\n\n### Response:\n{out}" if inp else \
f"### Instruction:\n{inst}\n\n### Response:\n{out}"
e = tok(txt, truncation=True, max_length=MAX_SEQ, padding="max_length")
return {"input_ids": e["input_ids"], "labels": e["input_ids"].copy(), "attention_mask": e["attention_mask"]}
tokenized = [tok_fn(ex) for ex in examples]
class SimpleDS(torch.utils.data.Dataset):
def __init__(self, data): self.data = data
def __len__(self): return len(self.data)
def __getitem__(self, i): return {k: torch.tensor(v, dtype=torch.long) for k, v in self.data[i].items()}
dl = DataLoader(SimpleDS(tokenized), batch_size=BATCH_SIZE, shuffle=True, drop_last=True)
dev = torch.device("cuda")
model = model.to(dev)
params = model.get_trainable_parameters()
opt = torch.optim.AdamW(params, lr=LR, weight_decay=0.01)
    model.model.train(); losses=[]; times=[]; gs=0; al=0.0; reset(); t0=time.time(); ts=time.time()
for batch in dl:
if gs>=TRAIN_STEPS*GRAD_ACCUM: break
batch = {k:v.to(dev) for k,v in batch.items()}
with torch.autocast("cuda", dtype=torch.bfloat16):
loss = model(input_ids=batch["input_ids"], attention_mask=batch["attention_mask"],
labels=batch["labels"]).loss / GRAD_ACCUM
loss.backward(); al+=loss.item(); gs+=1
if gs%GRAD_ACCUM==0:
torch.nn.utils.clip_grad_norm_(params, 1.0); opt.step(); opt.zero_grad()
            s=gs//GRAD_ACCUM; losses.append(al); times.append(time.time()-ts); al=0.0; ts=time.time()
if s%20==0: log(f" [figquant] step={s} loss={losses[-1]:.4f}")
tt=time.time()-t0; pm=gpu_mb()
del model,opt; gc.collect(); torch.cuda.empty_cache()
return {"method":"figquant_lora","losses":[float(l) for l in losses],"final":float(losses[-1]) if losses else None,
"time_s":tt,"steps":len(losses),"ms_step":float(np.mean(times)*1000) if times else None,"gpu_mb":pm}
if __name__ == "__main__":
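    # Quantization-quality benchmark first, then the three training runs;
    # safe_run isolates each phase so one failure does not abort the rest.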
    log("🍐 Little Fig GPU Benchmark")
log(f" PyTorch {torch.__version__}, CUDA={torch.cuda.is_available()}")
if torch.cuda.is_available():
log(f" GPU: {torch.cuda.get_device_name()} ({torch.cuda.get_device_properties(0).total_memory/1e9:.1f}GB)")
safe_run("quant", bench_quant)
ds = load_data()
safe_run("t_fp16", lambda: train_fp16(ds))
safe_run("t_nf4", lambda: train_nf4(ds))
safe_run("t_fig", lambda: train_figquant(ds))
log("\n"+"="*80)
log(" 🍐 RESULTS")
log("="*80)
if "quant" in RESULTS and "error" not in RESULTS["quant"]:
q=RESULTS["quant"]
log(f"\n Quantization: FigQuant vs NF4: {q['mvn']:+.1f}% MSE ({q['fw_nf4']}/{q['n']} layers)")
    log(f"\n {'Method':>12} {'Loss':>8} {'Time':>7} {'ms/step':>8} {'GPU MB':>8}")
    log(f" {'─'*48}")
for k in ["t_fp16","t_nf4","t_fig"]:
if k in RESULTS and "error" not in RESULTS[k]:
r=RESULTS[k]
            log(f" {r['method']:>12} {r['final']:>8.4f} {r['time_s']:>6.0f}s {r['ms_step']:>8.0f} {r['gpu_mb']:>8.0f}")
log("="*80)
with open("/app/results.json","w") as f: json.dump(RESULTS, f, indent=2, default=str)
    log("🍐 Done.")