Upload folder using huggingface_hub
Browse files- make_hellaswag_subset.py +34 -0
- measure_ttft.py +103 -0
- parse_results.py +407 -0
- run_benchmark.sh +184 -0
make_hellaswag_subset.py
ADDED
|
@@ -0,0 +1,34 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
|
| 2 |
+
"""Extract a seeded-random 400-task subset of HellaSwag matching GPTQModel's selection."""
|
| 3 |
+
|
| 4 |
+
import random
|
| 5 |
+
import sys
|
| 6 |
+
from pathlib import Path
|
| 7 |
+
|
| 8 |
+
SEED = 1
|
| 9 |
+
N_SAMPLES = 400
|
| 10 |
+
LINES_PER_TASK = 6
|
| 11 |
+
|
| 12 |
+
def main():
|
| 13 |
+
root = Path(__file__).resolve().parent.parent
|
| 14 |
+
full_path = root / "hellaswag_val_full.txt"
|
| 15 |
+
out_path = root / "hellaswag_val_400.txt"
|
| 16 |
+
|
| 17 |
+
lines = full_path.read_text().splitlines()
|
| 18 |
+
n_tasks = len(lines) // LINES_PER_TASK
|
| 19 |
+
|
| 20 |
+
indices = list(range(n_tasks))
|
| 21 |
+
rng = random.Random(SEED)
|
| 22 |
+
rng.shuffle(indices)
|
| 23 |
+
selected = indices[:N_SAMPLES]
|
| 24 |
+
|
| 25 |
+
with open(out_path, "w") as f:
|
| 26 |
+
for idx in selected:
|
| 27 |
+
start = idx * LINES_PER_TASK
|
| 28 |
+
for line in lines[start:start + LINES_PER_TASK]:
|
| 29 |
+
f.write(line + "\n")
|
| 30 |
+
|
| 31 |
+
print(f"Wrote {N_SAMPLES} tasks to {out_path}")
|
| 32 |
+
|
| 33 |
+
if __name__ == "__main__":
|
| 34 |
+
main()
|
measure_ttft.py
ADDED
|
@@ -0,0 +1,103 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
|
| 2 |
+
"""Measure TTFT and TPOT by streaming from llama-server."""
|
| 3 |
+
|
| 4 |
+
import argparse
|
| 5 |
+
import json
|
| 6 |
+
import subprocess
|
| 7 |
+
import sys
|
| 8 |
+
import time
|
| 9 |
+
import requests
|
| 10 |
+
|
| 11 |
+
# Path to the llama.cpp server binary; the script is expected to be run from
# the repository root.
SERVER_BIN = "./build/bin/llama-server"
# Local port the benchmark server listens on.
PORT = 8081
# Fixed prompt, repeated to reach roughly a 512-token prefill.
PROMPT = "Explain the difference between machine learning and deep learning in detail." * 8 # ~512 tokens
# Tokens to generate per streamed run.
MAX_TOKENS = 128
# Default number of runs to average over (overridable via --runs).
RUNS = 3
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
def wait_for_server(address, timeout=120):
    """Poll the server's /health endpoint until it answers 200 OK.

    Makes up to ``timeout`` attempts, sleeping one second between attempts.
    Returns True once the server reports healthy, False if it never does.
    """
    attempts_left = timeout
    while attempts_left > 0:
        attempts_left -= 1
        try:
            resp = requests.get(f"{address}/health", timeout=2)
        except Exception:
            resp = None  # server not up yet (connection refused, timeout, ...)
        if resp is not None and resp.status_code == 200:
            return True
        time.sleep(1)
    return False
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
def measure_once(address):
    """Stream one chat completion from the server and time it.

    Returns a tuple (ttft, tpot, n_tokens, t_total) in seconds, where ttft is
    the time to the first non-empty content chunk (None if nothing streamed),
    tpot the mean time per subsequent chunk (0 when fewer than two chunks
    arrived), n_tokens the number of content chunks, and t_total the wall
    time of the whole request.
    """
    request_body = {
        "messages": [{"role": "user", "content": PROMPT}],
        "max_tokens": MAX_TOKENS,
        "stream": True,
        "temperature": 0.0,
    }
    start = time.perf_counter()
    first_token_at = None
    n_tokens = 0
    url = f"{address}/v1/chat/completions"
    with requests.post(url, json=request_body, stream=True, timeout=120) as resp:
        for raw in resp.iter_lines():
            if not raw:
                continue
            text = raw.decode("utf-8")
            # Server-sent events: only "data: ..." lines carry payload.
            if not text.startswith("data:"):
                continue
            payload = text[5:].strip()
            if payload == "[DONE]":
                break
            try:
                event = json.loads(payload)
                content = event["choices"][0]["delta"].get("content", "")
            except Exception:
                continue  # malformed / non-delta chunk: ignore
            if content:
                if first_token_at is None:
                    first_token_at = time.perf_counter() - start
                n_tokens += 1
    elapsed = time.perf_counter() - start
    if n_tokens > 1:
        tpot = (elapsed - first_token_at) / max(n_tokens - 1, 1)
    else:
        tpot = 0
    return first_token_at, tpot, n_tokens, elapsed
|
| 62 |
+
|
| 63 |
+
|
| 64 |
+
def main():
    """Launch llama-server for the given model, run several streamed
    completions, and print the averaged TTFT/TPOT/latency (ms) as one JSON
    line on stdout. All progress output goes to stderr so stdout stays
    machine-readable for run_benchmark.sh.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("-m", "--model", required=True)
    parser.add_argument("--runs", type=int, default=RUNS)
    parser.add_argument("--device", default=None,
                        help="GGML device string, e.g. CUDA0 (default: all GPUs)")
    args = parser.parse_args()
    # Guard against --runs 0, which would divide by zero when averaging.
    if args.runs < 1:
        parser.error("--runs must be >= 1")

    address = f"http://127.0.0.1:{PORT}"
    cmd = [SERVER_BIN, "-m", args.model, "-ngl", "99", "--port", str(PORT), "--log-disable"]
    if args.device:
        cmd += ["-dev", args.device]
    print(f"Starting server: {' '.join(cmd)}", file=sys.stderr)
    proc = subprocess.Popen(cmd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)

    try:
        if not wait_for_server(address):
            print("ERROR: server did not start", file=sys.stderr)
            sys.exit(1)  # the finally block below tears the server down
        print("Server ready", file=sys.stderr)

        ttfts, tpots = [], []
        for i in range(args.runs):
            ttft, tpot, n_tokens, t_total = measure_once(address)
            # measure_once returns ttft=None when the server streamed no
            # content at all; the original crashed on `ttft * 1000` here.
            if ttft is None:
                print(f"ERROR: run {i+1} produced no tokens", file=sys.stderr)
                sys.exit(1)
            ttfts.append(ttft * 1000)
            tpots.append(tpot * 1000)
            print(f" Run {i+1}: TTFT={ttft*1000:.1f}ms TPOT={tpot*1000:.1f}ms tokens={n_tokens}", file=sys.stderr)

        avg_ttft = sum(ttfts) / len(ttfts)
        avg_tpot = sum(tpots) / len(tpots)
        result = {"ttft_ms": round(avg_ttft, 1), "tpot_ms": round(avg_tpot, 1), "latency_ms": round(avg_ttft + avg_tpot, 1)}
        print(json.dumps(result))
    finally:
        # Always reap the server, including on sys.exit (SystemExit still
        # runs this finally block).
        proc.kill()
        proc.wait()


if __name__ == "__main__":
    main()
|
parse_results.py
ADDED
|
@@ -0,0 +1,407 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
|
| 2 |
+
"""Parse all benchmark result files and print both summary tables + write CSV."""
|
| 3 |
+
|
| 4 |
+
import json
|
| 5 |
+
import os
|
| 6 |
+
import re
|
| 7 |
+
import csv
|
| 8 |
+
|
| 9 |
+
# Directory holding this script; every result file is read from (and the CSV
# written to) this directory.
RESULTS_DIR = os.path.dirname(os.path.abspath(__file__))

# One entry per (model, quant) combination, in presentation order:
#   (file_prefix_base, model_label, quant_file, quant_label)
# file_prefix_base + "-" + quant_file reconstructs the result-file prefix
# produced by run_benchmark.sh; the *_label fields are for table display.
KEYS = [
    ("llama-3.1-8b-instruct", "Llama-3.1-8B", "f16", "F16"),
    ("llama-3.1-8b-instruct", "Llama-3.1-8B", "Q8_0", "Q8_0"),
    ("llama-3.1-8b-instruct", "Llama-3.1-8B", "Q4_K_M", "Q4_K_M"),
    ("llama-3.1-8b-instruct", "Llama-3.1-8B", "Q2_K", "Q2_K"),
    ("qwen2.5-7b-instruct", "Qwen2.5-7B", "f16", "F16"),
    ("qwen2.5-7b-instruct", "Qwen2.5-7B", "Q8_0", "Q8_0"),
    ("qwen2.5-7b-instruct", "Qwen2.5-7B", "Q4_K_M", "Q4_K_M"),
    ("qwen2.5-7b-instruct", "Qwen2.5-7B", "Q2_K", "Q2_K"),
    ("gemma-2-9b-it", "Gemma-2-9B", "f16", "F16"),
    ("gemma-2-9b-it", "Gemma-2-9B", "Q8_0", "Q8_0"),
    ("gemma-2-9b-it", "Gemma-2-9B", "Q4_K_M", "Q4_K_M"),
    ("gemma-2-9b-it", "Gemma-2-9B", "Q2_K", "Q2_K"),
]
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
def path(prefix, suffix):
    """Absolute path of the result file named ``{prefix}_{suffix}`` in RESULTS_DIR."""
    filename = f"{prefix}_{suffix}"
    return os.path.join(RESULTS_DIR, filename)
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
def parse_bench(prefix):
    """Return (prefill_ts, prefill_std, decode_ts, decode_std, weight_gib) or Nones.

    Reads {prefix}_bench.json produced by llama-bench: the prompt-only entry
    (n_prompt > 0, n_gen == 0) carries prefill throughput and model size; the
    generation-only entry carries decode throughput. Any parse failure is
    reported as a warning and yields all-None.
    """
    p = path(prefix, "bench.json")
    if not os.path.exists(p):
        return None, None, None, None, None
    try:
        # Context manager closes the handle deterministically; the original
        # json.load(open(p)) left it to the garbage collector.
        with open(p) as fh:
            data = json.load(fh)
        prefill_ts = prefill_std = decode_ts = decode_std = weight_gib = None
        for r in data:
            if r.get("n_prompt", 0) > 0 and r.get("n_gen", 0) == 0:
                prefill_ts = round(r["avg_ts"], 1)
                prefill_std = round(r.get("stddev_ts", 0), 1)
                weight_gib = round(r["model_size"] / (1024**3), 2)
            elif r.get("n_gen", 0) > 0 and r.get("n_prompt", 0) == 0:
                decode_ts = round(r["avg_ts"], 1)
                decode_std = round(r.get("stddev_ts", 0), 1)
        return prefill_ts, prefill_std, decode_ts, decode_std, weight_gib
    except Exception as e:
        print(f" WARN bench parse error for {prefix}: {e}")
        return None, None, None, None, None
|
| 51 |
+
|
| 52 |
+
|
| 53 |
+
def parse_vram(prefix):
    """Return peak VRAM (GiB) from nvidia-smi dmon log.

    When monitoring a single GPU (-i N), this is simply the max fb value seen.
    When monitoring all GPUs (older logs), we fall back to sum of
    (peak β min) per GPU as an estimate of our job's incremental VRAM.
    """
    p = path(prefix, "vram.log")
    if not os.path.exists(p):
        return None
    try:
        from collections import defaultdict
        # Per-GPU min/max framebuffer usage (MiB) over the whole log.
        gpu_min = defaultdict(lambda: float("inf"))
        gpu_max = defaultdict(lambda: 0)
        with open(p) as f:
            # First scan only the leading "#" header lines: some dmon versions
            # add a Time column, which shifts every data column right by one.
            # The scan stops at the first non-comment (data) line.
            has_timestamp = False
            for header_line in f:
                if header_line.strip().startswith("#") and "Time" in header_line:
                    has_timestamp = True
                if not header_line.strip().startswith("#"):
                    break
            # Rewind and parse every data row (comment rows skipped below).
            f.seek(0)
            for line in f:
                line = line.strip()
                if not line or line.startswith("#"):
                    continue
                parts = line.split()
                if len(parts) < 3:
                    continue
                try:
                    if has_timestamp:
                        # cols: HH:MM:SS gpu_idx fb_mb bar1_mb ...
                        gpu = int(parts[1])
                        fb = int(parts[2])
                    else:
                        # cols: gpu_idx fb_mb bar1_mb ccpm_mb
                        gpu = int(parts[0])
                        fb = int(parts[1])
                    if fb < gpu_min[gpu]:
                        gpu_min[gpu] = fb
                    if fb > gpu_max[gpu]:
                        gpu_max[gpu] = fb
                except (ValueError, IndexError):
                    # Non-numeric rows (e.g. "-" placeholders) are ignored.
                    continue
        if not gpu_max:
            return None
        n_gpus = len(gpu_max)
        if n_gpus == 1:
            # Single-GPU log: peak fb is exactly our job's peak VRAM
            peak_mib = max(gpu_max.values())
        else:
            # Multi-GPU log: use delta (peak - min) per GPU to strip baseline
            peak_mib = sum(gpu_max[g] - gpu_min[g] for g in gpu_max)
        return round(peak_mib / 1024, 2)
    except Exception as e:
        print(f" WARN vram parse error for {prefix}: {e}")
        return None
|
| 110 |
+
|
| 111 |
+
|
| 112 |
+
def parse_ttft(prefix):
    """Return (ttft_ms, tpot_ms, latency_ms) or (None, None, None).

    Reads the single JSON object that measure_ttft.py writes to
    {prefix}_ttft.json; missing keys come back as None.
    """
    p = path(prefix, "ttft.json")
    if not os.path.exists(p):
        return None, None, None
    try:
        # Context manager closes the handle; json.load(open(p)) leaked it.
        with open(p) as fh:
            d = json.load(fh)
        return d.get("ttft_ms"), d.get("tpot_ms"), d.get("latency_ms")
    except Exception as e:
        print(f" WARN ttft parse error for {prefix}: {e}")
        return None, None, None
|
| 123 |
+
|
| 124 |
+
|
| 125 |
+
def parse_ppl(prefix):
    """Return (perplexity, ppl_std) or (None, None).

    {prefix}_ppl.txt holds llama-perplexity's final line, normally of the
    form "... PPL = 6.1234 +/- 0.0456"; runs without the +/- part yield a
    None std.
    """
    p = path(prefix, "ppl.txt")
    if not os.path.exists(p):
        return None, None
    try:
        # Context manager closes the handle; open(p).read() leaked it.
        with open(p) as fh:
            text = fh.read()
        m = re.search(r"PPL\s*=\s*([\d.]+)\s*\+/-\s*([\d.]+)", text)
        if m:
            return float(m.group(1)), float(m.group(2))
        # Fallback: value without an uncertainty estimate.
        m = re.search(r"PPL\s*=\s*([\d.]+)", text)
        return (float(m.group(1)), None) if m else (None, None)
    except Exception as e:
        print(f" WARN ppl parse error for {prefix}: {e}")
        return None, None
|
| 140 |
+
|
| 141 |
+
|
| 142 |
+
def parse_hellaswag(prefix):
    """Return accuracy % float or None.

    Parses the first percentage in {prefix}_hellaswag.txt, rounded to two
    decimals.
    """
    p = path(prefix, "hellaswag.txt")
    if not os.path.exists(p):
        return None
    try:
        # Context manager closes the handle; open(p).read() leaked it.
        with open(p) as fh:
            text = fh.read().strip()
        # format: "400\t78.50000000%\t[74.2%, 82.2%]"
        m = re.search(r"([\d.]+)%", text)
        return round(float(m.group(1)), 2) if m else None
    except Exception as e:
        print(f" WARN hellaswag parse error for {prefix}: {e}")
        return None
|
| 155 |
+
|
| 156 |
+
|
| 157 |
+
def parse_winogrande(prefix):
    """Return accuracy % float or None.

    {prefix}_winogrande.txt holds one whitespace-separated line whose second
    column is the percentage correct.
    """
    p = path(prefix, "winogrande.txt")
    if not os.path.exists(p):
        return None
    try:
        # Context manager closes the handle; open(p).read() leaked it.
        with open(p) as fh:
            text = fh.read().strip()
        # format: "1267\t73.4807\t..." β second col is % correct
        parts = text.split()
        return round(float(parts[1]), 2) if len(parts) >= 2 else None
    except Exception as e:
        print(f" WARN winogrande parse error for {prefix}: {e}")
        return None
|
| 170 |
+
|
| 171 |
+
|
| 172 |
+
def fmt(val, fmt_str, missing="β"):
    """Apply ``fmt_str`` to ``val`` via format(); fall back to ``missing`` when None."""
    if val is None:
        return missing
    return format(val, fmt_str)
|
| 174 |
+
|
| 175 |
+
|
| 176 |
+
def main():
    """Aggregate every per-(model, quant) result file into rows, write
    benchmark_results.csv, and print two plain-text tables plus their LaTeX
    counterparts to stdout.
    """
    # ββ Collect one row per (model, quant) combination ββββββββββββββββββββββββ
    rows = []
    for (file_prefix_base, model_label, quant_file, quant_label) in KEYS:
        prefix = f"{file_prefix_base}-{quant_file}"
        prefill, prefill_std, decode, decode_std, weight = parse_bench(prefix)
        peak_vram = parse_vram(prefix)
        ttft, tpot, latency = parse_ttft(prefix)
        ppl, ppl_std = parse_ppl(prefix)
        hellaswag = parse_hellaswag(prefix)
        winogrande = parse_winogrande(prefix)

        rows.append({
            "model": model_label,
            "quant": quant_label,
            "prefill_ts": prefill,
            "prefill_std": prefill_std,
            "decode_ts": decode,
            "decode_std": decode_std,
            "weight_gib": weight,
            "peak_vram": peak_vram,
            "ttft_ms": ttft,
            "tpot_ms": tpot,
            "latency_ms": latency,
            "ppl": ppl,
            "ppl_std": ppl_std,
            "hellaswag": hellaswag,
            "winogrande": winogrande,
        })

    # ββ Write CSV βββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
    csv_path = os.path.join(RESULTS_DIR, "benchmark_results.csv")
    fieldnames = ["model", "quant", "prefill_ts", "prefill_std",
                  "decode_ts", "decode_std",
                  "ttft_ms", "tpot_ms", "latency_ms",
                  "weight_gib", "peak_vram",
                  "ppl", "ppl_std", "hellaswag", "winogrande"]
    with open(csv_path, "w", newline="") as f:
        w = csv.DictWriter(f, fieldnames=fieldnames)
        w.writeheader()
        w.writerows(rows)
    print(f"CSV written β {csv_path}\n")

    # ββ Table 1: Speed & Memory βββββββββββββββββββββββββββββββββββββββββββββββ
    print("TABLE 1 β Speed & Memory")
    print("β" * 120)
    hdr = (f"{'Model':<22} {'Quant':<8} {'Prefill (t/s)':>18} {'Decode (t/s)':>18}"
           f" {'TTFT':>8} {'TPOT':>8} {'Latency':>9} {'Wt(GiB)':>8} {'PkVRAM':>8}")
    print(hdr)
    print(f"{'':22} {'':8} {'mean Β± std':>18} {'mean Β± std':>18}"
          f" {'(ms)':>8} {'(ms)':>8} {'(ms)':>9} {'':>8} {'(GiB)':>8}")
    print("β" * 120)

    prev_model = None
    # enumerate() replaces the original rows.index(r) lookup, which was O(n)
    # per row and would return the wrong index if two rows compared equal.
    for idx, r in enumerate(rows):
        if r["model"] != prev_model and prev_model is not None:
            print()  # blank line between model groups
        prev_model = r["model"]
        is_first = idx == 0 or rows[idx - 1]["model"] != r["model"]

        if r["prefill_ts"] is not None and r["prefill_std"] is not None:
            prefill_col = f"{r['prefill_ts']:.1f}Β±{r['prefill_std']:.1f}"
        else:
            prefill_col = "β"
        if r["decode_ts"] is not None and r["decode_std"] is not None:
            decode_col = f"{r['decode_ts']:.1f}Β±{r['decode_std']:.1f}"
        else:
            decode_col = "β"

        print(
            f"{r['model'] if is_first else '':22}"
            f" {r['quant']:<8}"
            f" {prefill_col:>18}"
            f" {decode_col:>18}"
            f" {fmt(r['ttft_ms'], '8.1f')}"
            f" {fmt(r['tpot_ms'], '8.1f')}"
            f" {fmt(r['latency_ms'], '9.1f')}"
            f" {fmt(r['weight_gib'], '8.2f')}"
            f" {fmt(r['peak_vram'], '8.2f')}"
        )
    print("β" * 120)

    # ββ Table 2: Quality ββββββββββββββββββββββββββββββββββββββββββββββββββββββ
    print("\nTABLE 2 β Quality")
    print("β" * 70)
    print(f"{'Model':<22} {'Quant':<8} {'PPLβ':>8} {'HellaSwagβ':>12} {'Winograndeβ':>13}")
    print(f"{'':22} {'':8} {'':>8} {'(%)':>12} {'(%)':>13}")
    print("β" * 70)

    prev_model = None
    for idx, r in enumerate(rows):
        if r["model"] != prev_model and prev_model is not None:
            print()
        prev_model = r["model"]
        is_first = idx == 0 or rows[idx - 1]["model"] != r["model"]
        print(
            f"{r['model'] if is_first else '':22}"
            f" {r['quant']:<8}"
            f" {fmt(r['ppl'], '8.2f')}"
            f" {fmt(r['hellaswag'], '12.2f')}"
            f" {fmt(r['winogrande'], '13.2f')}"
        )
    print("β" * 70)

    # ββ LaTeX: Quality table βββββββββββββββββββββββββββββββββββββββββββββββββ
    print()
    print(r"""\begin{table}[t]
\centering
\caption{%
Model quality metrics at multiple quantization levels.
\textbf{PPL} = perplexity on the Wikitext-2 test set
(lower is better).
\textbf{HellaSwag} = accuracy on 400 commonsense-NLI tasks from the
HellaSwag validation set (higher is better).
\textbf{Winogrande} = accuracy on 1{,}267 debiased pronoun-resolution
tasks (higher is better).
Both accuracy benchmarks are evaluated via log-likelihood ranking.%
}
\label{tab:llamacpp_quality}
\begin{tabular}{@{} l l r rr @{}}
\toprule
\textbf{Model} & \textbf{Quant} &
\textbf{PPL\,$\downarrow$} &
\textbf{HellaSwag (\%)\,$\uparrow$} &
\textbf{Winogrande (\%)\,$\uparrow$} \\""")

    for idx, r in enumerate(rows):
        is_first = idx == 0 or rows[idx - 1]["model"] != r["model"]
        if is_first:
            print(r" \midrule")

        quant = r["quant"].replace("_", r"\_")
        # Pad continuation rows so the & columns stay visually aligned.
        name_col = r["model"] if is_first else " " * len(r["model"])

        if r["ppl"] is not None and r["ppl_std"] is not None:
            ppl_str = f"${r['ppl']:.2f}\\pm{r['ppl_std']:.2f}$"
        elif r["ppl"] is not None:
            ppl_str = f"{r['ppl']:.2f}"
        else:
            ppl_str = "---"

        hs = f"{r['hellaswag']:.2f}" if r["hellaswag"] is not None else "---"
        wg = f"{r['winogrande']:.2f}" if r["winogrande"] is not None else "---"

        print(f" {name_col} & {quant} & {ppl_str} & {hs} & {wg} \\\\")

    print(r""" \bottomrule
\end{tabular}
\end{table}""")

    # ββ LaTeX: Speed & Memory table ββββββββββββββββββββββββββββββββββββββββββ
    print()
    print(r"""\begin{table*}[t]
\centering
\caption{%
Inference speed and memory usage for three open-weight LLMs at multiple
quantization levels, measured on NVIDIA~L40S GPU
using llama.cpp.
\textbf{Prefill} = prompt-processing throughput at 512 input tokens
(PP512, tokens/s);
\textbf{Decode} = text-generation throughput at 128 output tokens
(TG128, tokens/s);
\textbf{TTFT} = time-to-first-token;
\textbf{TPOT} = time-per-output-token;
\textbf{Latency} = TTFT\,+\,TPOT (mean of 3 streaming runs,
512-token prompt, 128 output tokens, temperature~0);
\textbf{Weight} = model-weight VRAM footprint;
\textbf{Peak} = maximum VRAM during benchmark.%
}
\label{tab:llamacpp_speed_memory}
\resizebox{\linewidth}{!}{%
\begin{tabular}{@{} l l rr rrr rr @{}}
\toprule
\multirow{2}{*}{\textbf{Model}} &
\multirow{2}{*}{\textbf{Quant}} &
\multicolumn{2}{c}{\textbf{Throughput (t/s)}} &
\multicolumn{3}{c}{\textbf{Latency (ms)}} &
\multicolumn{2}{c}{\textbf{VRAM (GiB)}} \\
\cmidrule(lr){3-4}\cmidrule(lr){5-7}\cmidrule(lr){8-9}
& &
\textbf{Prefill} & \textbf{Decode} &
\textbf{TTFT} & \textbf{TPOT} & \textbf{Total} &
\textbf{Weight} & \textbf{Peak} \\""")

    for idx, r in enumerate(rows):
        is_first = idx == 0 or rows[idx - 1]["model"] != r["model"]
        if is_first:
            print(r" \midrule")

        quant = r["quant"].replace("_", r"\_")
        name_col = r["model"] if is_first else " " * len(r["model"])

        if r["prefill_ts"] is not None and r["prefill_std"] is not None:
            prefill_str = f"${r['prefill_ts']:.0f}\\pm{r['prefill_std']:.0f}$"
        elif r["prefill_ts"] is not None:
            prefill_str = f"{r['prefill_ts']:.0f}"
        else:
            prefill_str = "---"

        if r["decode_ts"] is not None and r["decode_std"] is not None:
            decode_str = f"${r['decode_ts']:.1f}\\pm{r['decode_std']:.1f}$"
        elif r["decode_ts"] is not None:
            decode_str = f"{r['decode_ts']:.1f}"
        else:
            decode_str = "---"

        ttft = f"{r['ttft_ms']:.1f}" if r["ttft_ms"] is not None else "---"
        tpot = f"{r['tpot_ms']:.2f}" if r["tpot_ms"] is not None else "---"
        lat = f"{r['latency_ms']:.1f}" if r["latency_ms"] is not None else "---"
        wt = f"{r['weight_gib']:.2f}" if r["weight_gib"] is not None else "---"
        pk = f"{r['peak_vram']:.2f}" if r["peak_vram"] is not None else "---"

        print(
            f" {name_col} & {quant} & "
            f"{prefill_str} & {decode_str} & "
            f"{ttft} & {tpot} & {lat} & "
            f"{wt} & {pk} \\\\"
        )

    print(r""" \bottomrule
\end{tabular}%
}
\end{table*}""")


if __name__ == "__main__":
    main()
|
run_benchmark.sh
ADDED
|
@@ -0,0 +1,184 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env bash
|
| 2 |
+
# run_benchmark.sh β run all llama.cpp benchmarks for 3 LLMs Γ 4 quants
|
| 3 |
+
#
|
| 4 |
+
# Produces per-combination result files in the same directory as this script:
|
| 5 |
+
# {prefix}_bench.json β llama-bench throughput + model size
|
| 6 |
+
# {prefix}_vram.log β nvidia-smi dmon (raw, parsed later)
|
| 7 |
+
# {prefix}_ttft.json β TTFT / TPOT from llama-server stream
|
| 8 |
+
# {prefix}_ppl.txt β perplexity on wikitext-2
|
| 9 |
+
# {prefix}_hellaswag.txt β HellaSwag accuracy (400 tasks)
|
| 10 |
+
# {prefix}_winogrande.txtβ Winogrande accuracy (all tasks)
|
| 11 |
+
#
|
| 12 |
+
# Run with: bash results/run_benchmark.sh
|
| 13 |
+
# Skip already-done runs: set SKIP_EXISTING=1 (default)
|
| 14 |
+
# βββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
|
| 15 |
+
set -euo pipefail

# Locations: this script lives in results/; the repository root is one level up.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"

# llama.cpp binaries (llama-perplexity also runs the HellaSwag and
# Winogrande log-likelihood evals via its --hellaswag / --winogrande modes).
BENCH="$ROOT/build/bin/llama-bench"
PPL="$ROOT/build/bin/llama-perplexity"

# Evaluation inputs.
WIKI="$ROOT/wikitext-2-raw/wiki.test.raw"
HELLASWAG="$ROOT/hellaswag_val_400.txt"
WINOGRANDE="$ROOT/winogrande-debiased-eval.csv"
TTFT_SCRIPT="$SCRIPT_DIR/measure_ttft.py"

# Tunables (the first three are overridable via the environment).
SKIP_EXISTING="${SKIP_EXISTING:-1}" # set to 0 to re-run everything
SKIP_WINOGRANDE="${SKIP_WINOGRANDE:-0}" # set to 1 to skip Winogrande entirely
DEVICE="${DEVICE:-CUDA0}" # single GPU to use (CUDA0 = GPU 0)
GPU_IDX=0 # nvidia-smi GPU index matching DEVICE
NGL=99 # GPU layers
PPL_CTX=512 # context window for perplexity
PPL_CHUNKS=400 # chunks to evaluate (~488 total); all chunks for full accuracy
PPL_BATCH=8192 # n_batch for perplexity: n_seq = PPL_BATCH/PPL_CTX
# 8192/512 = 16 chunks processed in parallel per pass
HELLASWAG_TASKS=400 # subset file already contains exactly 400 tasks

# ββ Model paths βββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
# Pinned huggingface_hub snapshot directories for downloaded quants.
HF_LLAMA="/home/user1/.cache/huggingface/hub/models--bartowski--Meta-Llama-3.1-8B-Instruct-GGUF/snapshots/bf5b95e96dac0462e2a09145ec66cae9a3f12067"
HF_QWEN="/home/user1/.cache/huggingface/hub/models--bartowski--Qwen2.5-7B-Instruct-GGUF/snapshots/8911e8a47f92bac19d6f5c64a2e2095bd2f7d031"
HF_GEMMA="/home/user1/.cache/huggingface/hub/models--bartowski--gemma-2-9b-it-GGUF/snapshots/d731033f3dc4018261fd39896e50984d398b4ac5"

# GGUF files: some quants come straight from the HF cache, others were
# produced locally under $ROOT/models.
declare -A MODEL # MODEL[key] = path
MODEL[llama-f16]="$ROOT/models/llama-3.1-8b-instruct-f16.gguf"
MODEL[llama-Q8_0]="$HF_LLAMA/Meta-Llama-3.1-8B-Instruct-Q8_0.gguf"
MODEL[llama-Q4_K_M]="$ROOT/models/llama-3.1-8b-instruct-Q4_K_M.gguf"
MODEL[llama-Q2_K]="$HF_LLAMA/Meta-Llama-3.1-8B-Instruct-Q2_K.gguf"
MODEL[qwen-f16]="$HF_QWEN/Qwen2.5-7B-Instruct-f16.gguf"
MODEL[qwen-Q8_0]="$HF_QWEN/Qwen2.5-7B-Instruct-Q8_0.gguf"
MODEL[qwen-Q4_K_M]="$HF_QWEN/Qwen2.5-7B-Instruct-Q4_K_M.gguf"
MODEL[qwen-Q2_K]="$HF_QWEN/Qwen2.5-7B-Instruct-Q2_K.gguf"
MODEL[gemma-f16]="$ROOT/models/gemma-2-9b-it-f16.gguf"
MODEL[gemma-Q8_0]="$ROOT/models/gemma-2-9b-it-Q8_0.gguf"
MODEL[gemma-Q4_K_M]="$ROOT/models/gemma-2-9b-it-Q4_K_M.gguf"
MODEL[gemma-Q2_K]="$HF_GEMMA/gemma-2-9b-it-Q2_K.gguf"
# Result file prefix for each key (matches existing naming convention)
# These prefixes must stay in sync with the KEYS list in parse_results.py.
declare -A PREFIX
PREFIX[llama-f16]="llama-3.1-8b-instruct-f16"
PREFIX[llama-Q8_0]="llama-3.1-8b-instruct-Q8_0"
PREFIX[llama-Q4_K_M]="llama-3.1-8b-instruct-Q4_K_M"
PREFIX[llama-Q2_K]="llama-3.1-8b-instruct-Q2_K"
PREFIX[qwen-f16]="qwen2.5-7b-instruct-f16"
PREFIX[qwen-Q8_0]="qwen2.5-7b-instruct-Q8_0"
PREFIX[qwen-Q4_K_M]="qwen2.5-7b-instruct-Q4_K_M"
PREFIX[qwen-Q2_K]="qwen2.5-7b-instruct-Q2_K"
PREFIX[gemma-f16]="gemma-2-9b-it-f16"
PREFIX[gemma-Q8_0]="gemma-2-9b-it-Q8_0"
PREFIX[gemma-Q4_K_M]="gemma-2-9b-it-Q4_K_M"
PREFIX[gemma-Q2_K]="gemma-2-9b-it-Q2_K"
# Processing order for the main loop below.
KEYS=(
  llama-f16 llama-Q8_0 llama-Q4_K_M llama-Q2_K
  qwen-f16 qwen-Q8_0 qwen-Q4_K_M qwen-Q2_K
  gemma-f16 gemma-Q8_0 gemma-Q4_K_M gemma-Q2_K
)
|
| 76 |
+
|
| 77 |
+
# ββ Helpers βββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
|
| 78 |
+
# Print a message prefixed with the current wall-clock time.
log() {
  printf '[%s] %s\n' "$(date '+%H:%M:%S')" "$*"
}
# Succeed (exit 0) when skipping is enabled and the result file is non-empty.
skip() {
  [[ "$SKIP_EXISTING" == "1" ]] && [[ -s "$1" ]]
}
|
| 80 |
+
|
| 81 |
+
# ββ Main loop βββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
|
| 82 |
+
# Run every benchmark stage for each (model, quant) key in order. Each stage
# is independently skippable when its output file already exists.
for KEY in "${KEYS[@]}"; do
  MODEL_PATH="${MODEL[$KEY]}"
  PFX="$SCRIPT_DIR/${PREFIX[$KEY]}"

  if [[ ! -f "$MODEL_PATH" ]]; then
    echo "WARNING: model not found, skipping $KEY: $MODEL_PATH"
    continue
  fi

  log "βββ $KEY βββ"

  # ββ 1. llama-bench (throughput + model size) with VRAM monitoring βββββββββ
  BENCH_OUT="$PFX"_bench.json
  VRAM_OUT="$PFX"_vram.log
  if skip "$BENCH_OUT"; then
    log " [bench] skipping (exists)"
  else
    # Sample framebuffer usage once per second in the background while the
    # bench runs; the log is parsed later by parse_results.py.
    log " [bench] starting nvidia-smi dmon (GPU $GPU_IDX only)..."
    nvidia-smi dmon -s m -d 1 -i "$GPU_IDX" > "$VRAM_OUT" &
    DMON_PID=$!

    log " [bench] running llama-bench (single GPU: $DEVICE)..."
    "$BENCH" \
      -m "$MODEL_PATH" \
      -ngl "$NGL" \
      -dev "$DEVICE" \
      -p 512 -n 128 -r 3 \
      -o json \
      > "$BENCH_OUT"

    # Stop the sampler; trailing `|| true` keeps set -e from aborting if the
    # process already exited. NOTE(review): if llama-bench itself fails,
    # set -e exits before this line and dmon is left running β consider a
    # trap for cleanup.
    kill "$DMON_PID" 2>/dev/null && wait "$DMON_PID" 2>/dev/null || true
    log " [bench] done β $BENCH_OUT"
  fi

  # ββ 2. TTFT / TPOT via llama-server ββββββββββββββββββββββββββββββββββββββ
  TTFT_OUT="$PFX"_ttft.json
  if skip "$TTFT_OUT"; then
    log " [ttft] skipping (exists)"
  else
    # measure_ttft.py prints one JSON line on stdout; its progress goes to
    # stderr, captured in the companion .log file.
    log " [ttft] running measure_ttft.py (single GPU: $DEVICE)..."
    python3 "$TTFT_SCRIPT" -m "$MODEL_PATH" --device "$DEVICE" \
      > "$TTFT_OUT" \
      2> "$PFX"_ttft.log
    log " [ttft] done β $TTFT_OUT"
  fi

  # ββ 3. Perplexity (wikitext-2, fixed context for cross-model comparability)
  PPL_OUT="$PFX"_ppl.txt
  if skip "$PPL_OUT"; then
    log " [ppl] skipping (exists)"
  else
    log " [ppl] running llama-perplexity (ctx=$PPL_CTX, chunks=$PPL_CHUNKS, batch=$PPL_BATCH, GPU: $DEVICE)..."
    # NOTE(review): with pipefail, a grep that matches nothing fails the
    # pipeline and (via set -e) aborts the whole script β verify that this
    # is the intended behavior when a tool's output format changes.
    "$PPL" \
      -m "$MODEL_PATH" \
      -ngl "$NGL" \
      -dev "$DEVICE" \
      -f "$WIKI" \
      -c "$PPL_CTX" \
      -b "$PPL_BATCH" \
      --chunks "$PPL_CHUNKS" \
      2>&1 | grep "^Final estimate" | tail -1 > "$PPL_OUT"
    log " [ppl] done β $PPL_OUT"
  fi

  # ββ 4. HellaSwag accuracy βββββββββββββββββββββββββββββββββββββββββββββββββ
  HS_OUT="$PFX"_hellaswag.txt
  if skip "$HS_OUT"; then
    log " [hellaswag] skipping (exists)"
  else
    log " [hellaswag] running ($HELLASWAG_TASKS tasks, GPU: $DEVICE)..."
    # Keep only the final cumulative-accuracy line (rows start with a digit).
    "$PPL" \
      -m "$MODEL_PATH" \
      -ngl "$NGL" \
      -dev "$DEVICE" \
      -f "$HELLASWAG" \
      --hellaswag \
      --hellaswag-tasks "$HELLASWAG_TASKS" \
      2>&1 | grep -E "^[0-9]+[[:space:]]" | tail -1 > "$HS_OUT"
    log " [hellaswag] done β $HS_OUT"
  fi

  # ββ 5. Winogrande accuracy ββββββββββββββββββββββββββββββββββββββββββββββββ
  WG_OUT="$PFX"_winogrande.txt
  if [[ "$SKIP_WINOGRANDE" == "1" ]]; then
    log " [winogrande] skipped (SKIP_WINOGRANDE=1)"
  elif skip "$WG_OUT"; then
    log " [winogrande] skipping (exists)"
  else
    log " [winogrande] running (GPU: $DEVICE)..."
    "$PPL" \
      -m "$MODEL_PATH" \
      -ngl "$NGL" \
      -dev "$DEVICE" \
      -f "$WINOGRANDE" \
      --winogrande \
      2>&1 | grep -E "^[0-9]+[[:space:]]" | tail -1 > "$WG_OUT"
    log " [winogrande] done β $WG_OUT"
  fi

  log " β $KEY complete"
done

log "All benchmarks done. Run: python3 $SCRIPT_DIR/parse_results.py"
|