Livengood Claude committed on
Commit 2dace0e · 1 Parent(s): b502091

Add throughput estimates, architecture details, export feature, and tips guide

New features:
- Tokens/second throughput estimates based on GPU memory bandwidth
- Model architecture details table (hidden size, heads, GQA ratio, vocab, max context)
- Context length scaling table showing KV cache at different lengths
- Export Report button to generate shareable text reports
- Tips & Guide tab with VRAM rules of thumb and recommended setups
- GPU bandwidth specs for all hardware (Consumer, Apple Silicon, Cloud)
- Best value AND fastest GPU recommendations
- Quantization quality indicators
- More example models (Hermes, Yi)
- Cross-model comparison set (Llama vs Mistral vs Qwen vs Gemma)

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
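For reference, a minimal standalone sketch of the two estimation heuristics this commit adds: memory-bandwidth-bound tokens/second and KV-cache sizing. The function names, the 0.5 efficiency factor, and the example GPU figures below are illustrative assumptions for this sketch, not measured values; the actual implementation is in the diff that follows.

```python
# Sketch of the commit's estimation heuristics (assumed names/constants, not benchmarks).

def estimate_tokens_per_sec(params, dtype_bytes, bandwidth_gbs, batch_size=1, efficiency=0.5):
    """Rough decode throughput: each generated token reads all weights once (memory-bound)."""
    model_gb = (params * dtype_bytes) / (1024 ** 3)
    if model_gb == 0:
        return 0
    return max(1, int((bandwidth_gbs / model_gb) * batch_size * efficiency))

def kv_cache_gb(layers, kv_heads, head_dim, context, batch_size=1, dtype_bytes=2):
    """KV cache size: 2 (K and V) * layers * batch * tokens * kv_heads * head_dim * bytes."""
    return (2 * layers * batch_size * context * kv_heads * head_dim * dtype_bytes) / (1024 ** 3)

# Example: a hypothetical 8B-parameter FP16 model on a ~1000 GB/s GPU
if __name__ == "__main__":
    print(estimate_tokens_per_sec(8e9, 2, 1000))       # ~33 tok/s
    print(round(kv_cache_gb(32, 8, 128, 8192), 2))      # ~1.0 GB of KV cache at 8K context
```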

Files changed (1)
  1. app.py +254 -76
app.py CHANGED
@@ -8,39 +8,39 @@ from functools import lru_cache
 
 api = HfApi()
 
-# Consumer GPUs (no hourly cost)
+# Consumer GPUs: (VRAM GB, Memory Bandwidth GB/s)
 CONSUMER_GPUS = {
-    "RTX 3080": 10,
-    "RTX 3080 Ti": 12,
-    "RTX 3090": 24,
-    "RTX 3090 Ti": 24,
-    "RTX 4080": 16,
-    "RTX 4080 Super": 16,
-    "RTX 4090": 24,
-    "RTX 5090": 32,
+    "RTX 3080": (10, 760),
+    "RTX 3080 Ti": (12, 912),
+    "RTX 3090": (24, 936),
+    "RTX 3090 Ti": (24, 1008),
+    "RTX 4080": (16, 717),
+    "RTX 4080 Super": (16, 736),
+    "RTX 4090": (24, 1008),
+    "RTX 5090": (32, 1792),
 }
 
-# Apple Silicon (no hourly cost)
+# Apple Silicon: (Unified Memory GB, Memory Bandwidth GB/s)
 APPLE_GPUS = {
-    "M1 Max": 64,
-    "M2 Max": 96,
-    "M2 Ultra": 192,
-    "M3 Max": 128,
-    "M4 Max": 128,
+    "M1 Max": (64, 400),
+    "M2 Max": (96, 400),
+    "M2 Ultra": (192, 800),
+    "M3 Max": (128, 400),
+    "M4 Max": (128, 546),
 }
 
-# Cloud/Datacenter GPUs (with hourly costs from major providers)
+# Cloud/Datacenter GPUs: (VRAM GB, $/hr, Memory Bandwidth GB/s)
 CLOUD_GPUS = {
-    "T4": (16, 0.35),
-    "L4": (24, 0.70),
-    "A10G": (24, 1.00),
-    "RTX A5000": (24, 0.80),
-    "RTX A6000": (48, 1.50),
-    "L40S": (48, 1.20),
-    "A100 40GB": (40, 3.00),
-    "A100 80GB": (80, 5.00),
-    "H100 80GB": (80, 8.00),
-    "H100 NVL": (94, 10.00),
+    "T4": (16, 0.35, 320),
+    "L4": (24, 0.70, 300),
+    "A10G": (24, 1.00, 600),
+    "RTX A5000": (24, 0.80, 768),
+    "RTX A6000": (48, 1.50, 768),
+    "L40S": (48, 1.20, 864),
+    "A100 40GB": (40, 3.00, 1555),
+    "A100 80GB": (80, 5.00, 2039),
+    "H100 80GB": (80, 8.00, 3350),
+    "H100 NVL": (94, 10.00, 3938),
 }
 
 DTYPE_BYTES = {
@@ -59,15 +59,6 @@ FRAMEWORKS = {
     "Ollama": 1.08,
 }
 
-CONTEXT_PRESETS = {
-    "2K (fast chat)": 2048,
-    "4K (standard)": 4096,
-    "8K (extended)": 8192,
-    "16K (long docs)": 16384,
-    "32K (very long)": 32768,
-    "128K (full context)": 131072,
-}
-
 
 def bytes_to_gb(b):
     return b / (1024 ** 3)
@@ -101,6 +92,18 @@ def get_params(info):
     return 0, "F16"
 
 
+def estimate_throughput(params, bandwidth_gbs, batch_size, dtype_bytes):
+    """Estimate tokens/second based on memory bandwidth (rough approximation)"""
+    # Simplified: tok/s ~ bandwidth / (params * dtype_bytes / batch_size)
+    # This is a rough estimate; actual throughput depends on many factors
+    model_gb = (params * dtype_bytes) / (1024**3)
+    if model_gb == 0:
+        return 0
+    # Rough heuristic: memory-bound inference
+    tokens_per_sec = (bandwidth_gbs / model_gb) * batch_size * 0.5  # 50% efficiency factor
+    return max(1, int(tokens_per_sec))
+
+
 def calculate(model_id, context, batch, mode, framework, num_gpus, lora_rank):
     """Main calculation function"""
     try:
@@ -128,10 +131,14 @@ def calculate(model_id, context, batch, mode, framework, num_gpus, lora_rank):
         layers = config.get("num_hidden_layers", config.get("n_layer", 32))
         kv_heads = config.get("num_key_value_heads", config.get("num_attention_heads", 32))
         head_dim = config.get("head_dim", 128)
+        hidden_size = config.get("hidden_size", 4096)
+        num_heads = config.get("num_attention_heads", 32)
+        vocab_size = config.get("vocab_size", 32000)
+        intermediate_size = config.get("intermediate_size", hidden_size * 4)
+        max_position = config.get("max_position_embeddings", 4096)
+
         if not head_dim:
-            hidden = config.get("hidden_size", 4096)
-            heads = config.get("num_attention_heads", 32)
-            head_dim = hidden // heads if heads else 128
+            head_dim = hidden_size // num_heads if num_heads else 128
 
         kv_bytes = 2 * layers * batch * context * kv_heads * head_dim * dtype_bytes
         kv_gb = bytes_to_gb(kv_bytes)
@@ -141,6 +148,20 @@ def calculate(model_id, context, batch, mode, framework, num_gpus, lora_rank):
         out.append("**" + str(round(params_b, 1)) + "B parameters** | " + dtype + " | " + str(layers) + " layers")
         out.append("")
 
+        # Architecture details
+        out.append("### Model Architecture")
+        out.append("| Property | Value |")
+        out.append("|----------|-------|")
+        out.append("| Hidden Size | " + str(hidden_size) + " |")
+        out.append("| Attention Heads | " + str(num_heads) + " |")
+        out.append("| KV Heads (GQA) | " + str(kv_heads) + " |")
+        out.append("| Layers | " + str(layers) + " |")
+        out.append("| Vocab Size | " + str(vocab_size) + " |")
+        out.append("| Max Context | " + str(max_position) + " |")
+        if kv_heads != num_heads:
+            out.append("| GQA Ratio | " + str(num_heads) + ":" + str(kv_heads) + " |")
+        out.append("")
+
         if mode == "Training (Full)":
             grad_gb = weights_gb
             opt_gb = bytes_to_gb(params * 8)
@@ -199,46 +220,56 @@ def calculate(model_id, context, batch, mode, framework, num_gpus, lora_rank):
         out.append("")
         out.append("## Total Required: " + str(round(total, 1)) + " GB")
 
-        # Consumer GPUs section with colors
+        # Consumer GPUs section with colors and throughput
         out.append("")
         out.append("### Consumer GPUs")
-        out.append("| GPU | VRAM | Status | Headroom |")
-        out.append("|-----|------|--------|----------|")
-        for gpu, vram in CONSUMER_GPUS.items():
+        out.append("| GPU | VRAM | Status | Headroom | Est. tok/s |")
+        out.append("|-----|------|--------|----------|------------|")
+        for gpu, (vram, bandwidth) in CONSUMER_GPUS.items():
             hr = vram - effective
             if hr >= 2:
                 status = "🟢 Good fit"
             elif hr >= 0:
-                status = "🟡 Tight fit"
+                status = "🟡 Tight"
             else:
-                status = "🔴 Too small"
+                status = "🔴 No"
             sign = "+" if hr >= 0 else ""
-            out.append("| " + gpu + " | " + str(vram) + "GB | " + status + " | " + sign + str(round(hr, 1)) + "GB |")
+            if hr >= 0 and mode == "Inference":
+                tps = estimate_throughput(params, bandwidth, batch, dtype_bytes)
+                tps_str = str(tps)
+            else:
+                tps_str = "-"
+            out.append("| " + gpu + " | " + str(vram) + "GB | " + status + " | " + sign + str(round(hr, 1)) + "GB | " + tps_str + " |")
 
         # Apple Silicon section
         out.append("")
         out.append("### Apple Silicon (Unified Memory)")
-        out.append("| Chip | Memory | Status | Headroom |")
-        out.append("|------|--------|--------|----------|")
-        for gpu, vram in APPLE_GPUS.items():
+        out.append("| Chip | Memory | Status | Headroom | Est. tok/s |")
+        out.append("|------|--------|--------|----------|------------|")
+        for gpu, (vram, bandwidth) in APPLE_GPUS.items():
             hr = vram - effective
             if hr >= 10:
                 status = "🟢 Excellent"
             elif hr >= 0:
                 status = "🟡 Usable"
             else:
-                status = "🔴 Too small"
+                status = "🔴 No"
             sign = "+" if hr >= 0 else ""
-            out.append("| " + gpu + " | " + str(vram) + "GB | " + status + " | " + sign + str(round(hr, 1)) + "GB |")
+            if hr >= 0 and mode == "Inference":
+                tps = estimate_throughput(params, bandwidth, batch, dtype_bytes)
+                tps_str = str(tps)
+            else:
+                tps_str = "-"
+            out.append("| " + gpu + " | " + str(vram) + "GB | " + status + " | " + sign + str(round(hr, 1)) + "GB | " + tps_str + " |")
 
-        # Cloud GPUs section with costs
+        # Cloud GPUs section with costs and throughput
         out.append("")
         out.append("### Cloud GPU Options")
-        out.append("| GPU | VRAM | Status | $/hour | $/day (8hr) | $/month |")
-        out.append("|-----|------|--------|--------|-------------|---------|")
+        out.append("| GPU | VRAM | Status | $/hour | $/day | Est. tok/s |")
+        out.append("|-----|------|--------|--------|-------|------------|")
 
         cloud_options = []
-        for gpu, (vram, cost) in CLOUD_GPUS.items():
+        for gpu, (vram, cost, bandwidth) in CLOUD_GPUS.items():
             hr = vram - effective
             if hr >= 2:
                 status = "🟢 Good"
@@ -247,34 +278,65 @@ def calculate(model_id, context, batch, mode, framework, num_gpus, lora_rank):
             else:
                 status = "🔴 No"
             daily = cost * 8
-            monthly = cost * 176 # 22 days * 8 hours
-            cloud_options.append((gpu, vram, hr, status, cost, daily, monthly))
+            if hr >= 0 and mode == "Inference":
+                tps = estimate_throughput(params, bandwidth, batch, dtype_bytes)
+            else:
+                tps = 0
+            cloud_options.append((gpu, vram, hr, status, cost, daily, bandwidth, tps))
 
         # Sort by cost for those that fit
         cloud_options.sort(key=lambda x: (x[2] < 0, x[4]))
 
-        for gpu, vram, hr, status, cost, daily, monthly in cloud_options:
+        for gpu, vram, hr, status, cost, daily, bandwidth, tps in cloud_options:
             sign = "+" if hr >= 0 else ""
-            out.append("| " + gpu + " | " + str(vram) + "GB | " + status + " | $" + str(round(cost, 2)) + " | $" + str(round(daily, 2)) + " | $" + str(int(monthly)) + " |")
+            tps_str = str(tps) if tps > 0 else "-"
+            out.append("| " + gpu + " | " + str(vram) + "GB | " + status + " | $" + str(round(cost, 2)) + " | $" + str(round(daily, 2)) + " | " + tps_str + " |")
 
         # Best value recommendation
-        fitting_gpus = [(gpu, cost) for gpu, (vram, cost) in CLOUD_GPUS.items() if vram >= effective]
+        fitting_gpus = [(gpu, cost, tps) for gpu, vram, hr, status, cost, daily, bw, tps in cloud_options if hr >= 0]
        if fitting_gpus:
             fitting_gpus.sort(key=lambda x: x[1])
             best = fitting_gpus[0]
             out.append("")
-            out.append("**Best value cloud option:** " + best[0] + " at $" + str(round(best[1], 2)) + "/hour")
+            rec = "**Recommended:** " + best[0] + " at $" + str(round(best[1], 2)) + "/hour"
+            if best[2] > 0:
+                rec += " (~" + str(best[2]) + " tok/s)"
+            out.append(rec)
+
+            # Best performance option
+            if len(fitting_gpus) > 1:
+                fitting_gpus.sort(key=lambda x: -x[2])
+                fastest = fitting_gpus[0]
+                if fastest[0] != best[0] and fastest[2] > 0:
+                    out.append("**Fastest:** " + fastest[0] + " (~" + str(fastest[2]) + " tok/s)")
 
         # Quantization suggestions if model is large
         if effective > 24:
             out.append("")
             out.append("### Quantization Options (to fit consumer GPUs)")
-            out.append("| Method | Estimated Size | Fits 24GB |")
-            out.append("|--------|----------------|-----------|")
-            for name, mult in [("INT8", 1.0), ("4-bit (GPTQ/AWQ)", 0.5), ("3-bit", 0.375), ("2-bit (extreme)", 0.25)]:
+            out.append("| Method | Est. Size | Fits 24GB | Quality |")
+            out.append("|--------|-----------|-----------|---------|")
+            quant_options = [
+                ("INT8", 1.0, "Excellent"),
+                ("4-bit (GPTQ/AWQ)", 0.5, "Very Good"),
+                ("3-bit", 0.375, "Good"),
+                ("2-bit (extreme)", 0.25, "Degraded"),
+            ]
+            for name, mult, quality in quant_options:
                 size = bytes_to_gb(params * mult) * 1.1
                 fits = "🟢 Yes" if size <= 24 else "🔴 No"
-                out.append("| " + name + " | " + str(round(size, 1)) + "GB | " + fits + " |")
+                out.append("| " + name + " | " + str(round(size, 1)) + "GB | " + fits + " | " + quality + " |")
+
+        # Context scaling info
+        out.append("")
+        out.append("### Context Length Scaling")
+        out.append("| Context | KV Cache | Total Est. |")
+        out.append("|---------|----------|------------|")
+        for ctx_opt in [2048, 4096, 8192, 16384, 32768]:
+            if ctx_opt <= max_position:
+                kv_opt = bytes_to_gb(2 * layers * batch * ctx_opt * kv_heads * head_dim * dtype_bytes)
+                total_opt = weights_gb + kv_opt
+                out.append("| " + str(ctx_opt) + " | " + str(round(kv_opt, 1)) + "GB | " + str(round(total_opt, 1)) + "GB |")
 
         return "\n".join(out)
     except Exception as e:
@@ -331,10 +393,46 @@ def compare(models_text, context):
         return "Error: " + str(e)
 
 
+def generate_report(model_id, context, batch, mode, framework, num_gpus, lora_rank):
+    """Generate a shareable text report"""
+    try:
+        if not model_id or not model_id.strip():
+            return "Enter a model ID first"
+
+        result = calculate(model_id, context, batch, mode, framework, num_gpus, lora_rank)
+
+        report = []
+        report.append("=" * 50)
+        report.append("VRAM CALCULATOR REPORT")
+        report.append("=" * 50)
+        report.append("")
+        report.append("Settings:")
+        report.append("  Model: " + model_id)
+        report.append("  Mode: " + mode)
+        report.append("  Context: " + str(context))
+        report.append("  Batch Size: " + str(batch))
+        report.append("  Framework: " + framework)
+        report.append("  GPUs: " + str(num_gpus))
+        if "LoRA" in mode:
+            report.append("  LoRA Rank: " + str(lora_rank))
+        report.append("")
+        report.append("-" * 50)
+        report.append("")
+        report.append(result)
+        report.append("")
+        report.append("-" * 50)
+        report.append("Generated by VRAM Calculator")
+        report.append("https://huggingface.co/spaces/Livengood/Instance-VRAM-Calculator")
+
+        return "\n".join(report)
+    except Exception as e:
+        return "Error generating report: " + str(e)
+
+
 # Build the interface
 with gr.Blocks(title="VRAM Calculator", theme=gr.themes.Soft()) as demo:
     gr.Markdown("# VRAM Calculator for LLMs")
-    gr.Markdown("Estimate VRAM requirements for HuggingFace models - inference, training, LoRA, and QLoRA")
+    gr.Markdown("Estimate VRAM requirements and throughput for HuggingFace models")
 
     with gr.Tabs():
         with gr.TabItem("Calculator"):
@@ -391,8 +489,12 @@ with gr.Blocks(title="VRAM Calculator", theme=gr.themes.Soft()) as demo:
                 info="Higher = more parameters"
             )
 
-            calc_btn = gr.Button("Calculate VRAM", variant="primary")
+            with gr.Row():
+                calc_btn = gr.Button("Calculate VRAM", variant="primary")
+                export_btn = gr.Button("Export Report", variant="secondary")
+
             output = gr.Markdown()
+            export_output = gr.Textbox(label="Exportable Report", lines=10, visible=False)
 
             calc_btn.click(
                 fn=calculate,
@@ -400,6 +502,16 @@ with gr.Blocks(title="VRAM Calculator", theme=gr.themes.Soft()) as demo:
                 outputs=output
             )
 
+            def show_export(model_id, context, batch, mode, framework, num_gpus, lora_rank):
+                report = generate_report(model_id, context, batch, mode, framework, num_gpus, lora_rank)
+                return gr.update(visible=True, value=report)
+
+            export_btn.click(
+                fn=show_export,
+                inputs=[model_in, ctx_in, batch_in, mode_in, framework_in, gpus_in, lora_in],
+                outputs=export_output
+            )
+
             gr.Markdown("### Popular Models")
             gr.Examples(
                 examples=[
@@ -415,6 +527,8 @@ with gr.Blocks(title="VRAM Calculator", theme=gr.themes.Soft()) as demo:
                     ["google/gemma-2-27b"],
                     ["microsoft/phi-3-mini-4k-instruct"],
                     ["deepseek-ai/DeepSeek-V2-Lite"],
+                    ["NousResearch/Hermes-3-Llama-3.1-8B"],
+                    ["01-ai/Yi-1.5-34B"],
                 ],
                 inputs=[model_in],
                 label="Click to load"
@@ -450,37 +564,101 @@ with gr.Blocks(title="VRAM Calculator", theme=gr.themes.Soft()) as demo:
                     ["mistralai/Mistral-7B-v0.1\nmistralai/Mixtral-8x7B-v0.1"],
                     ["Qwen/Qwen2.5-7B\nQwen/Qwen2.5-14B\nQwen/Qwen2.5-72B"],
                     ["google/gemma-2-2b\ngoogle/gemma-2-9b\ngoogle/gemma-2-27b"],
+                    ["meta-llama/Llama-3.1-8B\nmistralai/Mistral-7B-v0.1\nQwen/Qwen2.5-7B\ngoogle/gemma-2-9b"],
                 ],
                 inputs=[cmp_in],
                 label="Click to load comparison"
             )
 
         with gr.TabItem("GPU Reference"):
-            gr.Markdown("## GPU VRAM Reference")
+            gr.Markdown("## GPU VRAM & Bandwidth Reference")
+            gr.Markdown("Memory bandwidth significantly affects inference speed (tokens/second)")
+
            gr.Markdown("### Consumer GPUs (NVIDIA GeForce)")
-            consumer_md = "| GPU | VRAM | Notes |\n|-----|------|-------|\n"
-            for gpu, vram in CONSUMER_GPUS.items():
-                consumer_md += "| " + gpu + " | " + str(vram) + "GB | Consumer |\n"
+            consumer_md = "| GPU | VRAM | Bandwidth | Best For |\n|-----|------|-----------|----------|\n"
+            for gpu, (vram, bw) in CONSUMER_GPUS.items():
+                if vram <= 12:
+                    use = "Small models (3-7B)"
+                elif vram <= 16:
+                    use = "7B models"
+                else:
+                    use = "7B-13B models, fine-tuning"
+                consumer_md += "| " + gpu + " | " + str(vram) + "GB | " + str(bw) + " GB/s | " + use + " |\n"
             gr.Markdown(consumer_md)
 
             gr.Markdown("### Apple Silicon")
-            apple_md = "| Chip | Unified Memory | Notes |\n|------|----------------|-------|\n"
-            for gpu, vram in APPLE_GPUS.items():
-                apple_md += "| " + gpu + " | " + str(vram) + "GB | Shared CPU/GPU |\n"
+            apple_md = "| Chip | Unified Memory | Bandwidth | Notes |\n|------|----------------|-----------|-------|\n"
+            for gpu, (vram, bw) in APPLE_GPUS.items():
+                apple_md += "| " + gpu + " | " + str(vram) + "GB | " + str(bw) + " GB/s | Shared CPU/GPU |\n"
            gr.Markdown(apple_md)
 
             gr.Markdown("### Cloud/Datacenter GPUs")
-            cloud_md = "| GPU | VRAM | Typical $/hr | Best For |\n|-----|------|--------------|----------|\n"
-            for gpu, (vram, cost) in CLOUD_GPUS.items():
+            cloud_md = "| GPU | VRAM | Bandwidth | $/hr | Best For |\n|-----|------|-----------|------|----------|\n"
+            for gpu, (vram, cost, bw) in CLOUD_GPUS.items():
                 if vram <= 24:
                     use = "7B models, fine-tuning"
                 elif vram <= 48:
                     use = "13B-30B models"
                 else:
                     use = "70B+ models, training"
-                cloud_md += "| " + gpu + " | " + str(vram) + "GB | $" + str(round(cost, 2)) + " | " + use + " |\n"
+                cloud_md += "| " + gpu + " | " + str(vram) + "GB | " + str(bw) + " GB/s | $" + str(round(cost, 2)) + " | " + use + " |\n"
             gr.Markdown(cloud_md)
 
+            gr.Markdown("### Understanding Throughput")
+            gr.Markdown("""
+**Tokens per second (tok/s)** estimates are based on memory bandwidth and model size.
+
+- **Memory-bound inference**: Most LLM inference is limited by how fast weights can be loaded from memory
+- **Bandwidth formula**: `tok/s ≈ (bandwidth / model_size) × batch_size × efficiency`
+- **Batching**: Higher batch sizes improve throughput but use more VRAM for KV cache
+- **Quantization**: 4-bit models load 4x faster but may have quality tradeoffs
+
+*Estimates are approximate. Actual performance depends on implementation, optimizations, and workload.*
+            """)
+
+        with gr.TabItem("Tips & Guide"):
+            gr.Markdown("""
+## Quick Guide
+
+### Choosing the Right Mode
+
+| Mode | Use Case | VRAM Multiplier |
+|------|----------|-----------------|
+| **Inference** | Running predictions | 1x weights + KV cache |
+| **Training (Full)** | Training from scratch | 4-6x weights |
+| **LoRA** | Fine-tuning with adapters | 1.3x weights |
+| **QLoRA** | Memory-efficient fine-tuning | 0.5x weights + adapters |
+
+### VRAM Rule of Thumb
+
+- **Inference**: `params × 2 bytes` (FP16) + KV cache
+- **Training**: `params × 18-20 bytes` (weights + gradients + optimizer + activations)
+- **QLoRA**: `params × 0.5-0.6 bytes` (4-bit) + small adapter overhead
+
+### Fitting Large Models
+
+1. **Use quantization** (INT8, 4-bit) to reduce memory 2-4x
+2. **Reduce context length** to shrink KV cache
+3. **Use multi-GPU** for tensor parallelism
+4. **Try QLoRA** instead of full fine-tuning
+
+### Recommended Setups
+
+| Model Size | Inference | QLoRA Training |
+|------------|-----------|----------------|
+| 7B | RTX 3090/4090 (24GB) | RTX 3090/4090 |
+| 13B | A10G or 2x RTX 3090 | RTX 4090 (4-bit) |
+| 30B | A100 40GB or 2x RTX 4090 | A10G (4-bit) |
+| 70B | A100 80GB or 4x RTX 4090 | A100 40GB (4-bit) |
+
+### Cost Optimization Tips
+
+1. **Start small**: Test with smaller models first
+2. **Use spot instances**: 60-90% cheaper for training
+3. **Right-size**: Don't overpay for unused VRAM
+4. **Consider Apple Silicon**: M2/M3/M4 Max good for local inference
+            """)
+
     gr.Markdown("---")
     gr.Markdown("*Estimates are approximate. Actual usage varies by implementation, batch size, and optimizations.*")