derek-thomas (HF staff) committed
Commit 5d70faf
1 Parent(s): ef8c30b

Update app.py

Files changed (1):
  1. app.py +82 -73
app.py CHANGED
@@ -12,86 +12,105 @@ def convert_params(params):
     s = round(params / p, 2)
     return "%s %s" % (s, size_name[i])
 
-# Set defaults for missing arguments
-def set_defaults(args, defaults):
-    for key, value in defaults.items():
-        if getattr(args, key) is None:
-            setattr(args, key, value)
-    return args
-
-# Set value if it's None, else use the config value
-def set_if_none(args, key, config, config_key, defaults):
-    if getattr(args, key) is None:
-        setattr(args, key, config.get(config_key, defaults[key]))
-    return args
-
-# Get Hugging Face model arguments
-def get_hf_model_args(args, defaults):
-    if args.hf_model_name_or_path:
+# Get Hugging Face model configuration and update the parameters
+def get_hf_model_args(hf_model_name_or_path, num_layers, hidden_size, num_attention_heads, vocab_size, sequence_length):
+    if hf_model_name_or_path:
         try:
-            config = AutoConfig.from_pretrained(args.hf_model_name_or_path, trust_remote_code=True).to_dict()
+            config = AutoConfig.from_pretrained(hf_model_name_or_path, trust_remote_code=True).to_dict()
         except Exception as e:
-            raise gr.Error(f"Error fetching Hugging Face model: {str(e)}")
+            return None, f"Error fetching Hugging Face model: {str(e)}"
 
-        # Update arguments with Hugging Face model config values
-        args.num_layers = config.get("num_hidden_layers", defaults["num_layers"])
-        args.hidden_size = config.get("hidden_size", defaults["hidden_size"])
-        args.num_attention_heads = config.get("num_attention_heads", defaults["num_attention_heads"])
-        args.vocab_size = config.get("vocab_size", defaults["vocab_size"])
-        args.sequence_length = config.get("max_position_embeddings", defaults["sequence_length"])
-
-    return set_defaults(args, defaults)
+        # Update parameters with the Hugging Face model config values
+        num_layers = config.get("num_hidden_layers", num_layers)
+        hidden_size = config.get("hidden_size", hidden_size)
+        num_attention_heads = config.get("num_attention_heads", num_attention_heads)
+        vocab_size = config.get("vocab_size", vocab_size)
+        sequence_length = config.get("max_position_embeddings", sequence_length)
+
+    return {
+        "num_layers": num_layers,
+        "hidden_size": hidden_size,
+        "num_attention_heads": num_attention_heads,
+        "vocab_size": vocab_size,
+        "sequence_length": sequence_length,
+    }, None
+
+# ---- Parameter Calculation ---- #
+def calc_params(vocab_size, tied_embeddings, hidden_size, sequence_length, num_layers, moe, num_experts, expert_interval, topk, ffn_expansion_factor, num_mlp_linears, kv_size_ratio):
+    if tied_embeddings:
+        embedding_params = hidden_size * vocab_size
+    else:
+        embedding_params = 2 * hidden_size * vocab_size
+    position_embedding_params = hidden_size * sequence_length
+    attention_params = int(2 * (1 + kv_size_ratio) * num_layers * hidden_size * hidden_size)
+    layernorm_params = 13 * num_layers * hidden_size
+
+    if moe:
+        num_expert_layers = num_layers / expert_interval
+        ffn_expert_params = num_mlp_linears * ffn_expansion_factor * num_expert_layers * num_experts * hidden_size * hidden_size
+        ffn_dense_params = num_mlp_linears * ffn_expansion_factor * (num_layers - num_expert_layers) * hidden_size * hidden_size
+        ffn_params = ffn_expert_params + ffn_dense_params
+        gating_params = num_expert_layers * hidden_size * num_experts
+    else:
+        ffn_params = num_mlp_linears * ffn_expansion_factor * num_layers * hidden_size * hidden_size
+
+    total_params = embedding_params + attention_params + ffn_params + position_embedding_params + layernorm_params
+
+    if moe:
+        total_params += gating_params
+
+    return f"""
+    Embedding parameters: {convert_params(embedding_params)}
+    Attention parameters: {convert_params(attention_params)}
+    FFN parameters: {convert_params(ffn_params)}
+    {'Gating parameters: ' + convert_params(gating_params) if moe else ''}
+    Total Params in the Model: {convert_params(total_params)}
+    """
 
 # ---- Memory Calculation ---- #
 def calc_mem(hf_model_name_or_path, num_gpus, tensor_parallel_size, pipeline_parallel_size, batch_size_per_gpu, sequence_length, vocab_size, hidden_size, num_attention_heads, num_layers, ffn_expansion_factor, is_mixed_precision, misc_mem_gib):
+    model_params, hf_error = get_hf_model_args(hf_model_name_or_path, num_layers, hidden_size, num_attention_heads, vocab_size, sequence_length)
+
+    if hf_error:
+        return hf_error
 
-    # Define defaults
-    defaults = {
-        "num_layers": 44,
-        "hidden_size": 6144,
-        "num_attention_heads": 64,
-        "vocab_size": 51200,
-        "sequence_length": 2048,
-        "ffn_expansion_factor": 4,
-    }
-
-    # Create a simple args object to simulate parsed arguments
-    class Args:
-        def __init__(self, **kwargs):
-            for key, value in kwargs.items():
-                setattr(self, key, value)
-
-    args = Args(hf_model_name_or_path=hf_model_name_or_path, num_gpus=num_gpus, tensor_parallel_size=tensor_parallel_size,
-                pipeline_parallel_size=pipeline_parallel_size, batch_size_per_gpu=batch_size_per_gpu, sequence_length=sequence_length,
-                vocab_size=vocab_size, hidden_size=hidden_size, num_attention_heads=num_attention_heads, num_layers=num_layers,
-                ffn_expansion_factor=ffn_expansion_factor, is_mixed_precision=is_mixed_precision, misc_mem_gib=misc_mem_gib)
-
-    # Fetch Hugging Face model args if a model is provided
-    args = get_hf_model_args(args, defaults)
+    num_layers = model_params["num_layers"]
+    hidden_size = model_params["hidden_size"]
+    num_attention_heads = model_params["num_attention_heads"]
+    vocab_size = model_params["vocab_size"]
+    sequence_length = model_params["sequence_length"]
 
-    dp_degree = args.num_gpus / (args.tensor_parallel_size * args.pipeline_parallel_size)
-    embed_params = 2 * args.vocab_size * args.hidden_size
-    positional_params = args.hidden_size * args.sequence_length
-    ln_params = 8 * args.hidden_size * args.num_layers + (2 * args.hidden_size)
-    attention_params = int(2 * (1 + args.ffn_expansion_factor) * args.num_layers * args.hidden_size * args.hidden_size)
-    mlp_params = args.ffn_expansion_factor * args.num_layers * args.hidden_size * args.hidden_size
+    dp_degree = num_gpus / (tensor_parallel_size * pipeline_parallel_size)
+    embed_params = 2 * vocab_size * hidden_size
+    positional_params = hidden_size * sequence_length
+    ln_params = 8 * hidden_size * num_layers + (2 * hidden_size)
+    attention_params = int(2 * (1 + ffn_expansion_factor) * num_layers * hidden_size * hidden_size)
+    mlp_params = ffn_expansion_factor * num_layers * hidden_size * hidden_size
     total_params = embed_params + positional_params + ln_params + attention_params + mlp_params
 
-    bytes_per_param = 2 if args.is_mixed_precision else 4
+    bytes_per_param = 2 if is_mixed_precision else 4
     model_mem = total_params * bytes_per_param
-    per_gpu_mem_gib = (model_mem / (args.tensor_parallel_size * args.pipeline_parallel_size)) / 1024**3 + args.misc_mem_gib
+    per_gpu_mem_gib = (model_mem / (tensor_parallel_size * pipeline_parallel_size)) / 1024**3 + misc_mem_gib
 
     return f"Per-GPU Memory Required for Training: {per_gpu_mem_gib:.2f} GiB"
 
+# Combine param calculation and memory calculation in the result
+def calculate_model(hf_model_name_or_path, tied_embeddings, num_gpus, tensor_parallel_size, pipeline_parallel_size, batch_size_per_gpu, sequence_length, vocab_size, hidden_size, num_attention_heads, num_layers, ffn_expansion_factor, num_mlp_linears, kv_size_ratio, moe, num_experts, expert_interval, topk, is_mixed_precision, misc_mem_gib):
+    param_result = calc_params(vocab_size, tied_embeddings, hidden_size, sequence_length, num_layers, moe, num_experts, expert_interval, topk, ffn_expansion_factor, num_mlp_linears, kv_size_ratio)
+    mem_result = calc_mem(hf_model_name_or_path, num_gpus, tensor_parallel_size, pipeline_parallel_size, batch_size_per_gpu, sequence_length, vocab_size, hidden_size, num_attention_heads, num_layers, ffn_expansion_factor, is_mixed_precision, misc_mem_gib)
+    return param_result + "\n" + mem_result
+
 # ---- Gradio Interface ---- #
 with gr.Blocks() as demo:
     with gr.Tabs():
-        with gr.TabItem("Parameter Calculation"):
+        with gr.TabItem("Model Calculation"):
+            hf_model_name_or_path = gr.Textbox(label="HuggingFace Model Name or Path (optional)", value="")
             vocab_size = gr.Number(label="Vocab Size", value=51200)
             tied_embeddings = gr.Checkbox(label="Tied Embeddings", value=False)
             hidden_size = gr.Number(label="Hidden Size", value=6144)
             sequence_length = gr.Number(label="Sequence Length", value=2048)
             num_layers = gr.Number(label="Number of Layers", value=44)
+            num_attention_heads = gr.Number(label="Number of Attention Heads", value=64)
             ffn_expansion_factor = gr.Number(label="FFN Expansion Factor", value=4)
             num_mlp_linears = gr.Number(label="Number of Linear Layers per MLP Block", value=2)
             kv_size_ratio = gr.Number(label="KV Size Ratio", value=1.0)
@@ -102,27 +121,17 @@ with gr.Blocks() as demo:
             expert_interval = gr.Number(label="Expert Interval", value=1)
             topk = gr.Number(label="Top k Routing", value=1)
 
-            result = gr.Textbox(label="Output", interactive=False)
-            calculate_button = gr.Button("Calculate")
-            calculate_button.click(calc_params, inputs=[vocab_size, tied_embeddings, hidden_size, sequence_length, num_layers, moe, num_experts, expert_interval, topk, ffn_expansion_factor, num_mlp_linears, kv_size_ratio], outputs=result)
-
-        with gr.TabItem("Memory Calculation"):
-            hf_model_name_or_path = gr.Textbox(label="HuggingFace Model Name or Path", value="")
             num_gpus = gr.Number(label="Number of GPUs", value=1)
             tensor_parallel_size = gr.Number(label="Tensor Parallel Size", value=1)
            pipeline_parallel_size = gr.Number(label="Pipeline Parallel Size", value=1)
             batch_size_per_gpu = gr.Number(label="Batch Size per GPU", value=8)
-            sequence_length = gr.Number(label="Sequence Length", value=2048)
-            vocab_size = gr.Number(label="Vocab Size", value=51200)
-            hidden_size = gr.Number(label="Hidden Size", value=6144)
-            num_attention_heads = gr.Number(label="Number of Attention Heads", value=64)
-            num_layers = gr.Number(label="Number of Layers", value=44)
-            ffn_expansion_factor = gr.Number(label="FFN Expansion Factor", value=4)
             is_mixed_precision = gr.Checkbox(label="Mixed Precision", value=True)
             misc_mem_gib = gr.Number(label="Misc Memory Overhead (GiB)", value=5)
 
-            memory_result = gr.Textbox(label="Memory Calculation Result", interactive=False)
-            calc_memory_button = gr.Button("Calculate Memory")
-            calc_memory_button.click(calc_mem, inputs=[hf_model_name_or_path, num_gpus, tensor_parallel_size, pipeline_parallel_size, batch_size_per_gpu, sequence_length, vocab_size, hidden_size, num_attention_heads, num_layers, ffn_expansion_factor, is_mixed_precision, misc_mem_gib], outputs=memory_result)
+            result = gr.Textbox(label="Output", interactive=False)
+            calculate_button = gr.Button("Calculate")
+            calculate_button.click(calculate_model,
+                                   inputs=[hf_model_name_or_path, tied_embeddings, num_gpus, tensor_parallel_size, pipeline_parallel_size, batch_size_per_gpu, sequence_length, vocab_size, hidden_size, num_attention_heads, num_layers, ffn_expansion_factor, num_mlp_linears, kv_size_ratio, moe, num_experts, expert_interval, topk, is_mixed_precision, misc_mem_gib],
+                                   outputs=result)
 
 demo.launch()
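
For a quick sanity check of the refactored math, here is a minimal standalone sketch (not part of the commit) that re-derives the per-GPU memory estimate calc_mem returns for the interface defaults above (44 layers, hidden size 6144, vocab size 51200, sequence length 2048, FFN expansion factor 4, mixed precision, tensor and pipeline parallel sizes of 1, 5 GiB miscellaneous overhead):

# Standalone re-derivation of calc_mem's estimate for the UI defaults.
# Illustrative only; it mirrors the committed formulas rather than defining them.
num_layers, hidden_size = 44, 6144
vocab_size, sequence_length = 51200, 2048
ffn_expansion_factor = 4
tensor_parallel_size = pipeline_parallel_size = 1
misc_mem_gib = 5

embed_params = 2 * vocab_size * hidden_size                   # 629,145,600
positional_params = hidden_size * sequence_length             # 12,582,912
ln_params = 8 * hidden_size * num_layers + (2 * hidden_size)  # 2,174,976
attention_params = int(2 * (1 + ffn_expansion_factor) * num_layers * hidden_size * hidden_size)
mlp_params = ffn_expansion_factor * num_layers * hidden_size * hidden_size
total_params = embed_params + positional_params + ln_params + attention_params + mlp_params

bytes_per_param = 2  # mixed precision; 4 for fp32
model_mem = total_params * bytes_per_param
per_gpu_mem_gib = (model_mem / (tensor_parallel_size * pipeline_parallel_size)) / 1024**3 + misc_mem_gib
print(f"Per-GPU Memory Required for Training: {per_gpu_mem_gib:.2f} GiB")
# -> Per-GPU Memory Required for Training: 49.51 GiB (total_params is roughly 23.9B)

Note that after this commit a Hub lookup failure is surfaced through the (model_params, error) tuple returned by get_hf_model_args rather than a raised gr.Error, so a bad model name comes back in the output textbox instead of as an exception.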