Alan Liu committed on
Commit
3849813
1 Parent(s): c93009d
Files changed (1) hide show
  1. app.py +2 -2
app.py CHANGED
@@ -45,7 +45,7 @@ subtotal_parameters = [
45
  'embedding_weights',
46
  'attention_weights',
47
  'mlp_weights',
48
- 'model_total_size'
49
  ]
50
 
51
  subtotal_operations = [
@@ -112,7 +112,7 @@ with col2:
112
  parameter_count['embedding_weights'] = parameter_count['word_embedding'] + parameter_count['positional_embedding']
113
  parameter_count['attention_weights'] = parameter_count['attention_out'] + parameter_count['attention_Q'] + parameter_count['attention_K'] + parameter_count['attention_V']
114
  parameter_count['mlp_weights'] = parameter_count['mlp1'] + parameter_count['mlp2']
115
- parameter_count['model_total_size'] = inference_config['byte_per_parameter'] * (
116
  parameter_count['embedding_weights'] +
117
  parameter_count['attention_weights'] +
118
  parameter_count['mlp_weights'] +
 
45
  'embedding_weights',
46
  'attention_weights',
47
  'mlp_weights',
48
+ 'model_total_size (Byte)'
49
  ]
50
 
51
  subtotal_operations = [
 
112
  parameter_count['embedding_weights'] = parameter_count['word_embedding'] + parameter_count['positional_embedding']
113
  parameter_count['attention_weights'] = parameter_count['attention_out'] + parameter_count['attention_Q'] + parameter_count['attention_K'] + parameter_count['attention_V']
114
  parameter_count['mlp_weights'] = parameter_count['mlp1'] + parameter_count['mlp2']
115
+ parameter_count['model_total_size (Byte)'] = inference_config['byte_per_parameter'] * (
116
  parameter_count['embedding_weights'] +
117
  parameter_count['attention_weights'] +
118
  parameter_count['mlp_weights'] +