pingnie committed
Commit: 2acbfac
Parent: e1f274b

add gpu info

Files changed (2):
  1. backend-cli.py +2 -1
  2. src/display/utils.py +3 -5
backend-cli.py CHANGED

@@ -440,7 +440,8 @@ if __name__ == "__main__":
                          "mistralai/Mixtral-8x22B-v0.1", "mistralai/Mixtral-8x22B-Instruct-v0.1", "alpindale/WizardLM-2-8x22B",
                          "CohereForAI/c4ai-command-r-plus"] # Use model from arguments
     debug_task_name = ['mmlu', 'selfcheckgpt'] # Use task from arguments
-    precisions = ['4bit', 'float32', 'float16', '8bit']
+    # precisions = ['4bit', '8bit']
+    precisions = ['float32', 'float16']
     task_lst = TASKS_HARNESS.copy()
     for precision in precisions:
         for debug_model_name in debug_model_names:
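For context: this hunk narrows the debug sweep to full- and half-precision runs, leaving the quantized precisions behind as a comment. Below is a minimal, self-contained sketch of how the resulting debug matrix expands; the lists are abbreviated from the hunk above and the print statement is only a placeholder for the real evaluation call.

from itertools import product

# Abbreviated copies of the lists shown in the hunk above.
precisions = ['float32', 'float16']  # '4bit' / '8bit' are disabled in this commit
debug_model_names = ["mistralai/Mixtral-8x22B-Instruct-v0.1", "CohereForAI/c4ai-command-r-plus"]
debug_task_name = ['mmlu', 'selfcheckgpt']

# Same nesting order as the loops in backend-cli.py: precision -> model -> task.
for precision, model, task in product(precisions, debug_model_names, debug_task_name):
    # Placeholder for the real evaluation entry point in backend-cli.py.
    print(f"debug run: model={model} task={task} precision={precision}")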
src/display/utils.py CHANGED

@@ -105,16 +105,14 @@ for task in Tasks:
     # System performance metrics
     auto_eval_column_dict.append([f"{task.name}_end_to_end_time", ColumnContent, ColumnContent(f"{task.value.col_name} {E2Es}", "number", True)])
     auto_eval_column_dict.append([f"{task.name}_batch_size", ColumnContent, ColumnContent(f"{task.value.col_name} {BATCH_SIZE}", "number", True)])
+    auto_eval_column_dict.append([f"{task.name}_gpu_mem", ColumnContent, ColumnContent(f"{task.value.col_name} {GPU_Mem}", "number", True)])
     auto_eval_column_dict.append([f"{task.name}_gpu", ColumnContent, ColumnContent(f"{task.value.col_name} {GPU_Name}", "str", True)])
+    auto_eval_column_dict.append([f"{task.name}_gpu_util", ColumnContent, ColumnContent(f"{task.value.col_name} {GPU_Util}", "number", True)])
     if task.value.benchmark in MULTIPLE_CHOICEs:
         continue
-    auto_eval_column_dict.append([f"{task.name}_prefilling_time", ColumnContent, ColumnContent(f"{task.value.col_name} {PREs}", "number", False)])
+    # auto_eval_column_dict.append([f"{task.name}_prefilling_time", ColumnContent, ColumnContent(f"{task.value.col_name} {PREs}", "number", False)])
     auto_eval_column_dict.append([f"{task.name}_decoding_throughput", ColumnContent, ColumnContent(f"{task.value.col_name} {TS}", "number", True)])
 
-    auto_eval_column_dict.append([f"{task.name}_gpu_mem", ColumnContent, ColumnContent(f"{task.value.col_name} {GPU_Mem}", "number", True)])
-    auto_eval_column_dict.append([f"{task.name}_gpu_power", ColumnContent, ColumnContent(f"{task.value.col_name} {GPU_Power}", "number", False)])
-    auto_eval_column_dict.append([f"{task.name}_gpu_temp", ColumnContent, ColumnContent(f"{task.value.col_name} {GPU_TEMP}", "number", False)])
-    auto_eval_column_dict.append([f"{task.name}_gpu_util", ColumnContent, ColumnContent(f"{task.value.col_name} {GPU_Util}", "number", True)])
 
 # Model information
 auto_eval_column_dict.append(["model_type", ColumnContent, ColumnContent("Type", "str", False)])
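The appended triples only register leaderboard display columns; they are typically folded later into a dynamically built dataclass. Below is a minimal, self-contained sketch of that registration pattern; the ColumnContent fields and the make_dataclass step are assumptions based on how comparable leaderboard code is usually structured, not lines from this diff.

from dataclasses import dataclass, make_dataclass

@dataclass(frozen=True)
class ColumnContent:  # assumed shape; the real class may carry more fields
    name: str  # header text shown in the leaderboard UI
    type: str  # "number", "str", "markdown", ...
    displayed_by_default: bool

auto_eval_column_dict = []
# Each entry is [attribute_name, annotation, default] -- the same triple shape
# appended in the hunk above, here with one illustrative GPU-memory column.
auto_eval_column_dict.append(
    ["mmlu_gpu_mem", ColumnContent, ColumnContent("MMLU GPU Memory", "number", True)]
)

# make_dataclass turns the accumulated triples into a frozen dataclass whose
# class attributes hold the ColumnContent defaults.
AutoEvalColumn = make_dataclass("AutoEvalColumn", auto_eval_column_dict, frozen=True)
print(AutoEvalColumn.mmlu_gpu_mem.name)  # -> "MMLU GPU Memory"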