Clémentine committed on
Commit ba25d90
Parent: c6b775f

do not display models with an empty metric result

src/auto_leaderboard/load_results.py CHANGED
@@ -91,8 +91,8 @@ def parse_eval_result(json_filepath: str) -> Tuple[str, list[dict]]:
 
     eval_results = []
     for benchmark, metric in zip(BENCHMARKS, METRICS):
-        accs = np.array([v.get(metric, 0) for k, v in data["results"].items() if benchmark in k])
-        if accs.size == 0:
+        accs = np.array([v.get(metric, None) for k, v in data["results"].items() if benchmark in k])
+        if accs.size == 0 or any([acc is None for acc in accs]):
             continue
         mean_acc = np.mean(accs) * 100.0
         eval_results.append(EvalResult(
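For context, a minimal sketch of what this change does, assuming a parsed `results` dict shaped roughly like the leaderboard JSON (the key format, metric name, and values below are illustrative, not the real payload):

import numpy as np

# Hypothetical payloads: one model reports the metric for every matching
# sub-task, the other is missing it for one sub-task.
complete = {"arc:challenge": {"acc_norm": 0.61}, "arc:easy": {"acc_norm": 0.85}}
partial = {"arc:challenge": {"acc_norm": 0.61}, "arc:easy": {}}

def mean_benchmark_acc(results, benchmark="arc", metric="acc_norm"):
    # Mirrors the patched loop body: a missing metric now yields None
    # instead of 0, and any None skips the benchmark entirely.
    accs = np.array([v.get(metric, None) for k, v in results.items() if benchmark in k])
    if accs.size == 0 or any(acc is None for acc in accs):
        return None
    return np.mean(accs) * 100.0

print(mean_benchmark_acc(complete))  # ~73.0
print(mean_benchmark_acc(partial))   # None (before the fix, 0 was averaged in: ~30.5)

With the old default of 0, a model missing a single metric still received a score, just a misleadingly low one; returning no result at all is what lets the leaderboard drop such models, per the commit message.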