Xenova HF staff committed on
Commit
c29646a
1 Parent(s): d2179b0

Set to 0 if metric not found

Browse files
src/auto_leaderboard/load_results.py CHANGED
@@ -91,7 +91,7 @@ def parse_eval_result(json_filepath: str) -> Tuple[str, list[dict]]:
91
 
92
  eval_results = []
93
  for benchmark, metric in zip(BENCHMARKS, METRICS):
94
- accs = np.array([v[metric] for k, v in data["results"].items() if benchmark in k])
95
  if accs.size == 0:
96
  continue
97
  mean_acc = np.mean(accs) * 100.0
 
91
 
92
  eval_results = []
93
  for benchmark, metric in zip(BENCHMARKS, METRICS):
94
+ accs = np.array([v.get(metric, 0) for k, v in data["results"].items() if benchmark in k])
95
  if accs.size == 0:
96
  continue
97
  mean_acc = np.mean(accs) * 100.0