move model type sync outside
- src/leaderboard/read_evals.py +14 -15
- src/populate.py +5 -2
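
In short: update_model_type_with_open_llm_request_file moves from an EvalResult method to a module-level function in read_evals.py that takes the result as an argument and returns it, and the per-result model-type sync against the Open LLM Leaderboard request files moves out of get_raw_eval_results into a tqdm loop in get_leaderboard_df in populate.py. The rewritten helper also only overwrites model_type when the request file reports a type other than "Unknown".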
src/leaderboard/read_evals.py
CHANGED
@@ -128,18 +128,6 @@ class EvalResult:
         except Exception as e:
             print(f"Could not find request file for {self.org}/{self.model} -- path: {requests_path} -- {e}")
 
-    def update_model_type_with_open_llm_request_file(self, open_llm_requests_path):
-        """Finds the relevant request file for the current model and updates info with it"""
-        request_file = get_request_file_for_model_open_llm(open_llm_requests_path, self.full_model, self.precision.value.name)
-
-        if request_file:
-            try:
-                with open(request_file, "r") as f:
-                    request = json.load(f)
-                self.model_type = ModelType.from_str(request.get("model_type", "Unknown"))
-            except Exception as e:
-                pass
-
     def is_complete(self) -> bool:
         for task in Tasks:
             if task.value.benchmark not in self.results:
@@ -216,10 +204,23 @@ def get_request_file_for_model_open_llm(requests_path, model_name, precision):
         request_file = tmp_request_file
     return request_file
 
+def update_model_type_with_open_llm_request_file(result, open_llm_requests_path):
+    """Finds the relevant request file for the current model and updates info with it"""
+    request_file = get_request_file_for_model_open_llm(open_llm_requests_path, result.full_model, result.precision.value.name)
+
+    if request_file:
+        try:
+            with open(request_file, "r") as f:
+                request = json.load(f)
+            open_llm_model_type = request.get("model_type", "Unknown")
+            if open_llm_model_type != "Unknown":
+                result.model_type = ModelType.from_str(open_llm_model_type)
+        except Exception as e:
+            pass
+    return result
 
 def get_raw_eval_results(results_path: str,
                          requests_path: str,
-                         requests_path_open_llm: Optional[str] = None,
                          is_backend: bool = False) -> list[EvalResult]:
     """From the path of the results folder root, extract all needed info for results"""
     model_result_filepaths = []
@@ -243,8 +244,6 @@ def get_raw_eval_results(results_path: str,
         # Creation of result
         eval_result = EvalResult.init_from_json_file(model_result_filepath, is_backend=is_backend)
         eval_result.update_with_request_file(requests_path)
-        if requests_path_open_llm is not None:
-            eval_result.update_model_type_with_open_llm_request_file(requests_path_open_llm)
         # Store results of same eval together
         eval_name = eval_result.eval_name
         if eval_name in eval_results.keys():
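The one behavioral change beyond the relocation is the new guard: the old method always overwrote model_type, so a request file carrying "Unknown" could clobber a type that update_with_request_file had already resolved; the new function leaves model_type untouched in that case. A minimal sketch of that guard in isolation (FakeResult and sync_model_type are hypothetical stand-ins, not names from this repo):

import json
import tempfile

class FakeResult:
    """Stand-in for EvalResult with only the field the helper touches."""
    def __init__(self, model_type):
        self.model_type = model_type

def sync_model_type(result, request_file):
    # Mirrors the guard added in the new module-level function.
    with open(request_file, "r") as f:
        request = json.load(f)
    open_llm_model_type = request.get("model_type", "Unknown")
    if open_llm_model_type != "Unknown":
        result.model_type = open_llm_model_type
    return result

# A request file whose model_type is "Unknown" no longer clobbers a resolved type.
with tempfile.NamedTemporaryFile("w", suffix=".json", delete=False) as f:
    json.dump({"model_type": "Unknown"}, f)
    path = f.name

result = sync_model_type(FakeResult("pretrained"), path)
print(result.model_type)  # pretrained
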
src/populate.py
CHANGED
@@ -1,13 +1,13 @@
 import json
 import os
-
+from tqdm import tqdm
 import copy
 import pandas as pd
 
 from src.display.formatting import has_no_nan_values, make_clickable_model
 from src.display.utils import AutoEvalColumn, EvalQueueColumn
 from src.leaderboard.filter_models import filter_models
-from src.leaderboard.read_evals import get_raw_eval_results, EvalResult
+from src.leaderboard.read_evals import get_raw_eval_results, EvalResult, update_model_type_with_open_llm_request_file
 
 from src.backend.envs import Tasks as BackendTasks
 from src.display.utils import Tasks
@@ -21,6 +21,9 @@ def get_leaderboard_df(results_path: str,
                        is_backend: bool = False) -> tuple[list[EvalResult], pd.DataFrame]:
     # Returns a list of EvalResult
     raw_data: list[EvalResult] = get_raw_eval_results(results_path, requests_path, requests_path_open_llm)
+    for result_idx in tqdm(range(len(raw_data)), desc="updating model type with open llm leaderboard"):
+        if requests_path_open_llm:
+            raw_data[result_idx] = update_model_type_with_open_llm_request_file(raw_data[result_idx], requests_path_open_llm)
 
     all_data_json_ = [v.to_dict() for v in raw_data if v.is_complete()]
 
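Worth flagging for a follow-up: requests_path_open_llm was dropped from get_raw_eval_results's signature, but the unchanged call on line 23 of populate.py still passes it as a third positional argument, where it now binds to is_backend, so any non-None path silently enables backend mode. A sketch of the call as the new signature expects it (assuming get_leaderboard_df's own is_backend flag should be forwarded):

# Drop the stale third argument so it no longer binds to is_backend.
raw_data: list[EvalResult] = get_raw_eval_results(results_path, requests_path, is_backend=is_backend)

The if requests_path_open_llm check inside the new tqdm loop is also loop-invariant and could be hoisted above the loop.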