Update src/leaderboard/read_evals.py
src/leaderboard/read_evals.py CHANGED
@@ -181,6 +181,7 @@ def get_request_file_for_model(requests_path, model_name, precision):
         f"{model_name}_eval_request_*.json",
     )
     request_files = glob.glob(request_files)
+    print(f"Found request files: {request_files}")
 
     # Select correct request file (precision)
     request_file = ""
@@ -188,21 +189,23 @@ def get_request_file_for_model(requests_path, model_name, precision):
     for tmp_request_file in request_files:
         with open(tmp_request_file, "r") as f:
             req_content = json.load(f)
+            print(f"Request file content: {req_content}")
             if (
                 req_content["status"] in ["FINISHED"]
                 and req_content["precision"] == precision.split(".")[-1]
             ):
                 request_file = tmp_request_file
+    if request_file:
+        print(f"Using request file: {request_file}")
+    else:
+        print("No matching request file found.")
     return request_file
 
 def get_raw_eval_results(results_path: str, requests_path: str) -> list[EvalResult]:
     """From the path of the results folder root, extract all needed info for results"""
     model_result_filepaths = []
 
-    for root, dirs, files in os.walk(results_path):
-        print(f"Current directory: {root}")
-        print(f"Subdirectories: {dirs}")
-        print(f"Files: {files}")
+    for root, _, files in os.walk(results_path):
         # We should only have json files in model results
         if len(files) == 0 or any([not f.endswith(".json") for f in files]):
             continue
@@ -216,11 +219,9 @@ def get_raw_eval_results(results_path: str, requests_path: str) -> list[EvalResult]:
         for file in files:
             model_result_filepaths.append(os.path.join(root, file))
 
-    print(f"Model result filepaths found: {model_result_filepaths}")
 
     eval_results = {}
     for model_result_filepath in model_result_filepaths:
-        print(f"Processing result file: {model_result_filepath}")
         # Creation of result
         eval_result = EvalResult.init_from_json_file(model_result_filepath)
         eval_result.update_with_request_file(requests_path)
@@ -232,13 +233,6 @@ def get_raw_eval_results(results_path: str, requests_path: str) -> list[EvalResult]:
         else:
             eval_results[eval_name] = eval_result
 
-    # Debug: final data check
-    if eval_result:
-        data_dict = eval_result.to_dict()
-        print(f"Final leaderboard entry: {data_dict}")
-    else:
-        print("No valid eval result was processed.")
-
     data_dict = eval_result.to_dict()
 
     results = []
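For reference, the selection logic the new prints trace can be exercised on its own. A minimal sketch, assuming request files follow the {model_name}_eval_request_*.json naming used above; the model name, file names, and JSON contents below are made up for illustration, and the filter loop mirrors get_request_file_for_model rather than importing it:

import glob
import json
import os
import tempfile

# Throwaway requests directory with two request files for one model
# (hypothetical names and contents); only the FINISHED entry whose
# precision matches should be selected, as in the function above.
with tempfile.TemporaryDirectory() as requests_path:
    samples = {
        "my-model_eval_request_float16.json": {"status": "FINISHED", "precision": "float16"},
        "my-model_eval_request_bfloat16.json": {"status": "PENDING", "precision": "bfloat16"},
    }
    for name, content in samples.items():
        with open(os.path.join(requests_path, name), "w") as f:
            json.dump(content, f)

    precision = "Precision.float16"  # stand-in for the enum-style value split below
    request_files = glob.glob(os.path.join(requests_path, "my-model_eval_request_*.json"))
    print(f"Found request files: {request_files}")

    request_file = ""
    for tmp_request_file in request_files:
        with open(tmp_request_file, "r") as f:
            req_content = json.load(f)
        if (
            req_content["status"] in ["FINISHED"]
            and req_content["precision"] == precision.split(".")[-1]
        ):
            request_file = tmp_request_file

    if request_file:
        print(f"Using request file: {request_file}")
    else:
        print("No matching request file found.")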
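The corrected os.walk loop can be checked the same way against a scratch results tree. Again a sketch under assumptions: the directory and file names are invented, and only the traversal and filtering from get_raw_eval_results is reproduced, not the EvalResult handling:

import json
import os
import tempfile

with tempfile.TemporaryDirectory() as results_path:
    # Hypothetical layout: one model directory holding a JSON result,
    # another holding a stray non-JSON file that should be skipped.
    os.makedirs(os.path.join(results_path, "org__model-a"))
    os.makedirs(os.path.join(results_path, "org__model-b"))
    with open(os.path.join(results_path, "org__model-a", "results_2024-01-01.json"), "w") as f:
        json.dump({"results": {}}, f)
    with open(os.path.join(results_path, "org__model-b", "notes.txt"), "w") as f:
        f.write("not a result")

    model_result_filepaths = []
    for root, _, files in os.walk(results_path):
        # Same filter as above: skip empty directories and any directory
        # containing non-JSON files.
        if len(files) == 0 or any([not f.endswith(".json") for f in files]):
            continue
        for file in files:
            model_result_filepaths.append(os.path.join(root, file))

    # Only org__model-a's JSON result is collected.
    print(f"Model result filepaths found: {model_result_filepaths}")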