Spaces:
Running
on
CPU Upgrade
Running
on
CPU Upgrade
eduagarcia
committed on
Commit
•
aa7060a
1
Parent(s):
7625ef6
Add raw results links if exists, and fix minor issues
Browse files
- src/display/formatting.py +7 -4
- src/envs.py +1 -1
- src/leaderboard/read_evals.py +1 -0
src/display/formatting.py
CHANGED
@@ -4,7 +4,7 @@ from datetime import datetime, timezone
|
|
4 |
from huggingface_hub import HfApi
|
5 |
from huggingface_hub.hf_api import ModelInfo
|
6 |
|
7 |
-
from src.envs import RESULTS_REPO, QUEUE_REPO
|
8 |
|
9 |
API = HfApi()
|
10 |
|
@@ -28,9 +28,12 @@ def make_clickable_model(model_name, json_path=None):
|
|
28 |
link = f"https://huggingface.co/{model_name}"
|
29 |
|
30 |
#details_model_name = model_name.replace("/", "__")
|
31 |
-
|
32 |
-
|
33 |
-
|
|
|
|
|
|
|
34 |
|
35 |
return model_hyperlink(link, model_name) + " " + model_hyperlink(details_link, "📑")
|
36 |
|
|
|
4 |
from huggingface_hub import HfApi
|
5 |
from huggingface_hub.hf_api import ModelInfo
|
6 |
|
7 |
+
from src.envs import RESULTS_REPO, QUEUE_REPO, RAW_RESULTS_REPO
|
8 |
|
9 |
API = HfApi()
|
10 |
|
|
|
28 |
link = f"https://huggingface.co/{model_name}"
|
29 |
|
30 |
#details_model_name = model_name.replace("/", "__")
|
31 |
+
if RAW_RESULTS_REPO is not None:
|
32 |
+
details_link = f"https://huggingface.co/datasets/{RAW_RESULTS_REPO}/tree/main/{model_name}"
|
33 |
+
else:
|
34 |
+
details_link = f"https://huggingface.co/datasets/{RESULTS_REPO}/tree/main/{model_name}"
|
35 |
+
if json_path is not None:
|
36 |
+
details_link = f"https://huggingface.co/datasets/{RESULTS_REPO}/blob/main/{model_name}/{json_path}"
|
37 |
|
38 |
return model_hyperlink(link, model_name) + " " + model_hyperlink(details_link, "📑")
|
39 |
|
src/envs.py
CHANGED
@@ -32,7 +32,7 @@ REPO_ID = get_config("REPO_ID", "HuggingFaceH4/open_llm_leaderboard")
|
|
32 |
QUEUE_REPO = get_config("QUEUE_REPO", "open-llm-leaderboard/requests")
|
33 |
DYNAMIC_INFO_REPO = get_config("DYNAMIC_INFO_REPO", "open-llm-leaderboard/dynamic_model_information")
|
34 |
RESULTS_REPO = get_config("RESULTS_REPO", "open-llm-leaderboard/results")
|
35 |
-
RAW_RESULTS_REPO = get_config("
|
36 |
|
37 |
PRIVATE_QUEUE_REPO = QUEUE_REPO
|
38 |
PRIVATE_RESULTS_REPO = RESULTS_REPO
|
|
|
32 |
QUEUE_REPO = get_config("QUEUE_REPO", "open-llm-leaderboard/requests")
|
33 |
DYNAMIC_INFO_REPO = get_config("DYNAMIC_INFO_REPO", "open-llm-leaderboard/dynamic_model_information")
|
34 |
RESULTS_REPO = get_config("RESULTS_REPO", "open-llm-leaderboard/results")
|
35 |
+
RAW_RESULTS_REPO = get_config("RAW_RESULTS_REPO", None)
|
36 |
|
37 |
PRIVATE_QUEUE_REPO = QUEUE_REPO
|
38 |
PRIVATE_RESULTS_REPO = RESULTS_REPO
|
src/leaderboard/read_evals.py
CHANGED
@@ -256,6 +256,7 @@ def get_raw_eval_results(results_path: str, requests_path: str, dynamic_path: st
|
|
256 |
eval_name = eval_result.eval_name
|
257 |
if eval_name in eval_results.keys():
|
258 |
eval_results[eval_name].results.update({k: v for k, v in eval_result.results.items() if v is not None})
|
|
|
259 |
else:
|
260 |
eval_results[eval_name] = eval_result
|
261 |
|
|
|
256 |
eval_name = eval_result.eval_name
|
257 |
if eval_name in eval_results.keys():
|
258 |
eval_results[eval_name].results.update({k: v for k, v in eval_result.results.items() if v is not None})
|
259 |
+
eval_results[eval_name].json_filename = eval_result.json_filename
|
260 |
else:
|
261 |
eval_results[eval_name] = eval_result
|
262 |
|